language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
huggingface__transformers
src/transformers/data/processors/squad.py
{ "start": 23153, "end": 25360 }
class ____: """ A single training/test example for the Squad dataset, as loaded from disk. Args: qas_id: The example's unique identifier question_text: The question string context_text: The context string answer_text: The answer string start_position_character: The character position of the start of the answer title: The title of the example answers: None by default, this is used during evaluation. Holds answers as well as their start positions. is_impossible: False by default, set to True if the example has no possible answer. """ def __init__( self, qas_id, question_text, context_text, answer_text, start_position_character, title, answers=[], is_impossible=False, ): self.qas_id = qas_id self.question_text = question_text self.context_text = context_text self.answer_text = answer_text self.title = title self.is_impossible = is_impossible self.answers = answers self.start_position, self.end_position = 0, 0 doc_tokens = [] char_to_word_offset = [] prev_is_whitespace = True # Split on whitespace so that different tokens may be attributed to their original position. for c in self.context_text: if _is_whitespace(c): prev_is_whitespace = True else: if prev_is_whitespace: doc_tokens.append(c) else: doc_tokens[-1] += c prev_is_whitespace = False char_to_word_offset.append(len(doc_tokens) - 1) self.doc_tokens = doc_tokens self.char_to_word_offset = char_to_word_offset # Start and end positions only has a value during evaluation. if start_position_character is not None and not is_impossible: self.start_position = char_to_word_offset[start_position_character] self.end_position = char_to_word_offset[ min(start_position_character + len(answer_text) - 1, len(char_to_word_offset) - 1) ]
SquadExample
python
huggingface__transformers
src/transformers/models/deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py
{ "start": 6338, "end": 7243 }
class ____(nn.Module): def __init__(self, config): super().__init__() self.config = config self.conv1 = nn.Conv2d(config.hidden_size, config.output_channels, kernel_size=1, bias=False) self.layer_norm1 = DeepseekVLHybridLayerNorm(config.output_channels, data_format="channels_first") self.conv2 = nn.Conv2d(config.output_channels, config.output_channels, kernel_size=3, padding=1, bias=False) self.layer_norm2 = DeepseekVLHybridLayerNorm(config.output_channels, data_format="channels_first") def forward(self, hidden_states): hidden_states = hidden_states.permute(0, 3, 1, 2) hidden_states = self.conv1(hidden_states) hidden_states = self.layer_norm1(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.layer_norm2(hidden_states) return hidden_states
DeepseekVLSamVisionNeck
python
doocs__leetcode
solution/0500-0599/0532.K-diff Pairs in an Array/Solution.py
{ "start": 0, "end": 299 }
class ____: def findPairs(self, nums: List[int], k: int) -> int: ans = set() vis = set() for x in nums: if x - k in vis: ans.add(x - k) if x + k in vis: ans.add(x) vis.add(x) return len(ans)
Solution
python
encode__django-rest-framework
tests/test_utils.py
{ "start": 686, "end": 718 }
class ____(APIView): pass
Root
python
openai__openai-python
src/openai/resources/fine_tuning/alpha/alpha.py
{ "start": 3004, "end": 3274 }
class ____: def __init__(self, alpha: AsyncAlpha) -> None: self._alpha = alpha @cached_property def graders(self) -> AsyncGradersWithStreamingResponse: return AsyncGradersWithStreamingResponse(self._alpha.graders)
AsyncAlphaWithStreamingResponse
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/roots/mutation.py
{ "start": 34836, "end": 35424 }
class ____(graphene.Mutation): """Frees the concurrency slots occupied by a specific run.""" Output = graphene.NonNull(graphene.Boolean) class Meta: name = "FreeConcurrencySlotsForRunMutation" class Arguments: runId = graphene.Argument(graphene.NonNull(graphene.String)) @capture_error @check_permission(Permissions.EDIT_CONCURRENCY_LIMIT) def mutate(self, graphene_info, runId: str): graphene_info.context.instance.event_log_storage.free_concurrency_slots_for_run(runId) return True
GrapheneFreeConcurrencySlotsForRunMutation
python
pypa__warehouse
tests/unit/macaroons/test_caveats.py
{ "start": 12758, "end": 14925 }
class ____: def test_verify_invalid_signature(self): m = Macaroon(location="somewhere", identifier="something", key=b"a secure key") status = verify( m, b"a different key", pretend.stub(), pretend.stub(), pretend.stub() ) assert not status assert status.msg == "signatures do not match" def test_caveat_returns_false(self): m = Macaroon(location="somewhere", identifier="something", key=b"a secure key") m.add_first_party_caveat(serialize(Expiration(expires_at=10, not_before=0))) status = verify( m, b"a secure key", pretend.stub(), pretend.stub(), pretend.stub() ) assert not status assert status.msg == "token is expired" def test_caveat_errors_on_deserialize(self): m = Macaroon(location="somewhere", identifier="something", key=b"a secure key") m.add_first_party_caveat(b"[]") status = verify( m, b"a secure key", pretend.stub(), pretend.stub(), pretend.stub() ) assert not status assert status.msg == "caveat array cannot be empty" def test_valid_caveat(self): now = int(time.time()) m = Macaroon(location="somewhere", identifier="something", key=b"a secure key") m.add_first_party_caveat( serialize(Expiration(expires_at=now + 1000, not_before=now - 1000)) ) status = verify( m, b"a secure key", pretend.stub(), pretend.stub(), pretend.stub() ) assert status assert status.msg == "signature and caveats OK" def test_generic_exception(self, monkeypatch): def _raiser(*args, **kwargs): raise Exception("my generic exception") monkeypatch.setattr(caveats, "deserialize", _raiser) m = Macaroon(location="somewhere", identifier="something", key=b"a secure key") m.add_first_party_caveat(serialize(Expiration(expires_at=1, not_before=1))) status = verify( m, b"a secure key", pretend.stub(), pretend.stub(), pretend.stub() ) assert not status assert status.msg == "unknown error"
TestVerification
python
spack__spack
var/spack/test_repos/spack_repo/builtin_mock/packages/dep_diamond_patch_mid1/package.py
{ "start": 217, "end": 608 }
class ____(Package): r"""Package that requires a patch on a dependency W / \ X Y \ / Z This is package X """ homepage = "http://www.example.com" url = "http://www.example.com/patch-a-dependency-1.0.tar.gz" version("1.0", md5="0123456789abcdef0123456789abcdef") # single patch file in repo depends_on("patch", patches="mid1.patch")
DepDiamondPatchMid1
python
dagster-io__dagster
python_modules/dagster/dagster/_core/execution/context/system.py
{ "start": 54484, "end": 55383 }
class ____(StepExecutionContext): """The context object provided to a :py:class:`@dagster_type_loader <dagster_type_loader>`-decorated function during execution. Users should not construct this object directly. """ @public @property def resources(self) -> "Resources": """The resources available to the type loader, specified by the `required_resource_keys` argument of the decorator.""" return super().resources @public @property def job_def(self) -> "JobDefinition": """The underlying job definition being executed.""" return super().job_def @property def repository_def(self) -> "RepositoryDefinition": return super().repository_def @public @property def op_def(self) -> "OpDefinition": """The op for which type loading is occurring.""" return super().op_def
DagsterTypeLoaderContext
python
huggingface__transformers
src/transformers/models/data2vec/modeling_data2vec_text.py
{ "start": 18007, "end": 20811 }
class ____(GradientCheckpointingLayer): def __init__(self, config, layer_idx=None): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = Data2VecTextAttention(config, is_causal=config.is_decoder, layer_idx=layer_idx) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = Data2VecTextAttention( config, is_causal=False, layer_idx=layer_idx, is_cross_attention=True, ) self.intermediate = Data2VecTextIntermediate(config) self.output = Data2VecTextOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Cache] = None, cache_position: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor]: self_attention_output, _ = self.attention( hidden_states, attention_mask, past_key_values=past_key_values, cache_position=cache_position, **kwargs, ) attention_output = self_attention_output if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) cross_attention_output, _ = self.crossattention( self_attention_output, None, # attention_mask encoder_hidden_states, encoder_attention_mask, past_key_values=past_key_values, **kwargs, ) attention_output = cross_attention_output layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) return layer_output def feed_forward_chunk(self, attention_output): 
intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output @auto_docstring
Data2VecTextLayer
python
psf__black
tests/data/miscellaneous/force_pyi.py
{ "start": 487, "end": 597 }
class ____: def BMethod(self) -> None: ... @overload def BMethod(self, arg: List[str]) -> None: ...
B
python
pypa__pip
src/pip/_vendor/urllib3/_collections.py
{ "start": 641, "end": 3017 }
class ____(MutableMapping): """ Provides a thread-safe dict-like container which maintains up to ``maxsize`` keys while throwing away the least-recently-used keys beyond ``maxsize``. :param maxsize: Maximum number of recent elements to retain. :param dispose_func: Every time an item is evicted from the container, ``dispose_func(value)`` is called. Callback which will get called """ ContainerCls = OrderedDict def __init__(self, maxsize=10, dispose_func=None): self._maxsize = maxsize self.dispose_func = dispose_func self._container = self.ContainerCls() self.lock = RLock() def __getitem__(self, key): # Re-insert the item, moving it to the end of the eviction line. with self.lock: item = self._container.pop(key) self._container[key] = item return item def __setitem__(self, key, value): evicted_value = _Null with self.lock: # Possibly evict the existing value of 'key' evicted_value = self._container.get(key, _Null) self._container[key] = value # If we didn't evict an existing value, we might have to evict the # least recently used item from the beginning of the container. if len(self._container) > self._maxsize: _key, evicted_value = self._container.popitem(last=False) if self.dispose_func and evicted_value is not _Null: self.dispose_func(evicted_value) def __delitem__(self, key): with self.lock: value = self._container.pop(key) if self.dispose_func: self.dispose_func(value) def __len__(self): with self.lock: return len(self._container) def __iter__(self): raise NotImplementedError( "Iteration over this class is unlikely to be threadsafe." ) def clear(self): with self.lock: # Copy pointers to all values, then wipe the mapping values = list(itervalues(self._container)) self._container.clear() if self.dispose_func: for value in values: self.dispose_func(value) def keys(self): with self.lock: return list(iterkeys(self._container))
RecentlyUsedContainer
python
ethereum__web3.py
tests/core/utilities/test_attach_modules.py
{ "start": 382, "end": 453 }
class ____(Module): def block_number(self): return 42
MockEth
python
facebook__pyre-check
client/commands/report_any_expressions.py
{ "start": 2930, "end": 6222 }
class ____(json_mixins.SnakeCaseAndExcludeJsonMixin): path: str expression_statistics: ExpressionStatistics any_expressions: List[AnyExpression] @staticmethod def from_typed_backend_data( data: Union[ expression_level_coverage.CoverageAtPathResponse, expression_level_coverage.ErrorAtPathResponse, ], root_path: Path, ) -> ModuleExpressionData: if isinstance(data, expression_level_coverage.CoverageAtPathResponse): coverage_at_path = data.CoverageAtPath return ModuleExpressionData( path=relative_path(coverage_at_path.path, root_path), any_expressions=[ AnyExpression.from_typed_backend_data(coverage_gap) for coverage_gap in coverage_at_path.coverage_gaps ], expression_statistics=ExpressionStatistics.from_coverage_at_path( coverage_at_path ), ) else: error_at_path = data.ErrorAtPath return ModuleExpressionData( path=relative_path(error_at_path.path, root_path), any_expressions=[], expression_statistics=ExpressionStatistics.from_error( error_at_path.error ), ) def get_module_paths( configuration: frontend_configuration.Base, paths: Optional[List[Path]], ) -> List[Path]: if paths is None: paths = [ configuration.get_local_root() or configuration.get_global_root(), ] return list( coverage_data.find_module_paths( paths=paths, excludes=configuration.get_excludes(), ) ) def print_data_as_json(data: Sequence[ModuleExpressionData]) -> None: raw_data = [module_data.to_dict() for module_data in data] json.dump(raw_data, log.stdout) def query_backend( configuration: frontend_configuration.Base, paths: Optional[List[Path]], ) -> query_response.Response: socket_path = daemon_socket.get_socket_path( configuration.get_project_identifier(), flavor=identifiers.PyreFlavor.CLASSIC, ) module_paths = get_module_paths( configuration=configuration, paths=paths, ) with tempfile.NamedTemporaryFile("w") as paths_file: paths_file.write("\n".join(str(path) for path in module_paths)) paths_file.flush() query_string = f'expression_level_coverage("@{paths_file.name}")' return 
daemon_query.execute_query(socket_path, query_string) def run( configuration: frontend_configuration.Base, paths: Optional[List[Path]], ) -> int: raw_response = query_backend( configuration=configuration, paths=paths, ) typed_response = expression_level_coverage._make_expression_level_coverage_response( raw_response.payload ) project_root = configuration.get_local_root() or configuration.get_global_root() report = [ ModuleExpressionData.from_typed_backend_data( path_response, project_root, ) for path_response in typed_response.response ] print_data_as_json(report) return commands.ExitCode.SUCCESS
ModuleExpressionData
python
apache__airflow
airflow-core/src/airflow/api_fastapi/core_api/datamodels/task_instance_history.py
{ "start": 1316, "end": 2454 }
class ____(BaseModel): """TaskInstanceHistory serializer for responses.""" task_id: str dag_id: str # todo: this should not be aliased; it's ambiguous with dag run's "id" - airflow 3.0 run_id: str = Field(alias="dag_run_id") map_index: int start_date: datetime | None end_date: datetime | None duration: float | None state: TaskInstanceState | None try_number: int max_tries: int task_display_name: str dag_display_name: str = Field(validation_alias=AliasPath("dag_run", "dag_model", "dag_display_name")) hostname: str | None unixname: str | None pool: str pool_slots: int queue: str | None priority_weight: int | None operator: str | None custom_operator_name: str | None = Field(alias="operator_name") queued_dttm: datetime | None = Field(alias="queued_when") scheduled_dttm: datetime | None = Field(alias="scheduled_when") pid: int | None executor: str | None executor_config: Annotated[str, BeforeValidator(str)] dag_version: DagVersionResponse | None hitl_detail: HITLDetailHistory | None
TaskInstanceHistoryResponse
python
facebook__pyre-check
client/tests/frontend_configuration_test.py
{ "start": 580, "end": 8147 }
class ____(testslide.TestCase): def test_dot_pyre_directory(self) -> None: self.assertEqual( frontend_configuration.OpenSource( configuration_module.Configuration( global_root=Path("foo"), dot_pyre_directory=Path(".pyre") ) ).get_dot_pyre_directory(), Path(".pyre"), ) self.assertEqual( frontend_configuration.OpenSource( configuration_module.Configuration( global_root=Path("foo"), dot_pyre_directory=None ) ).get_dot_pyre_directory(), Path("foo") / find_directories.LOG_DIRECTORY, ) def test_log_directory(self) -> None: def assert_log_directory( expected: Path, dot_pyre_directory: Path, relative_local_root: Optional[str] = None, ) -> None: self.assertEqual( frontend_configuration.OpenSource( configuration_module.Configuration( global_root=Path("foo"), dot_pyre_directory=dot_pyre_directory, relative_local_root=relative_local_root, ) ).get_log_directory(), expected, ) assert_log_directory(dot_pyre_directory=Path(".pyre"), expected=Path(".pyre")) assert_log_directory( dot_pyre_directory=Path(".pyre"), relative_local_root="bar", expected=Path(".pyre/bar"), ) assert_log_directory( dot_pyre_directory=Path(".pyre"), relative_local_root="bar/baz", expected=Path(".pyre/bar/baz"), ) def test_get_binary_from_configuration(self) -> None: with switch_environment({}): start_command = frontend_configuration.OpenSource( configuration_module.Configuration( global_root=Path("irrelevant"), dot_pyre_directory=Path(".pyre"), binary="foo", ) ).get_server_start_command() self.assertIsNotNone(start_command) self.assertEqual( start_command.get_pyre_binary_location(), "foo", ) def test_get_binary_auto_determined(self) -> None: self.mock_callable(shutil, "which").for_call( find_directories.BINARY_NAME ).to_return_value("foo").and_assert_called_once() with switch_environment({}): start_command = frontend_configuration.OpenSource( configuration_module.Configuration( global_root=Path("irrelevant"), dot_pyre_directory=Path(".pyre"), ) ).get_server_start_command() self.assertIsNotNone(start_command) 
( self.assertEqual( start_command.get_pyre_binary_location(), "foo", ), ) def test_get_binary_cannot_auto_determine(self) -> None: self.mock_callable(shutil, "which").to_return_value(None).and_assert_called() with switch_environment({}): self.assertIsNone( frontend_configuration.OpenSource( configuration_module.Configuration( global_root=Path("irrelevant"), dot_pyre_directory=Path(".pyre"), binary=None, ) ).get_server_start_command(), ) def test_typeshed_existent_search_path(self) -> None: with tempfile.TemporaryDirectory() as root: root_path = Path(root) ensure_directories_exists(root_path, ["a"]) ensure_directories_exists( root_path, ["typeshed/stdlib", "typeshed/stubs/foo"] ) self.assertListEqual( frontend_configuration.OpenSource( configuration_module.Configuration( global_root=Path("irrelevant"), dot_pyre_directory=Path(".pyre"), search_path=[ SimpleRawElement(str(root_path / "a")), ], typeshed=str(root_path / "typeshed"), ) ).get_existent_typeshed_search_paths(), [ SimpleElement(str(root_path / "typeshed/stdlib")), SimpleElement(str(root_path / "typeshed/stubs/foo")), ], ) def test_existent_search_path_with_typeshed(self) -> None: with tempfile.TemporaryDirectory() as root: root_path = Path(root) ensure_directories_exists(root_path, ["a"]) ensure_directories_exists( root_path, ["typeshed/stdlib", "typeshed/stubs/foo"] ) self.assertListEqual( frontend_configuration.OpenSource( configuration_module.Configuration( global_root=Path("irrelevant"), dot_pyre_directory=Path(".pyre"), search_path=[ SimpleRawElement(str(root_path / "a")), ], typeshed=str(root_path / "typeshed"), ) ).get_existent_search_paths(), [ SimpleElement(str(root_path / "a")), SimpleElement(str(root_path / "typeshed/stdlib")), SimpleElement(str(root_path / "typeshed/stubs/foo")), ], ) def test_get_typeshed_from_configuration(self) -> None: with switch_environment({}): self.assertEqual( frontend_configuration.OpenSource( configuration_module.Configuration( global_root=Path("irrelevant"), 
dot_pyre_directory=Path(".pyre"), typeshed="foo", ) ).get_typeshed_location(), Path("foo"), ) def test_get_typeshed_auto_determined(self) -> None: self.mock_callable( find_directories, "find_typeshed" ).for_call().to_return_value(Path("foo")).and_assert_called_once() with switch_environment({}): self.assertEqual( frontend_configuration.OpenSource( configuration_module.Configuration( global_root=Path("irrelevant"), dot_pyre_directory=Path(".pyre"), typeshed=None, ) ).get_typeshed_location(), Path("foo"), ) def test_get_typeshed_cannot_auto_determine(self) -> None: self.mock_callable( find_directories, "find_typeshed" ).for_call().to_return_value(None).and_assert_called_once() with switch_environment({}): self.assertIsNone( frontend_configuration.OpenSource( configuration_module.Configuration( global_root=Path("irrelevant"), dot_pyre_directory=Path(".pyre"), typeshed=None, ) ).get_typeshed_location(), )
FrontendConfigurationTest
python
getsentry__sentry
src/sentry/constants.py
{ "start": 18193, "end": 19037 }
class ____: PENDING = 0 INSTALLED = 1 PENDING_DELETION = 2 PENDING_STR = "pending" INSTALLED_STR = "installed" PENDING_DELETION_STR = "pending_deletion" @classmethod def as_choices(cls) -> Sequence[tuple[int, str]]: return ( (cls.PENDING, cls.PENDING_STR), (cls.INSTALLED, cls.INSTALLED_STR), (cls.PENDING_DELETION, cls.PENDING_DELETION_STR), ) @classmethod def as_str(cls, status: int) -> str: if status == cls.PENDING: return cls.PENDING_STR elif status == cls.INSTALLED: return cls.INSTALLED_STR elif status == cls.PENDING_DELETION: return cls.PENDING_DELETION_STR else: raise ValueError(f"Not a SentryAppInstallationStatus int: {status!r}")
SentryAppInstallationStatus
python
dagster-io__dagster
python_modules/dagster/dagster/_core/remote_representation/external_data.py
{ "start": 7052, "end": 7153 }
class ____(NamedTuple): type: EnvVarConsumerType name: str @whitelist_for_serdes
EnvVarConsumer
python
django__django
tests/model_inheritance/models.py
{ "start": 4775, "end": 4830 }
class ____(FirstParent, SecondParent): pass
CommonChild
python
scikit-learn__scikit-learn
sklearn/_loss/loss.py
{ "start": 19881, "end": 21052 }
class ____(BaseLoss): """Absolute error with identity link, for regression. Domain: y_true and y_pred all real numbers Link: y_pred = raw_prediction For a given sample x_i, the absolute error is defined as:: loss(x_i) = |y_true_i - raw_prediction_i| Note that the exact hessian = 0 almost everywhere (except at one point, therefore differentiable = False). Optimization routines like in HGBT, however, need a hessian > 0. Therefore, we assign 1. """ differentiable = False need_update_leaves_values = True def __init__(self, sample_weight=None): super().__init__(closs=CyAbsoluteError(), link=IdentityLink()) self.approx_hessian = True self.constant_hessian = sample_weight is None def fit_intercept_only(self, y_true, sample_weight=None): """Compute raw_prediction of an intercept-only model. This is the weighted median of the target, i.e. over the samples axis=0. """ if sample_weight is None: return np.median(y_true, axis=0) else: return _weighted_percentile(y_true, sample_weight, 50)
AbsoluteError
python
kamyu104__LeetCode-Solutions
Python/maximum-white-tiles-covered-by-a-carpet.py
{ "start": 76, "end": 817 }
class ____(object): def maximumWhiteTiles(self, tiles, carpetLen): """ :type tiles: List[List[int]] :type carpetLen: int :rtype: int """ tiles.sort() result = right = gap = 0 for left, (l, _) in enumerate(tiles): if left-1 >= 0: gap -= tiles[left][0]-tiles[left-1][1]-1 r = l+carpetLen-1 while right+1 < len(tiles) and r+1 >= tiles[right+1][0]: right += 1 gap += tiles[right][0]-tiles[right-1][1]-1 result = max(result, min(tiles[right][1]-tiles[left][0]+1, carpetLen)-gap) return result # Time: O(nlogn) # Space: O(1) # sliding window, optimized from solution4
Solution
python
pytorch__pytorch
test/test_cuda.py
{ "start": 242125, "end": 243773 }
class ____(TestCase): def _get_tmp_dir_fs_type(self): my_path = os.path.realpath("/tmp") root_type = "" for part in psutil.disk_partitions(): if part.mountpoint == "/": root_type = part.fstype continue if part.mountpoint == my_path: return part.fstype return root_type @unittest.skip("Disabling as USE_CUFILE=0 by default in builds") def test_gds_read_write_tensors(self): if self._get_tmp_dir_fs_type() not in ("ext4", "xfs"): self.skipTest("GPUDirect Storage requires ext4/xfs for local filesystem") src1 = torch.randn(1024, device="cuda") src2 = torch.randn(2, 1024, device="cuda") torch.cuda.gds.gds_register_buffer(src1.untyped_storage()) torch.cuda.gds.gds_register_buffer(src2.untyped_storage()) dest1 = torch.empty(1024, device="cuda") dest2 = torch.empty(2, 1024, device="cuda") with TemporaryFileName() as f: file = torch.cuda.gds.GdsFile(f, os.O_CREAT | os.O_RDWR) file.save_storage(src1.untyped_storage(), offset=0) file.save_storage(src2.untyped_storage(), offset=src1.nbytes) file.load_storage(dest1.untyped_storage(), offset=0) file.load_storage(dest2.untyped_storage(), offset=src1.nbytes) self.assertEqual(src1, dest1) self.assertEqual(src2, dest2) torch.cuda.gds.gds_deregister_buffer(src1.untyped_storage()) torch.cuda.gds.gds_deregister_buffer(src2.untyped_storage()) @unittest.skipIf(not TEST_CUDA, "CUDA not available, skipping tests")
TestGDS
python
weaviate__weaviate-python-client
weaviate/config.py
{ "start": 135, "end": 1199 }
class ____: session_pool_connections: int = 20 session_pool_maxsize: int = 100 session_pool_max_retries: int = 3 session_pool_timeout: int = 5 def __post_init__(self) -> None: if not isinstance(self.session_pool_connections, int): raise TypeError( f"session_pool_connections must be {int}, received {type(self.session_pool_connections)}" ) if not isinstance(self.session_pool_maxsize, int): raise TypeError( f"session_pool_maxsize must be {int}, received {type(self.session_pool_maxsize)}" ) if not isinstance(self.session_pool_max_retries, int): raise TypeError( f"session_pool_max_retries must be {int}, received {type(self.session_pool_max_retries)}" ) if not isinstance(self.session_pool_timeout, int): raise TypeError( f"session_pool_timeout must be {int}, received {type(self.session_pool_timeout)}" ) # used in v3 only @dataclass
ConnectionConfig
python
walkccc__LeetCode
solutions/2117. Abbreviating the Product of a Range/2117.py
{ "start": 0, "end": 662 }
class ____: def abbreviateProduct(self, left: int, right: int) -> str: prod = 1.0 suf = 1 countDigits = 0 countZeros = 0 for num in range(left, right + 1): prod *= num while prod >= 1.0: prod /= 10 countDigits += 1 suf *= num while suf % 10 == 0: suf //= 10 countZeros += 1 if suf > 10**8: suf %= 10**8 if countDigits - countZeros <= 10: tens = 10**(countDigits - countZeros) return str(int(prod * tens + 0.5)) + 'e' + str(countZeros) pre = str(int(prod * 10 ** 5)) suf = str(suf)[-5:] return pre + '...' + suf + 'e' + str(countZeros)
Solution
python
pytorch__pytorch
test/dynamo/cpython/3_13/test_generators.py
{ "start": 23311, "end": 51469 }
class ____(__TestCase): def test_generator_gi_yieldfrom(self): def a(): self.assertEqual(inspect.getgeneratorstate(gen_b), inspect.GEN_RUNNING) self.assertIsNone(gen_b.gi_yieldfrom) yield self.assertEqual(inspect.getgeneratorstate(gen_b), inspect.GEN_RUNNING) self.assertIsNone(gen_b.gi_yieldfrom) def b(): self.assertIsNone(gen_b.gi_yieldfrom) yield from a() self.assertIsNone(gen_b.gi_yieldfrom) yield self.assertIsNone(gen_b.gi_yieldfrom) gen_b = b() self.assertEqual(inspect.getgeneratorstate(gen_b), inspect.GEN_CREATED) self.assertIsNone(gen_b.gi_yieldfrom) gen_b.send(None) self.assertEqual(inspect.getgeneratorstate(gen_b), inspect.GEN_SUSPENDED) self.assertEqual(gen_b.gi_yieldfrom.gi_code.co_name, 'a') gen_b.send(None) self.assertEqual(inspect.getgeneratorstate(gen_b), inspect.GEN_SUSPENDED) self.assertIsNone(gen_b.gi_yieldfrom) [] = gen_b # Exhaust generator self.assertEqual(inspect.getgeneratorstate(gen_b), inspect.GEN_CLOSED) self.assertIsNone(gen_b.gi_yieldfrom) tutorial_tests = """ Let's try a simple generator: >>> def f(): ... yield 1 ... yield 2 >>> for i in f(): ... print(i) 1 2 >>> g = f() >>> next(g) 1 >>> next(g) 2 "Falling off the end" stops the generator: >>> next(g) Traceback (most recent call last): File "<stdin>", line 1, in ? File "<stdin>", line 2, in g StopIteration "return" also stops the generator: >>> def f(): ... yield 1 ... return ... yield 2 # never reached ... >>> g = f() >>> next(g) 1 >>> next(g) Traceback (most recent call last): File "<stdin>", line 1, in ? File "<stdin>", line 3, in f StopIteration >>> next(g) # once stopped, can't be resumed Traceback (most recent call last): File "<stdin>", line 1, in ? StopIteration However, "return" and StopIteration are not exactly equivalent: >>> def g1(): ... try: ... return ... except: ... yield 1 ... >>> list(g1()) [] >>> def g2(): ... try: ... raise StopIteration ... except: ... yield 42 >>> print(list(g2())) [42] This may be surprising at first: >>> def g3(): ... try: ... return ... 
finally: ... yield 1 ... >>> list(g3()) [1] Let's create an alternate range() function implemented as a generator: >>> def yrange(n): ... for i in range(n): ... yield i ... >>> list(yrange(5)) [0, 1, 2, 3, 4] Generators always return to the most recent caller: >>> def creator(): ... r = yrange(5) ... print("creator", next(r)) ... return r ... >>> def caller(): ... r = creator() ... for i in r: ... print("caller", i) ... >>> caller() creator 0 caller 1 caller 2 caller 3 caller 4 Generators can call other generators: >>> def zrange(n): ... for i in yrange(n): ... yield i ... >>> list(zrange(5)) [0, 1, 2, 3, 4] """ # The examples from PEP 255. pep_tests = """ Specification: Yield Restriction: A generator cannot be resumed while it is actively running: >>> def g(): ... i = next(me) ... yield i >>> me = g() >>> next(me) Traceback (most recent call last): ... File "<string>", line 2, in g ValueError: generator already executing Specification: Return Note that return isn't always equivalent to raising StopIteration: the difference lies in how enclosing try/except constructs are treated. For example, >>> def f1(): ... try: ... return ... except: ... yield 1 >>> print(list(f1())) [] because, as in any function, return simply exits, but >>> def f2(): ... try: ... raise StopIteration ... except: ... yield 42 >>> print(list(f2())) [42] because StopIteration is captured by a bare "except", as is any exception. Specification: Generators and Exception Propagation >>> def f(): ... return 1//0 >>> def g(): ... yield f() # the zero division exception propagates ... yield 42 # and we'll never get here >>> k = g() >>> next(k) Traceback (most recent call last): File "<stdin>", line 1, in ? File "<stdin>", line 2, in g File "<stdin>", line 2, in f ZeroDivisionError: integer division or modulo by zero >>> next(k) # and the generator cannot be resumed Traceback (most recent call last): File "<stdin>", line 1, in ? StopIteration >>> Specification: Try/Except/Finally >>> def f(): ... 
try: ... yield 1 ... try: ... yield 2 ... 1//0 ... yield 3 # never get here ... except ZeroDivisionError: ... yield 4 ... yield 5 ... raise ... except: ... yield 6 ... yield 7 # the "raise" above stops this ... except: ... yield 8 ... yield 9 ... try: ... x = 12 ... finally: ... yield 10 ... yield 11 >>> print(list(f())) [1, 2, 4, 5, 8, 9, 10, 11] >>> Guido's binary tree example. >>> # A binary tree class. >>> class Tree: ... ... def __init__(self, label, left=None, right=None): ... self.label = label ... self.left = left ... self.right = right ... ... def __repr__(self, level=0, indent=" "): ... s = level*indent + repr(self.label) ... if self.left: ... s = s + "\\n" + self.left.__repr__(level+1, indent) ... if self.right: ... s = s + "\\n" + self.right.__repr__(level+1, indent) ... return s ... ... def __iter__(self): ... return inorder(self) >>> # Create a Tree from a list. >>> def tree(list): ... n = len(list) ... if n == 0: ... return [] ... i = n // 2 ... return Tree(list[i], tree(list[:i]), tree(list[i+1:])) >>> # Show it off: create a tree. >>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ") >>> # A recursive generator that generates Tree labels in in-order. >>> def inorder(t): ... if t: ... for x in inorder(t.left): ... yield x ... yield t.label ... for x in inorder(t.right): ... yield x >>> # Show it off: create a tree. >>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ") >>> # Print the nodes of the tree in in-order. >>> for x in t: ... print(' '+x, end='') A B C D E F G H I J K L M N O P Q R S T U V W X Y Z >>> # A non-recursive generator. >>> def inorder(node): ... stack = [] ... while node: ... while node.left: ... stack.append(node) ... node = node.left ... yield node.label ... while not node.right: ... try: ... node = stack.pop() ... except IndexError: ... return ... yield node.label ... node = node.right >>> # Exercise the non-recursive generator. >>> for x in t: ... 
print(' '+x, end='') A B C D E F G H I J K L M N O P Q R S T U V W X Y Z """ # Examples from Iterator-List and Python-Dev and c.l.py. email_tests = """ The difference between yielding None and returning it. >>> def g(): ... for i in range(3): ... yield None ... yield None ... return >>> list(g()) [None, None, None, None] Ensure that explicitly raising StopIteration acts like any other exception in try/except, not like a return. >>> def g(): ... yield 1 ... try: ... raise StopIteration ... except: ... yield 2 ... yield 3 >>> list(g()) [1, 2, 3] Next one was posted to c.l.py. >>> def gcomb(x, k): ... "Generate all combinations of k elements from list x." ... ... if k > len(x): ... return ... if k == 0: ... yield [] ... else: ... first, rest = x[0], x[1:] ... # A combination does or doesn't contain first. ... # If it does, the remainder is a k-1 comb of rest. ... for c in gcomb(rest, k-1): ... c.insert(0, first) ... yield c ... # If it doesn't contain first, it's a k comb of rest. ... for c in gcomb(rest, k): ... yield c >>> seq = list(range(1, 5)) >>> for k in range(len(seq) + 2): ... print("%d-combs of %s:" % (k, seq)) ... for c in gcomb(seq, k): ... print(" ", c) 0-combs of [1, 2, 3, 4]: [] 1-combs of [1, 2, 3, 4]: [1] [2] [3] [4] 2-combs of [1, 2, 3, 4]: [1, 2] [1, 3] [1, 4] [2, 3] [2, 4] [3, 4] 3-combs of [1, 2, 3, 4]: [1, 2, 3] [1, 2, 4] [1, 3, 4] [2, 3, 4] 4-combs of [1, 2, 3, 4]: [1, 2, 3, 4] 5-combs of [1, 2, 3, 4]: From the Iterators list, about the types of these things. >>> def g(): ... yield 1 ... >>> type(g) <class 'function'> >>> i = g() >>> type(i) <class 'generator'> >>> [s for s in dir(i) if not s.startswith('_')] ['close', 'gi_code', 'gi_frame', 'gi_running', 'gi_suspended', 'gi_yieldfrom', 'send', 'throw'] >>> from test.support import HAVE_DOCSTRINGS >>> print(i.__next__.__doc__ if HAVE_DOCSTRINGS else 'Implement next(self).') Implement next(self). 
>>> iter(i) is i True >>> import types >>> isinstance(i, types.GeneratorType) True And more, added later. >>> i.gi_running 0 >>> type(i.gi_frame) <class 'frame'> >>> i.gi_running = 42 Traceback (most recent call last): ... AttributeError: attribute 'gi_running' of 'generator' objects is not writable >>> def g(): ... yield me.gi_running >>> me = g() >>> me.gi_running 0 >>> next(me) 1 >>> me.gi_running 0 A clever union-find implementation from c.l.py, due to David Eppstein. Sent: Friday, June 29, 2001 12:16 PM To: python-list@python.org Subject: Re: PEP 255: Simple Generators >>> class disjointSet: ... def __init__(self, name): ... self.name = name ... self.parent = None ... self.generator = self.generate() ... ... def generate(self): ... while not self.parent: ... yield self ... for x in self.parent.generator: ... yield x ... ... def find(self): ... return next(self.generator) ... ... def union(self, parent): ... if self.parent: ... raise ValueError("Sorry, I'm not a root!") ... self.parent = parent ... ... def __str__(self): ... return self.name >>> names = "ABCDEFGHIJKLM" >>> sets = [disjointSet(name) for name in names] >>> roots = sets[:] >>> import random >>> gen = random.Random(42) >>> while 1: ... for s in sets: ... print(" %s->%s" % (s, s.find()), end='') ... print() ... if len(roots) > 1: ... s1 = gen.choice(roots) ... roots.remove(s1) ... s2 = gen.choice(roots) ... s1.union(s2) ... print("merged", s1, "into", s2) ... else: ... 
break A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->K L->L M->M merged K into B A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->B L->L M->M merged A into F A->F B->B C->C D->D E->E F->F G->G H->H I->I J->J K->B L->L M->M merged E into F A->F B->B C->C D->D E->F F->F G->G H->H I->I J->J K->B L->L M->M merged D into C A->F B->B C->C D->C E->F F->F G->G H->H I->I J->J K->B L->L M->M merged M into C A->F B->B C->C D->C E->F F->F G->G H->H I->I J->J K->B L->L M->C merged J into B A->F B->B C->C D->C E->F F->F G->G H->H I->I J->B K->B L->L M->C merged B into C A->F B->C C->C D->C E->F F->F G->G H->H I->I J->C K->C L->L M->C merged F into G A->G B->C C->C D->C E->G F->G G->G H->H I->I J->C K->C L->L M->C merged L into C A->G B->C C->C D->C E->G F->G G->G H->H I->I J->C K->C L->C M->C merged G into I A->I B->C C->C D->C E->I F->I G->I H->H I->I J->C K->C L->C M->C merged I into H A->H B->C C->C D->C E->H F->H G->H H->H I->H J->C K->C L->C M->C merged C into H A->H B->H C->H D->H E->H F->H G->H H->H I->H J->H K->H L->H M->H """ # Emacs turd ' # Fun tests (for sufficiently warped notions of "fun"). fun_tests = """ Build up to a recursive Sieve of Eratosthenes generator. >>> def firstn(g, n): ... return [next(g) for i in range(n)] >>> def intsfrom(i): ... while 1: ... yield i ... i += 1 >>> firstn(intsfrom(5), 7) [5, 6, 7, 8, 9, 10, 11] >>> def exclude_multiples(n, ints): ... for i in ints: ... if i % n: ... yield i >>> firstn(exclude_multiples(3, intsfrom(1)), 6) [1, 2, 4, 5, 7, 8] >>> def sieve(ints): ... prime = next(ints) ... yield prime ... not_divisible_by_prime = exclude_multiples(prime, ints) ... for p in sieve(not_divisible_by_prime): ... yield p >>> primes = sieve(intsfrom(2)) >>> firstn(primes, 20) [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71] Another famous problem: generate all integers of the form 2**i * 3**j * 5**k in increasing order, where i,j,k >= 0. Trickier than it may look at first! 
Try writing it without generators, and correctly, and without generating 3 internal results for each result output. >>> def times(n, g): ... for i in g: ... yield n * i >>> firstn(times(10, intsfrom(1)), 10) [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] >>> def merge(g, h): ... ng = next(g) ... nh = next(h) ... while 1: ... if ng < nh: ... yield ng ... ng = next(g) ... elif ng > nh: ... yield nh ... nh = next(h) ... else: ... yield ng ... ng = next(g) ... nh = next(h) The following works, but is doing a whale of a lot of redundant work -- it's not clear how to get the internal uses of m235 to share a single generator. Note that me_times2 (etc) each need to see every element in the result sequence. So this is an example where lazy lists are more natural (you can look at the head of a lazy list any number of times). >>> def m235(): ... yield 1 ... me_times2 = times(2, m235()) ... me_times3 = times(3, m235()) ... me_times5 = times(5, m235()) ... for i in merge(merge(me_times2, ... me_times3), ... me_times5): ... yield i Don't print "too many" of these -- the implementation above is extremely inefficient: each call of m235() leads to 3 recursive calls, and in turn each of those 3 more, and so on, and so on, until we've descended enough levels to satisfy the print stmts. Very odd: when I printed 5 lines of results below, this managed to screw up Win98's malloc in "the usual" way, i.e. the heap grew over 4Mb so Win98 started fragmenting address space, and it *looked* like a very slow leak. >>> result = m235() >>> for i in range(3): ... print(firstn(result, 15)) [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24] [25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80] [81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192] Heh. Here's one way to get a shared list, complete with an excruciating namespace renaming trick. 
The *pretty* part is that the times() and merge() functions can be reused as-is, because they only assume their stream arguments are iterable -- a LazyList is the same as a generator to times(). >>> class LazyList: ... def __init__(self, g): ... self.sofar = [] ... self.fetch = g.__next__ ... ... def __getitem__(self, i): ... sofar, fetch = self.sofar, self.fetch ... while i >= len(sofar): ... sofar.append(fetch()) ... return sofar[i] >>> def m235(): ... yield 1 ... # Gack: m235 below actually refers to a LazyList. ... me_times2 = times(2, m235) ... me_times3 = times(3, m235) ... me_times5 = times(5, m235) ... for i in merge(merge(me_times2, ... me_times3), ... me_times5): ... yield i Print as many of these as you like -- *this* implementation is memory- efficient. >>> m235 = LazyList(m235()) >>> for i in range(5): ... print([m235[j] for j in range(15*i, 15*(i+1))]) [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24] [25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80] [81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192] [200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384] [400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675] Ye olde Fibonacci generator, LazyList style. >>> def fibgen(a, b): ... ... def sum(g, h): ... while 1: ... yield next(g) + next(h) ... ... def tail(g): ... next(g) # throw first away ... for x in g: ... yield x ... ... yield a ... yield b ... for s in sum(iter(fib), ... tail(iter(fib))): ... yield s >>> fib = LazyList(fibgen(1, 2)) >>> firstn(iter(fib), 17) [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584] Running after your tail with itertools.tee (new in version 2.4) The algorithms "m235" (Hamming) and Fibonacci presented above are both examples of a whole family of FP (functional programming) algorithms where a function produces and returns a list while the production algorithm suppose the list as already produced by recursively calling itself. 
For these algorithms to work, they must: - produce at least a first element without presupposing the existence of the rest of the list - produce their elements in a lazy manner To work efficiently, the beginning of the list must not be recomputed over and over again. This is ensured in most FP languages as a built-in feature. In python, we have to explicitly maintain a list of already computed results and abandon genuine recursivity. This is what had been attempted above with the LazyList class. One problem with that class is that it keeps a list of all of the generated results and therefore continually grows. This partially defeats the goal of the generator concept, viz. produce the results only as needed instead of producing them all and thereby wasting memory. Thanks to itertools.tee, it is now clear "how to get the internal uses of m235 to share a single generator". >>> from itertools import tee >>> def m235(): ... def _m235(): ... yield 1 ... for n in merge(times(2, m2), ... merge(times(3, m3), ... times(5, m5))): ... yield n ... m1 = _m235() ... m2, m3, m5, mRes = tee(m1, 4) ... return mRes >>> it = m235() >>> for i in range(5): ... print(firstn(it, 15)) [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24] [25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80] [81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192] [200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384] [400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675] The "tee" function does just what we want. It internally keeps a generated result for as long as it has not been "consumed" from all of the duplicated iterators, whereupon it is deleted. You can therefore print the hamming sequence during hours without increasing memory usage, or very little. The beauty of it is that recursive running-after-their-tail FP algorithms are quite straightforwardly expressed with this Python idiom. Ye olde Fibonacci generator, tee style. 
>>> def fib(): ... ... def _isum(g, h): ... while 1: ... yield next(g) + next(h) ... ... def _fib(): ... yield 1 ... yield 2 ... next(fibTail) # throw first away ... for res in _isum(fibHead, fibTail): ... yield res ... ... realfib = _fib() ... fibHead, fibTail, fibRes = tee(realfib, 3) ... return fibRes >>> firstn(fib(), 17) [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584] """ # syntax_tests mostly provokes SyntaxErrors. Also fiddling with #if 0 # hackery. syntax_tests = """ These are fine: >>> def f(): ... yield 1 ... return >>> def f(): ... try: ... yield 1 ... finally: ... pass >>> def f(): ... try: ... try: ... 1//0 ... except ZeroDivisionError: ... yield 666 ... except: ... pass ... finally: ... pass >>> def f(): ... try: ... try: ... yield 12 ... 1//0 ... except ZeroDivisionError: ... yield 666 ... except: ... try: ... x = 12 ... finally: ... yield 12 ... except: ... return >>> list(f()) [12, 666] >>> def f(): ... yield >>> type(f()) <class 'generator'> >>> def f(): ... if 0: ... yield >>> type(f()) <class 'generator'> >>> def f(): ... if 0: ... yield 1 >>> type(f()) <class 'generator'> >>> def f(): ... if "": ... yield None >>> type(f()) <class 'generator'> >>> def f(): ... return ... try: ... if x==4: ... pass ... elif 0: ... try: ... 1//0 ... except SyntaxError: ... pass ... else: ... if 0: ... while 12: ... x += 1 ... yield 2 # don't blink ... f(a, b, c, d, e) ... else: ... pass ... except: ... x = 1 ... return >>> type(f()) <class 'generator'> >>> def f(): ... if 0: ... def g(): ... yield 1 ... >>> type(f()) <class 'NoneType'> >>> def f(): ... if 0: ... class C: ... def __init__(self): ... yield 1 ... def f(self): ... yield 2 >>> type(f()) <class 'NoneType'> >>> def f(): ... if 0: ... return ... if 0: ... yield 2 >>> type(f()) <class 'generator'> This one caused a crash (see SF bug 567538): >>> def f(): ... for i in range(3): ... try: ... continue ... finally: ... yield i ... 
>>> g = f() >>> print(next(g)) 0 >>> print(next(g)) 1 >>> print(next(g)) 2 >>> print(next(g)) Traceback (most recent call last): StopIteration Test the gi_code attribute >>> def f(): ... yield 5 ... >>> g = f() >>> g.gi_code is f.__code__ True >>> next(g) 5 >>> next(g) Traceback (most recent call last): StopIteration >>> g.gi_code is f.__code__ True Test the __name__ attribute and the repr() >>> def f(): ... yield 5 ... >>> g = f() >>> g.__name__ 'f' >>> repr(g) # doctest: +ELLIPSIS '<generator object f at ...>' Lambdas shouldn't have their usual return behavior. >>> x = lambda: (yield 1) >>> list(x()) [1] >>> x = lambda: ((yield 1), (yield 2)) >>> list(x()) [1, 2] """ # conjoin is a simple backtracking generator, named in honor of Icon's # "conjunction" control structure. Pass a list of no-argument functions # that return iterable objects. Easiest to explain by example: assume the # function list [x, y, z] is passed. Then conjoin acts like: # # def g(): # values = [None] * 3 # for values[0] in x(): # for values[1] in y(): # for values[2] in z(): # yield values # # So some 3-lists of values *may* be generated, each time we successfully # get into the innermost loop. If an iterator fails (is exhausted) before # then, it "backtracks" to get the next value from the nearest enclosing # iterator (the one "to the left"), and starts all over again at the next # slot (pumps a fresh iterator). Of course this is most useful when the # iterators have side-effects, so that which values *can* be generated at # each slot depend on the values iterated at previous slots. def simple_conjoin(gs): values = [None] * len(gs) def gen(i): if i >= len(gs): yield values else: for values[i] in gs[i](): for x in gen(i+1): yield x for x in gen(0): yield x # That works fine, but recursing a level and checking i against len(gs) for # each item produced is inefficient. By doing manual loop unrolling across # generator boundaries, it's possible to eliminate most of that overhead. 
# This isn't worth the bother *in general* for generators, but conjoin() is # a core building block for some CPU-intensive generator applications. def conjoin(gs): n = len(gs) values = [None] * n # Do one loop nest at time recursively, until the # of loop nests # remaining is divisible by 3. def gen(i): if i >= n: yield values elif (n-i) % 3: ip1 = i+1 for values[i] in gs[i](): for x in gen(ip1): yield x else: for x in _gen3(i): yield x # Do three loop nests at a time, recursing only if at least three more # remain. Don't call directly: this is an internal optimization for # gen's use. def _gen3(i): assert i < n and (n-i) % 3 == 0 ip1, ip2, ip3 = i+1, i+2, i+3 g, g1, g2 = gs[i : ip3] if ip3 >= n: # These are the last three, so we can yield values directly. for values[i] in g(): for values[ip1] in g1(): for values[ip2] in g2(): yield values else: # At least 6 loop nests remain; peel off 3 and recurse for the # rest. for values[i] in g(): for values[ip1] in g1(): for values[ip2] in g2(): for x in _gen3(ip3): yield x for x in gen(0): yield x # And one more approach: For backtracking apps like the Knight's Tour # solver below, the number of backtracking levels can be enormous (one # level per square, for the Knight's Tour, so that e.g. a 100x100 board # needs 10,000 levels). In such cases Python is likely to run out of # stack space due to recursion. So here's a recursion-free version of # conjoin too. # NOTE WELL: This allows large problems to be solved with only trivial # demands on stack space. Without explicitly resumable generators, this is # much harder to achieve. OTOH, this is much slower (up to a factor of 2) # than the fancy unrolled recursive conjoin. def flat_conjoin(gs): # rename to conjoin to run tests with this instead n = len(gs) values = [None] * n iters = [None] * n _StopIteration = StopIteration # make local because caught a *lot* i = 0 while 1: # Descend. 
try: while i < n: it = iters[i] = gs[i]().__next__ values[i] = it() i += 1 except _StopIteration: pass else: assert i == n yield values # Backtrack until an older iterator can be resumed. i -= 1 while i >= 0: try: values[i] = iters[i]() # Success! Start fresh at next level. i += 1 break except _StopIteration: # Continue backtracking. i -= 1 else: assert i < 0 break # A conjoin-based N-Queens solver.
YieldFromTests
python
TheAlgorithms__Python
cellular_automata/wa_tor.py
{ "start": 3000, "end": 20556 }
class ____: """ Represents the main Wa-Tor algorithm. :attr time_passed: A function that is called every time time passes (a chronon) in order to visually display the new Wa-Tor planet. The `time_passed` function can block using ``time.sleep`` to slow the algorithm progression. >>> wt = WaTor(10, 15) >>> wt.width 10 >>> wt.height 15 >>> len(wt.planet) 15 >>> len(wt.planet[0]) 10 >>> len(wt.get_entities()) == PREDATOR_INITIAL_COUNT + PREY_INITIAL_COUNT True """ time_passed: Callable[["WaTor", int], None] | None def __init__(self, width: int, height: int) -> None: self.width = width self.height = height self.time_passed = None self.planet: list[list[Entity | None]] = [[None] * width for _ in range(height)] # Populate planet with predators and prey randomly for _ in range(PREY_INITIAL_COUNT): self.add_entity(prey=True) for _ in range(PREDATOR_INITIAL_COUNT): self.add_entity(prey=False) self.set_planet(self.planet) def set_planet(self, planet: list[list[Entity | None]]) -> None: """ Ease of access for testing >>> wt = WaTor(WIDTH, HEIGHT) >>> planet = [ ... [None, None, None], ... [None, Entity(True, coords=(1, 1)), None] ... ] >>> wt.set_planet(planet) >>> wt.planet == planet True >>> wt.width 3 >>> wt.height 2 """ self.planet = planet self.width = len(planet[0]) self.height = len(planet) def add_entity(self, prey: bool) -> None: """ Adds an entity, making sure the entity does not override another entity >>> wt = WaTor(WIDTH, HEIGHT) >>> wt.set_planet([[None, None], [None, None]]) >>> wt.add_entity(True) >>> len(wt.get_entities()) 1 >>> wt.add_entity(False) >>> len(wt.get_entities()) 2 """ while True: row, col = randint(0, self.height - 1), randint(0, self.width - 1) if self.planet[row][col] is None: self.planet[row][col] = Entity(prey=prey, coords=(row, col)) return def get_entities(self) -> list[Entity]: """ Returns a list of all the entities within the planet. 
>>> wt = WaTor(WIDTH, HEIGHT) >>> len(wt.get_entities()) == PREDATOR_INITIAL_COUNT + PREY_INITIAL_COUNT True """ return [entity for column in self.planet for entity in column if entity] def balance_predators_and_prey(self) -> None: """ Balances predators and preys so that prey can not dominate the predators, blocking up space for them to reproduce. >>> wt = WaTor(WIDTH, HEIGHT) >>> for i in range(2000): ... row, col = i // HEIGHT, i % WIDTH ... wt.planet[row][col] = Entity(True, coords=(row, col)) >>> entities = len(wt.get_entities()) >>> wt.balance_predators_and_prey() >>> len(wt.get_entities()) == entities False """ entities = self.get_entities() shuffle(entities) if len(entities) >= MAX_ENTITIES - MAX_ENTITIES / 10: prey = [entity for entity in entities if entity.prey] predators = [entity for entity in entities if not entity.prey] prey_count, predator_count = len(prey), len(predators) entities_to_purge = ( prey[:DELETE_UNBALANCED_ENTITIES] if prey_count > predator_count else predators[:DELETE_UNBALANCED_ENTITIES] ) for entity in entities_to_purge: self.planet[entity.coords[0]][entity.coords[1]] = None def get_surrounding_prey(self, entity: Entity) -> list[Entity]: """ Returns all the prey entities around (N, S, E, W) a predator entity. Subtly different to the `move_and_reproduce`. >>> wt = WaTor(WIDTH, HEIGHT) >>> wt.set_planet([ ... [None, Entity(True, (0, 1)), None], ... [None, Entity(False, (1, 1)), None], ... [None, Entity(True, (2, 1)), None]]) >>> wt.get_surrounding_prey( ... Entity(False, (1, 1))) # doctest: +NORMALIZE_WHITESPACE [Entity(prey=True, coords=(0, 1), remaining_reproduction_time=5), Entity(prey=True, coords=(2, 1), remaining_reproduction_time=5)] >>> wt.set_planet([[Entity(False, (0, 0))]]) >>> wt.get_surrounding_prey(Entity(False, (0, 0))) [] >>> wt.set_planet([ ... [Entity(True, (0, 0)), Entity(False, (1, 0)), Entity(False, (2, 0))], ... [None, Entity(False, (1, 1)), Entity(True, (2, 1))], ... 
[None, None, None]]) >>> wt.get_surrounding_prey(Entity(False, (1, 0))) [Entity(prey=True, coords=(0, 0), remaining_reproduction_time=5)] """ row, col = entity.coords adjacent: list[tuple[int, int]] = [ (row - 1, col), # North (row + 1, col), # South (row, col - 1), # West (row, col + 1), # East ] return [ ent for r, c in adjacent if 0 <= r < self.height and 0 <= c < self.width and (ent := self.planet[r][c]) is not None and ent.prey ] def move_and_reproduce( self, entity: Entity, direction_orders: list[Literal["N", "E", "S", "W"]] ) -> None: """ Attempts to move to an unoccupied neighbouring square in either of the four directions (North, South, East, West). If the move was successful and the `remaining_reproduction_time` is equal to 0, then a new prey or predator can also be created in the previous square. :param direction_orders: Ordered list (like priority queue) depicting order to attempt to move. Removes any systematic approach of checking neighbouring squares. >>> planet = [ ... [None, None, None], ... [None, Entity(True, coords=(1, 1)), None], ... [None, None, None] ... ] >>> wt = WaTor(WIDTH, HEIGHT) >>> wt.set_planet(planet) >>> wt.move_and_reproduce(Entity(True, coords=(1, 1)), direction_orders=["N"]) >>> wt.planet # doctest: +NORMALIZE_WHITESPACE [[None, Entity(prey=True, coords=(0, 1), remaining_reproduction_time=4), None], [None, None, None], [None, None, None]] >>> wt.planet[0][0] = Entity(True, coords=(0, 0)) >>> wt.move_and_reproduce(Entity(True, coords=(0, 1)), ... direction_orders=["N", "W", "E", "S"]) >>> wt.planet # doctest: +NORMALIZE_WHITESPACE [[Entity(prey=True, coords=(0, 0), remaining_reproduction_time=5), None, Entity(prey=True, coords=(0, 2), remaining_reproduction_time=4)], [None, None, None], [None, None, None]] >>> wt.planet[0][1] = wt.planet[0][2] >>> wt.planet[0][2] = None >>> wt.move_and_reproduce(Entity(True, coords=(0, 1)), ... 
direction_orders=["N", "W", "S", "E"]) >>> wt.planet # doctest: +NORMALIZE_WHITESPACE [[Entity(prey=True, coords=(0, 0), remaining_reproduction_time=5), None, None], [None, Entity(prey=True, coords=(1, 1), remaining_reproduction_time=4), None], [None, None, None]] >>> wt = WaTor(WIDTH, HEIGHT) >>> reproducable_entity = Entity(False, coords=(0, 1)) >>> reproducable_entity.remaining_reproduction_time = 0 >>> wt.planet = [[None, reproducable_entity]] >>> wt.move_and_reproduce(reproducable_entity, ... direction_orders=["N", "W", "S", "E"]) >>> wt.planet # doctest: +NORMALIZE_WHITESPACE [[Entity(prey=False, coords=(0, 0), remaining_reproduction_time=20, energy_value=15), Entity(prey=False, coords=(0, 1), remaining_reproduction_time=20, energy_value=15)]] """ row, col = coords = entity.coords adjacent_squares: dict[Literal["N", "E", "S", "W"], tuple[int, int]] = { "N": (row - 1, col), # North "S": (row + 1, col), # South "W": (row, col - 1), # West "E": (row, col + 1), # East } # Weight adjacent locations adjacent: list[tuple[int, int]] = [] for order in direction_orders: adjacent.append(adjacent_squares[order]) for r, c in adjacent: if ( 0 <= r < self.height and 0 <= c < self.width and self.planet[r][c] is None ): # Move entity to empty adjacent square self.planet[r][c] = entity self.planet[row][col] = None entity.coords = (r, c) break # (2.) See if it possible to reproduce in previous square if coords != entity.coords and entity.remaining_reproduction_time <= 0: # Check if the entities on the planet is less than the max limit if len(self.get_entities()) < MAX_ENTITIES: # Reproduce in previous square self.planet[row][col] = Entity(prey=entity.prey, coords=coords) entity.reset_reproduction_time() else: entity.remaining_reproduction_time -= 1 def perform_prey_actions( self, entity: Entity, direction_orders: list[Literal["N", "E", "S", "W"]] ) -> None: """ Performs the actions for a prey entity For prey the rules are: 1. 
At each chronon, a prey moves randomly to one of the adjacent unoccupied squares. If there are no free squares, no movement takes place. 2. Once a prey has survived a certain number of chronons it may reproduce. This is done as it moves to a neighbouring square, leaving behind a new prey in its old position. Its reproduction time is also reset to zero. >>> wt = WaTor(WIDTH, HEIGHT) >>> reproducable_entity = Entity(True, coords=(0, 1)) >>> reproducable_entity.remaining_reproduction_time = 0 >>> wt.planet = [[None, reproducable_entity]] >>> wt.perform_prey_actions(reproducable_entity, ... direction_orders=["N", "W", "S", "E"]) >>> wt.planet # doctest: +NORMALIZE_WHITESPACE [[Entity(prey=True, coords=(0, 0), remaining_reproduction_time=5), Entity(prey=True, coords=(0, 1), remaining_reproduction_time=5)]] """ self.move_and_reproduce(entity, direction_orders) def perform_predator_actions( self, entity: Entity, occupied_by_prey_coords: tuple[int, int] | None, direction_orders: list[Literal["N", "E", "S", "W"]], ) -> None: """ Performs the actions for a predator entity :param occupied_by_prey_coords: Move to this location if there is prey there For predators the rules are: 1. At each chronon, a predator moves randomly to an adjacent square occupied by a prey. If there is none, the predator moves to a random adjacent unoccupied square. If there are no free squares, no movement takes place. 2. At each chronon, each predator is deprived of a unit of energy. 3. Upon reaching zero energy, a predator dies. 4. If a predator moves to a square occupied by a prey, it eats the prey and earns a certain amount of energy. 5. Once a predator has survived a certain number of chronons it may reproduce in exactly the same way as the prey. 
>>> wt = WaTor(WIDTH, HEIGHT) >>> wt.set_planet([[Entity(True, coords=(0, 0)), Entity(False, coords=(0, 1))]]) >>> wt.perform_predator_actions(Entity(False, coords=(0, 1)), (0, 0), []) >>> wt.planet # doctest: +NORMALIZE_WHITESPACE [[Entity(prey=False, coords=(0, 0), remaining_reproduction_time=20, energy_value=19), None]] """ assert entity.energy_value is not None # [type checking] # (3.) If the entity has 0 energy, it will die if entity.energy_value == 0: self.planet[entity.coords[0]][entity.coords[1]] = None return # (1.) Move to entity if possible if occupied_by_prey_coords is not None: # Kill the prey prey = self.planet[occupied_by_prey_coords[0]][occupied_by_prey_coords[1]] assert prey is not None prey.alive = False # Move onto prey self.planet[occupied_by_prey_coords[0]][occupied_by_prey_coords[1]] = entity self.planet[entity.coords[0]][entity.coords[1]] = None entity.coords = occupied_by_prey_coords # (4.) Eats the prey and earns energy entity.energy_value += PREDATOR_FOOD_VALUE else: # (5.) If it has survived the certain number of chronons it will also # reproduce in this function self.move_and_reproduce(entity, direction_orders) # (2.) Each chronon, the predator is deprived of a unit of energy entity.energy_value -= 1 def run(self, *, iteration_count: int) -> None: """ Emulate time passing by looping `iteration_count` times >>> wt = WaTor(WIDTH, HEIGHT) >>> wt.run(iteration_count=PREDATOR_INITIAL_ENERGY_VALUE - 1) >>> len(list(filter(lambda entity: entity.prey is False, ... 
wt.get_entities()))) >= PREDATOR_INITIAL_COUNT True """ for iter_num in range(iteration_count): # Generate list of all entities in order to randomly # pop an entity at a time to simulate true randomness # This removes the systematic approach of iterating # through each entity width by height all_entities = self.get_entities() for __ in range(len(all_entities)): entity = all_entities.pop(randint(0, len(all_entities) - 1)) if entity.alive is False: continue directions: list[Literal["N", "E", "S", "W"]] = ["N", "E", "S", "W"] shuffle(directions) # Randomly shuffle directions if entity.prey: self.perform_prey_actions(entity, directions) else: # Create list of surrounding prey surrounding_prey = self.get_surrounding_prey(entity) surrounding_prey_coords = None if surrounding_prey: # Again, randomly shuffle directions shuffle(surrounding_prey) surrounding_prey_coords = surrounding_prey[0].coords self.perform_predator_actions( entity, surrounding_prey_coords, directions ) # Balance out the predators and prey self.balance_predators_and_prey() if self.time_passed is not None: # Call time_passed function for Wa-Tor planet # visualisation in a terminal or a graph. self.time_passed(self, iter_num) def visualise(wt: WaTor, iter_number: int, *, colour: bool = True) -> None: """ Visually displays the Wa-Tor planet using an ascii code in terminal to clear and re-print the Wa-Tor planet at intervals. Uses ascii colour codes to colourfully display the predators and prey: * (0x60f197) Prey = ``#`` * (0xfffff) Predator = ``x`` >>> wt = WaTor(30, 30) >>> wt.set_planet([ ... [Entity(True, coords=(0, 0)), Entity(False, coords=(0, 1)), None], ... [Entity(False, coords=(1, 0)), None, Entity(False, coords=(1, 2))], ... [None, Entity(True, coords=(2, 1)), None] ... ]) >>> visualise(wt, 0, colour=False) # doctest: +NORMALIZE_WHITESPACE # x . x . x . # . 
<BLANKLINE> Iteration: 0 | Prey count: 2 | Predator count: 3 | """ if colour: __import__("os").system("") print("\x1b[0;0H\x1b[2J\x1b[?25l") reprint = "\x1b[0;0H" if colour else "" ansi_colour_end = "\x1b[0m " if colour else " " planet = wt.planet output = "" # Iterate over every entity in the planet for row in planet: for entity in row: if entity is None: output += " . " else: if colour is True: output += ( "\x1b[38;2;96;241;151m" if entity.prey else "\x1b[38;2;255;255;15m" ) output += f" {'#' if entity.prey else 'x'}{ansi_colour_end}" output += "\n" entities = wt.get_entities() prey_count = sum(entity.prey for entity in entities) print( f"{output}\n Iteration: {iter_number} | Prey count: {prey_count} | " f"Predator count: {len(entities) - prey_count} | {reprint}" ) # Block the thread to be able to visualise seeing the algorithm sleep(0.05) if __name__ == "__main__": import doctest doctest.testmod() wt = WaTor(WIDTH, HEIGHT) wt.time_passed = visualise wt.run(iteration_count=100_000)
WaTor
python
plotly__plotly.py
plotly/graph_objs/scatter3d/line/colorbar/title/_font.py
{ "start": 233, "end": 9949 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "scatter3d.line.colorbar.title" _path_str = "scatter3d.line.colorbar.title.font" _valid_props = { "color", "family", "lineposition", "shadow", "size", "style", "textcase", "variant", "weight", } @property def color(self): """ The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["color"] @color.setter def color(self, val): self["color"] = val @property def family(self): """ HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. The 'family' property is a string and must be specified as: - A non-empty string Returns ------- str """ return self["family"] @family.setter def family(self, val): self["family"] = val @property def lineposition(self): """ Sets the kind of decoration line(s) with text, such as an "under", "over" or "through" as well as combinations e.g. "under+over", etc. The 'lineposition' property is a flaglist and may be specified as a string containing: - Any combination of ['under', 'over', 'through'] joined with '+' characters (e.g. 'under+over') OR exactly one of ['none'] (e.g. 'none') Returns ------- Any """ return self["lineposition"] @lineposition.setter def lineposition(self, val): self["lineposition"] = val @property def shadow(self): """ Sets the shape and color of the shadow behind text. "auto" places minimal shadow and applies contrast text font color. See https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow for additional options. 
The 'shadow' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["shadow"] @shadow.setter def shadow(self, val): self["shadow"] = val @property def size(self): """ The 'size' property is a number and may be specified as: - An int or float in the interval [1, inf] Returns ------- int|float """ return self["size"] @size.setter def size(self, val): self["size"] = val @property def style(self): """ Sets whether a font should be styled with a normal or italic face from its family. The 'style' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'italic'] Returns ------- Any """ return self["style"] @style.setter def style(self, val): self["style"] = val @property def textcase(self): """ Sets capitalization of text. It can be used to make text appear in all-uppercase or all-lowercase, or with each word capitalized. The 'textcase' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'word caps', 'upper', 'lower'] Returns ------- Any """ return self["textcase"] @textcase.setter def textcase(self, val): self["textcase"] = val @property def variant(self): """ Sets the variant of the font. The 'variant' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'small-caps', 'all-small-caps', 'all-petite-caps', 'petite-caps', 'unicase'] Returns ------- Any """ return self["variant"] @variant.setter def variant(self, val): self["variant"] = val @property def weight(self): """ Sets the weight (or boldness) of the font. The 'weight' property is a integer and may be specified as: - An int (or float that will be cast to an int) in the interval [1, 1000] OR exactly one of ['normal', 'bold'] (e.g. 
'bold') Returns ------- int """ return self["weight"] @weight.setter def weight(self, val): self["weight"] = val @property def _prop_descriptions(self): return """\ color family HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. lineposition Sets the kind of decoration line(s) with text, such as an "under", "over" or "through" as well as combinations e.g. "under+over", etc. shadow Sets the shape and color of the shadow behind text. "auto" places minimal shadow and applies contrast text font color. See https://developer.mozilla.org/en- US/docs/Web/CSS/text-shadow for additional options. size style Sets whether a font should be styled with a normal or italic face from its family. textcase Sets capitalization of text. It can be used to make text appear in all-uppercase or all-lowercase, or with each word capitalized. variant Sets the variant of the font. weight Sets the weight (or boldness) of the font. """ def __init__( self, arg=None, color=None, family=None, lineposition=None, shadow=None, size=None, style=None, textcase=None, variant=None, weight=None, **kwargs, ): """ Construct a new Font object Sets this color bar's title font. Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.scatter3d.line .colorbar.title.Font` color family HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. lineposition Sets the kind of decoration line(s) with text, such as an "under", "over" or "through" as well as combinations e.g. "under+over", etc. 
shadow Sets the shape and color of the shadow behind text. "auto" places minimal shadow and applies contrast text font color. See https://developer.mozilla.org/en- US/docs/Web/CSS/text-shadow for additional options. size style Sets whether a font should be styled with a normal or italic face from its family. textcase Sets capitalization of text. It can be used to make text appear in all-uppercase or all-lowercase, or with each word capitalized. variant Sets the variant of the font. weight Sets the weight (or boldness) of the font. Returns ------- Font """ super().__init__("font") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.scatter3d.line.colorbar.title.Font constructor must be a dict or an instance of :class:`plotly.graph_objs.scatter3d.line.colorbar.title.Font`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("color", arg, color) self._set_property("family", arg, family) self._set_property("lineposition", arg, lineposition) self._set_property("shadow", arg, shadow) self._set_property("size", arg, size) self._set_property("style", arg, style) self._set_property("textcase", arg, textcase) self._set_property("variant", arg, variant) self._set_property("weight", arg, weight) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Font
python
streamlit__streamlit
lib/tests/streamlit/elements/chat_test.py
{ "start": 1473, "end": 30111 }
class ____(DeltaGeneratorTestCase): """Test ability to marshall ChatInput and ChatMessage protos.""" def test_label_required(self): """Test that label is required""" with pytest.raises(TypeError): st.chat_message() def test_nesting_is_allowed(self): """Test that it is allowed to be nested.""" with st.chat_message("user"), st.chat_message("assistant"): st.write("hello") @parameterized.expand( [ ("user", {"name": "user", "avatar": "user"}), ("assistant", {"name": "assistant", "avatar": "assistant"}), ("ai", {"name": "ai", "avatar": "assistant"}), ("human", {"name": "human", "avatar": "user"}), ] ) def test_message_name(self, message_name, expected): """Test that message's name param maps to the correct value and avatar.""" message = st.chat_message(message_name) with message: pass message_block = self.get_delta_from_queue() assert message_block.add_block.chat_message.name == expected["name"] assert message_block.add_block.chat_message.avatar == expected["avatar"] assert ( message_block.add_block.chat_message.avatar_type == BlockProto.ChatMessage.AvatarType.ICON ) @parameterized.expand( [ ("👋", {"avatar": "👋", "type": BlockProto.ChatMessage.AvatarType.EMOJI}), ( "http://not.a.real.url", { "avatar": "http://not.a.real.url", "type": BlockProto.ChatMessage.AvatarType.IMAGE, }, ), ] ) def test_non_str_avatar_type(self, avatar, expected): """Test that it is possible to set an emoji and an image as avatar.""" message = st.chat_message("test", avatar=avatar) with message: pass message_block = self.get_delta_from_queue() assert message_block.add_block.chat_message.name == "test" assert message_block.add_block.chat_message.avatar == expected["avatar"] assert message_block.add_block.chat_message.avatar_type == expected["type"] def test_throws_invalid_avatar_exception(self): """Test that chat_message throws an StreamlitAPIException on invalid avatar input.""" with pytest.raises(StreamlitAPIException): st.chat_message("user", avatar="FOOO") def test_chat_input(self): """Test that 
it can be called.""" st.chat_input("Placeholder") c = self.get_delta_from_queue().new_element.chat_input assert c.placeholder == "Placeholder" assert c.default == "" assert c.value == "" assert not c.set_value assert c.max_chars == 0 assert not c.disabled def test_chat_input_disabled(self): """Test that it sets disabled correctly.""" st.chat_input("Placeholder", disabled=True) c = self.get_delta_from_queue().new_element.chat_input assert c.placeholder == "Placeholder" assert c.default == "" assert c.value == "" assert not c.set_value assert c.max_chars == 0 assert c.disabled def test_chat_input_max_chars(self): """Test that it sets max chars correctly.""" st.chat_input("Placeholder", max_chars=100) c = self.get_delta_from_queue().new_element.chat_input assert c.placeholder == "Placeholder" assert c.default == "" assert c.value == "" assert not c.set_value assert c.max_chars == 100 assert c.accept_file == ChatInput.AcceptFile.NONE assert not c.disabled assert c.file_type == [] def test_chat_not_allowed_in_form(self): """Test that it disallows being called in a form.""" with pytest.raises(StreamlitAPIException) as exception_message: st.form("Form Key").chat_input() assert ( str(exception_message.value) == "`st.chat_input()` can't be used in a `st.form()`." 
) @parameterized.expand( [ lambda: st.columns(2)[0], lambda: st.tabs(["Tab1", "Tab2"])[0], lambda: st.expander("Expand Me"), lambda: st.chat_message("user"), lambda: st.sidebar, lambda: st.container(), ] ) def test_chat_selects_inline_postion(self, container_call): """Test that it selects inline position when nested in any of layout containers.""" container_call().chat_input() assert ( self.get_message_from_queue().metadata.delta_path[0] != RootContainerProto.BOTTOM ) @parameterized.expand( [ lambda: st, lambda: st._main, ] ) def test_chat_selects_bottom_position(self, container_call): """Test that it selects bottom position when called in the main dg.""" container_call().chat_input() assert ( self.get_message_from_queue().metadata.delta_path[0] == RootContainerProto.BOTTOM ) def test_supports_programmatic_value_assignment(self): """Test that it supports programmatically setting the value in session state.""" st.session_state.my_key = "Foo" st.chat_input(key="my_key") assert st.session_state.my_key is None c = self.get_delta_from_queue().new_element.chat_input assert c.default == "" assert c.value == "Foo" assert c.set_value is True def test_chat_input_cached_widget_replay_warning(self): """Test that a warning is shown when this widget is used inside a cached function.""" st.cache_data(lambda: st.chat_input("the label"))() # The widget itself is still created, so we need to go back one element more: el = self.get_delta_from_queue(-2).new_element.exception assert el.type == "CachedWidgetWarning" assert el.is_warning @parameterized.expand( [ (False, ChatInput.AcceptFile.NONE), (True, ChatInput.AcceptFile.SINGLE), ("multiple", ChatInput.AcceptFile.MULTIPLE), ] ) def test_chat_input_accept_file(self, accept_file, expected): st.chat_input(accept_file=accept_file) c = self.get_delta_from_queue().new_element.chat_input assert c.accept_file == expected def test_chat_input_invalid_accept_file(self): with pytest.raises(StreamlitAPIException) as ex: 
st.chat_input(accept_file="invalid") assert ( str(ex.value) == "The `accept_file` parameter must be a boolean or 'multiple' or 'directory'." ) def test_file_type(self): """Test that it can be called using string(s) for type parameter.""" st.chat_input(file_type="png") c = self.get_delta_from_queue().new_element.chat_input assert c.file_type == [".png"] @patch("streamlit.elements.widgets.chat.ChatInputSerde.deserialize") def test_multiple_files(self, deserialize_patch): rec0 = UploadedFileRec("file0", "name0", "type", b"123") rec1 = UploadedFileRec("file1", "name1", "type", b"456") uploaded_files = [ UploadedFile( rec0, FileURLsProto(file_id="file0", delete_url="d0", upload_url="u0") ), UploadedFile( rec1, FileURLsProto(file_id="file1", delete_url="d1", upload_url="u1") ), ] deserialize_patch.return_value = ChatInputValue( text="placeholder", files=uploaded_files, _include_files=True ) return_val = st.chat_input(accept_file="multiple") assert return_val.files == uploaded_files for actual, expected in zip(return_val.files, uploaded_files, strict=False): assert actual.name == expected.name assert actual.type == expected.type assert actual.size == expected.size assert actual.getvalue() == expected.getvalue() @patch("streamlit.elements.widgets.chat.ChatInputSerde.deserialize") def test_unique_uploaded_file_instance(self, deserialize_patch): """We should get a unique UploadedFile instance each time we access the chat_input widget.""" # Patch UploadFileManager to return two files rec0 = UploadedFileRec("file0", "name0", "type", b"123") rec1 = UploadedFileRec("file1", "name1", "type", b"456") uploaded_files = [ UploadedFile( rec0, FileURLsProto(file_id="file0", delete_url="d0", upload_url="u0") ), UploadedFile( rec1, FileURLsProto(file_id="file1", delete_url="d1", upload_url="u1") ), ] deserialize_patch.return_value = ChatInputValue( text="placeholder", files=uploaded_files, _include_files=True ) # These file_uploaders have different labels so that we don't cause # a 
DuplicateKey error - but because we're patching the get_files # function, both file_uploaders will refer to the same files. file0 = st.chat_input(key="key0", accept_file=True).files[0] file1 = st.chat_input(key="key1", accept_file=True).files[0] assert id(file0) != id(file1) # Seeking in one instance should not impact the position in the other. file0.seek(2) assert file0.read() == b"3" assert file1.read() == b"123" @patch("streamlit.elements.widgets.chat.ChatInputSerde.deserialize") def test_chat_input_value_is_custom_dict(self, deserialize_patch): """Test that ChatInputValue is a custom dict.""" files = [ UploadedFile( UploadedFileRec("file0", "name0", "type", b"123"), FileURLsProto(file_id="file0", delete_url="d0", upload_url="u0"), ), ] deserialize_patch.return_value = ChatInputValue(text="placeholder", files=files) value = st.chat_input("Placeholder", accept_file=True) assert is_custom_dict(value) value = st.chat_input("Placeholder", accept_file="multiple") assert is_custom_dict(value) def test_chat_message_width_config_default(self): """Test that default width is 'stretch' for chat_message.""" with st.chat_message("user"): pass message_block = self.get_delta_from_queue() assert ( message_block.add_block.width_config.WhichOneof("width_spec") == WidthConfigFields.USE_STRETCH.value ) assert message_block.add_block.width_config.use_stretch def test_chat_message_width_config_pixel(self): """Test that pixel width works properly for chat_message.""" with st.chat_message("user", width=300): pass message_block = self.get_delta_from_queue() assert ( message_block.add_block.width_config.WhichOneof("width_spec") == WidthConfigFields.PIXEL_WIDTH.value ) assert message_block.add_block.width_config.pixel_width == 300 def test_chat_message_width_config_content(self): """Test that 'content' width works properly for chat_message.""" with st.chat_message("user", width="content"): pass message_block = self.get_delta_from_queue() assert ( 
message_block.add_block.width_config.WhichOneof("width_spec") == WidthConfigFields.USE_CONTENT.value ) assert message_block.add_block.width_config.use_content def test_chat_message_width_config_stretch(self): """Test that 'stretch' width works properly for chat_message.""" with st.chat_message("user", width="stretch"): pass message_block = self.get_delta_from_queue() assert ( message_block.add_block.width_config.WhichOneof("width_spec") == WidthConfigFields.USE_STRETCH.value ) assert message_block.add_block.width_config.use_stretch @parameterized.expand( [ "invalid", -100, 0, 100.5, None, ] ) def test_chat_message_invalid_width(self, width): """Test that invalid width values raise exceptions for chat_message.""" with pytest.raises(StreamlitInvalidWidthError): st.chat_message("user", width=width) def test_chat_input_width_config_default(self): """Test that default width is 'stretch' for chat_input.""" st.chat_input("Placeholder") c = self.get_delta_from_queue().new_element assert ( c.width_config.WhichOneof("width_spec") == WidthConfigFields.USE_STRETCH.value ) assert c.width_config.use_stretch def test_chat_input_width_config_pixel(self): """Test that pixel width works properly for chat_input.""" st.chat_input("Placeholder", width=300) c = self.get_delta_from_queue().new_element assert ( c.width_config.WhichOneof("width_spec") == WidthConfigFields.PIXEL_WIDTH.value ) assert c.width_config.pixel_width == 300 def test_chat_input_width_config_stretch(self): """Test that 'stretch' width works properly for chat_input.""" st.chat_input("Placeholder", width="stretch") c = self.get_delta_from_queue().new_element assert ( c.width_config.WhichOneof("width_spec") == WidthConfigFields.USE_STRETCH.value ) assert c.width_config.use_stretch @parameterized.expand( [ "invalid", "content", -100, 0, 100.5, None, ] ) def test_chat_input_invalid_width(self, width): """Test that invalid width values raise exceptions for chat_input.""" with pytest.raises(StreamlitInvalidWidthError): 
st.chat_input("Placeholder", width=width) @parameterized.expand( [ ( "accept_file", True, "multiple", ), ( "file_type", ["txt"], ["csv"], ), ( "max_chars", 100, 200, ), ] ) def test_whitelisted_stable_key_kwargs( self, kwarg_name: str, value1: object, value2: object ) -> None: """Test that the widget ID changes when a whitelisted kwarg changes even when the key is provided.""" with patch( "streamlit.elements.lib.utils._register_element_id", return_value=MagicMock(), ): base_kwargs = { "placeholder": "Label 1", "key": "chat_input_key", # Keep other whitelisted params stable depending on the tested kwarg "accept_file": True, "file_type": ["txt"], "max_chars": 100, } base_kwargs[kwarg_name] = value1 st.chat_input(**base_kwargs) c1 = self.get_delta_from_queue().new_element.chat_input id1 = c1.id base_kwargs[kwarg_name] = value2 st.chat_input(**base_kwargs) c2 = self.get_delta_from_queue().new_element.chat_input id2 = c2.id assert id1 != id2 def test_stable_id_with_key(self): """Test that the widget ID is stable when a stable key is provided and only non-whitelisted kwargs change.""" with patch( "streamlit.elements.lib.utils._register_element_id", return_value=MagicMock(), ): # First render with certain params (keep whitelisted kwargs stable) st.chat_input( placeholder="Label 1", key="chat_input_key", disabled=False, width="stretch", on_submit=lambda: None, args=("arg1", "arg2"), kwargs={"kwarg1": "kwarg1"}, # Whitelisted kwargs (keep stable): accept_file=True, file_type=["txt"], max_chars=100, ) c1 = self.get_delta_from_queue().new_element.chat_input id1 = c1.id # Second render with different non-whitelisted params but same key st.chat_input( placeholder="Label 2", key="chat_input_key", disabled=True, width=300, on_submit=lambda: None, args=("arg_1", "arg_2"), kwargs={"kwarg_1": "kwarg_1"}, # Keep whitelisted the same to ensure ID stability accept_file=True, file_type=["txt"], max_chars=100, ) c2 = self.get_delta_from_queue().new_element.chat_input id2 = c2.id assert 
id1 == id2 def test_just_label(self): """Test st.chat_input with just a label.""" st.chat_input("the label") c = self.get_delta_from_queue().new_element.chat_input assert c.placeholder == "the label" assert not c.disabled assert c.max_chars == 0 def test_just_disabled(self): """Test st.chat_input with disabled=True.""" st.chat_input("the label", disabled=True) c = self.get_delta_from_queue().new_element.chat_input assert c.placeholder == "the label" assert c.disabled def test_max_chars(self): """Test st.chat_input with max_chars set.""" st.chat_input("the label", max_chars=10) c = self.get_delta_from_queue().new_element.chat_input assert c.placeholder == "the label" assert c.max_chars == 10 def test_accept_file_single(self): """Test st.chat_input with accept_file=True.""" st.chat_input("the label", accept_file=True, file_type=["txt", "csv"]) c = self.get_delta_from_queue().new_element.chat_input assert c.placeholder == "the label" assert c.accept_file == ChatInput.AcceptFile.SINGLE assert c.file_type == [".txt", ".csv"] def test_accept_file_multiple(self): """Test st.chat_input with accept_file='multiple'.""" st.chat_input("the label", accept_file="multiple", file_type=["txt"]) c = self.get_delta_from_queue().new_element.chat_input assert c.placeholder == "the label" assert c.accept_file == ChatInput.AcceptFile.MULTIPLE assert c.file_type == [".txt"] def test_accept_file_directory(self): """Test st.chat_input with accept_file='directory'.""" st.chat_input( "the label", accept_file="directory", file_type=["py", "md", "txt"] ) c = self.get_delta_from_queue().new_element.chat_input assert c.placeholder == "the label" assert c.accept_file == ChatInput.AcceptFile.DIRECTORY assert c.file_type == [".py", ".md", ".txt"] def test_directory_upload_with_no_file_type(self): """Test directory upload without file type restrictions.""" st.chat_input("Upload any directory", accept_file="directory") c = self.get_delta_from_queue().new_element.chat_input assert c.accept_file == 
ChatInput.AcceptFile.DIRECTORY assert c.file_type == [] # No restrictions def test_directory_upload_with_width(self): """Test directory upload with width parameter.""" st.chat_input("Directory chat", accept_file="directory", width=400) c = self.get_delta_from_queue().new_element.chat_input assert c.accept_file == ChatInput.AcceptFile.DIRECTORY def test_directory_upload_disabled(self): """Test disabled directory upload.""" st.chat_input("Disabled directory", accept_file="directory", disabled=True) c = self.get_delta_from_queue().new_element.chat_input assert c.accept_file == ChatInput.AcceptFile.DIRECTORY assert c.disabled def test_directory_upload_with_max_chars(self): """Test directory upload with character limit.""" st.chat_input("Limited text", accept_file="directory", max_chars=100) c = self.get_delta_from_queue().new_element.chat_input assert c.accept_file == ChatInput.AcceptFile.DIRECTORY assert c.max_chars == 100 def test_accept_file_invalid_value(self): """Test that invalid accept_file values raise an error.""" with pytest.raises(StreamlitAPIException) as cm: st.chat_input("the label", accept_file="invalid") assert ( "The `accept_file` parameter must be a boolean or 'multiple' or 'directory'." 
in str(cm.value) ) def test_directory_upload_with_callback(self): """Test directory upload with on_submit callback.""" def callback(): pass st.chat_input( "Directory with callback", accept_file="directory", on_submit=callback ) c = self.get_delta_from_queue().new_element.chat_input assert c.accept_file == ChatInput.AcceptFile.DIRECTORY def test_file_type_normalization_for_directory(self): """Test that file types are properly normalized for directory upload.""" # Test with various file type formats st.chat_input("Directory", accept_file="directory", file_type=".txt") c1 = self.get_delta_from_queue().new_element.chat_input assert c1.file_type == [".txt"] st.chat_input( "Directory", accept_file="directory", file_type=["py", ".md", "txt"] ) c2 = self.get_delta_from_queue().new_element.chat_input assert c2.file_type == [".py", ".md", ".txt"] @patch("streamlit.elements.widgets.chat.ChatInputSerde.deserialize") def test_audio_file(self, deserialize_patch): """Test that audio file is properly handled by ChatInputValue.""" rec = UploadedFileRec("audio0", "recording.wav", "audio/wav", b"audio data") audio_file = UploadedFile( rec, FileURLsProto(file_id="audio0", delete_url="d0", upload_url="u0") ) deserialize_patch.return_value = ChatInputValue( text="", files=[], audio=audio_file, _include_files=True, _include_audio=True, ) return_val = st.chat_input(accept_file="multiple", accept_audio=True) assert return_val.audio == audio_file assert return_val.audio.name == "recording.wav" assert return_val.audio.type == "audio/wav" assert return_val.audio.getvalue() == b"audio data" @patch("streamlit.elements.widgets.chat.ChatInputSerde.deserialize") def test_audio_file_none(self, deserialize_patch): """Test that ChatInputValue handles None audio file correctly.""" deserialize_patch.return_value = ChatInputValue( text="hello", files=[], audio=None, _include_files=True, _include_audio=True ) return_val = st.chat_input(accept_file="multiple", accept_audio=True) assert return_val.audio is 
None assert return_val.text == "hello" @patch("streamlit.elements.widgets.chat.ChatInputSerde.deserialize") def test_chat_input_value_with_audio(self, deserialize_patch): """Test ChatInputValue dict-like interface with audio field.""" rec = UploadedFileRec("audio0", "recording.wav", "audio/wav", b"audio data") audio_file = UploadedFile( rec, FileURLsProto(file_id="audio0", delete_url="d0", upload_url="u0") ) deserialize_patch.return_value = ChatInputValue( text="test", files=[], audio=audio_file, _include_files=True, _include_audio=True, ) return_val = st.chat_input(accept_file="multiple", accept_audio=True) # Test dict-like access assert return_val["audio"] == audio_file assert return_val["text"] == "test" assert "audio" in return_val assert len(return_val) == 3 # text, files, audio # Test to_dict as_dict = return_val.to_dict() assert as_dict["audio"] == audio_file assert as_dict["text"] == "test" assert as_dict["files"] == [] def test_chat_input_accept_audio_false(self): """Test that accept_audio=False correctly sets the proto field.""" st.chat_input(accept_audio=False) c = self.get_delta_from_queue().new_element.chat_input assert c.accept_audio is False def test_chat_input_accept_audio_true(self): """Test that accept_audio=True correctly sets the proto field.""" st.chat_input(accept_audio=True) c = self.get_delta_from_queue().new_element.chat_input assert c.accept_audio is True def test_chat_input_audio_sample_rate_default(self): """Test that audio_sample_rate defaults to 16000.""" st.chat_input(accept_audio=True) c = self.get_delta_from_queue().new_element.chat_input assert c.audio_sample_rate == 16000 @parameterized.expand( [ (8000,), (16000,), (48000,), ] ) def test_chat_input_audio_sample_rate_valid(self, sample_rate: int): """Test that valid audio_sample_rate values are set correctly.""" st.chat_input(accept_audio=True, audio_sample_rate=sample_rate) c = self.get_delta_from_queue().new_element.chat_input assert c.audio_sample_rate == sample_rate def 
test_chat_input_audio_sample_rate_none(self): """Test that audio_sample_rate=None is handled correctly.""" st.chat_input(accept_audio=True, audio_sample_rate=None) c = self.get_delta_from_queue().new_element.chat_input assert c.HasField("audio_sample_rate") is False def test_chat_input_audio_sample_rate_invalid(self): """Test that invalid audio_sample_rate raises an error.""" with pytest.raises(StreamlitAPIException) as exc: st.chat_input(accept_audio=True, audio_sample_rate=12345) assert "Invalid audio_sample_rate" in str(exc.value) @parameterized.expand( [ (False, False, False, False, {"text"}), ("multiple", False, True, False, {"text", "files"}), (False, True, False, True, {"text", "audio"}), ("multiple", True, True, True, {"text", "files", "audio"}), ] ) @patch("streamlit.elements.widgets.chat.ChatInputSerde.deserialize") def test_chat_input_value_conditional_keys( self, accept_file, accept_audio, include_files, include_audio, expected_keys, deserialize_patch, ): """Test that ChatInputValue only includes keys based on accept_file/accept_audio.""" deserialize_patch.return_value = ChatInputValue( text="test", files=[], audio=None, _include_files=include_files, _include_audio=include_audio, ) return_val = st.chat_input(accept_file=accept_file, accept_audio=accept_audio) # Verify expected keys are present assert set(return_val.keys()) == expected_keys # Verify text is always accessible assert "text" in return_val assert return_val["text"] == "test" assert return_val.text == "test" # Verify files key behavior if "files" in expected_keys: assert "files" in return_val assert return_val["files"] == [] assert return_val.files == [] else: assert "files" not in return_val with pytest.raises(KeyError): _ = return_val["files"] with pytest.raises(AttributeError): _ = return_val.files # Verify audio key behavior if "audio" in expected_keys: assert "audio" in return_val assert return_val["audio"] is None assert return_val.audio is None else: assert "audio" not in return_val 
with pytest.raises(KeyError): _ = return_val["audio"] with pytest.raises(AttributeError): _ = return_val.audio # Verify to_dict matches expected keys as_dict = return_val.to_dict() assert set(as_dict.keys()) == expected_keys
ChatTest
python
numpy__numpy
numpy/f2py/tests/test_kind.py
{ "start": 298, "end": 1845 }
class ____(util.F2PyTest): sources = [util.getpath("tests", "src", "kind", "foo.f90")] @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason="Fails for 32 bit machines") def test_int(self): """Test `int` kind_func for integers up to 10**40.""" selectedintkind = self.module.selectedintkind for i in range(40): assert selectedintkind(i) == selected_int_kind( i ), f"selectedintkind({i}): expected {selected_int_kind(i)!r} but got {selectedintkind(i)!r}" def test_real(self): """ Test (processor-dependent) `real` kind_func for real numbers of up to 31 digits precision (extended/quadruple). """ selectedrealkind = self.module.selectedrealkind for i in range(32): assert selectedrealkind(i) == selected_real_kind( i ), f"selectedrealkind({i}): expected {selected_real_kind(i)!r} but got {selectedrealkind(i)!r}" @pytest.mark.xfail(IS_PPC_OR_AIX, reason="Some PowerPC may not support full IEEE 754 precision") def test_quad_precision(self): """ Test kind_func for quadruple precision [`real(16)`] of 32+ digits . """ selectedrealkind = self.module.selectedrealkind for i in range(32, 40): assert selectedrealkind(i) == selected_real_kind( i ), f"selectedrealkind({i}): expected {selected_real_kind(i)!r} but got {selectedrealkind(i)!r}"
TestKind
python
huggingface__transformers
src/transformers/models/dinov2/modeling_dinov2.py
{ "start": 22268, "end": 25375 }
class ____(Dinov2PreTrainedModel, BackboneMixin): def __init__(self, config): super().__init__(config) super()._init_backbone(config) self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)] self.embeddings = Dinov2Embeddings(config) self.encoder = Dinov2Encoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> Dinov2PatchEmbeddings: return self.embeddings.patch_embeddings @check_model_inputs() @auto_docstring def forward( self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool] = None, **kwargs ) -> BackboneOutput: r""" Examples: ```python >>> from transformers import AutoImageProcessor, AutoBackbone >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = AutoImageProcessor.from_pretrained("facebook/dinov2-base") >>> model = AutoBackbone.from_pretrained( ... "facebook/dinov2-base", out_features=["stage2", "stage5", "stage8", "stage11"] ... 
) >>> inputs = processor(image, return_tensors="pt") >>> outputs = model(**inputs) >>> feature_maps = outputs.feature_maps >>> list(feature_maps[-1].shape) [1, 768, 16, 16] ```""" if output_hidden_states is None: output_hidden_states = self.config.output_hidden_states embedding_output = self.embeddings(pixel_values) output: BaseModelOutput = self.encoder(embedding_output, output_hidden_states=True) hidden_states = output.hidden_states feature_maps = [] for stage, hidden_state in zip(self.stage_names, hidden_states): if stage in self.out_features: if self.config.apply_layernorm: hidden_state = self.layernorm(hidden_state) if self.config.reshape_hidden_states: hidden_state = hidden_state[:, 1:] # this was actually a bug in the original implementation that we copied here, # cause normally the order is height, width batch_size, _, height, width = pixel_values.shape patch_size = self.config.patch_size hidden_state = hidden_state.reshape(batch_size, height // patch_size, width // patch_size, -1) hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous() feature_maps.append(hidden_state) return BackboneOutput( feature_maps=tuple(feature_maps), hidden_states=hidden_states if output_hidden_states else None, ) __all__ = ["Dinov2ForImageClassification", "Dinov2Model", "Dinov2PreTrainedModel", "Dinov2Backbone"]
Dinov2Backbone
python
realpython__materials
django-diary/source_code_step_5/entries/views.py
{ "start": 677, "end": 775 }
class ____(DeleteView): model = Entry success_url = reverse_lazy("entry-list")
EntryDeleteView
python
realpython__materials
python-built-in-exceptions/birds.py
{ "start": 22, "end": 223 }
class ____(ABC): def swim(self): raise NotImplementedError("must be implemented in subclasses") def fly(self): raise NotImplementedError("must be implemented in subclasses")
Bird
python
getsentry__sentry
src/sentry/api/endpoints/organization_insights_tree.py
{ "start": 417, "end": 2472 }
class ____(OrganizationEventsEndpoint): """ Endpoint for querying Next.js Insights data to display a tree view of files and components. Currently, the component and path information is extracted from the span.description field using a regex. In the future, this data will be properly structured through: 1. The Next.js SDK adding these as explicit attributes 2. EAP adding support for array data storage and querying These improvements will enable more efficient querying and level-by-level tree navigation. This endpoint is temporary and will be replaced by the standard /events/ endpoint once these features are implemented elsewhere in the system. """ publish_status = { "GET": ApiPublishStatus.EXPERIMENTAL, } def get(self, request: Request, organization: Organization) -> Response: if not self.has_feature(organization, request): return Response(status=404) if not request.GET.get("noPagination", False): return Response(status=404) response = super().get(request, organization) return self._separate_span_description_info(response) def _separate_span_description_info(self, response): # Regex to split string into '{component_type}{space}({path})' pattern = re.compile(r"^(.*?)\s+\((.*?)\)$") for line in response.data["data"]: match = pattern.match(line["span.description"]) if match: component_type = match.group(1) path = match.group(2) path_components = path.strip("/").split("/") if not path_components or (len(path_components) == 1 and path_components[0] == ""): path_components = [] # Handle root path case else: component_type = None path_components = [] line["function.nextjs.component_type"] = component_type line["function.nextjs.path"] = path_components return response
OrganizationInsightsTreeEndpoint
python
huggingface__transformers
tests/quantization/bnb/test_mixed_int8.py
{ "start": 36352, "end": 37753 }
class ____(MixedInt8Test): model_name = "openai-community/gpt2-xl" EXPECTED_RELATIVE_DIFFERENCE = 1.8720077507258357 EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is John Doe, and I'm a big fan of") EXPECTED_OUTPUTS.add("Hello my name is John Doe, and I'm a fan of the") # Expected values on a A10 EXPECTED_OUTPUTS.add("Hello my name is John Doe, and I am a member of the") # Expected values on Intel CPU EXPECTED_OUTPUTS.add("Hello my name is John Doe. I am a man. I am") EXPECTED_OUTPUTS.add("Hello my name is John, and I'm a writer. I'm") def test_int8_from_pretrained(self): r""" Test whether loading a 8bit model from the Hub works as expected """ from bitsandbytes.nn import Int8Params model_id = "ybelkada/gpt2-xl-8bit" model = AutoModelForCausalLM.from_pretrained(model_id) linear = get_some_linear_layer(model) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
MixedInt8GPT2Test
python
wandb__wandb
wandb/agents/pyagent.py
{ "start": 1618, "end": 1749 }
class ____: QUEUED = "QUEUED" RUNNING = "RUNNING" STOPPED = "STOPPED" ERRORED = "ERRORED" DONE = "DONE"
RunStatus
python
oauthlib__oauthlib
tests/openid/connect/core/endpoints/test_claims_handling.py
{ "start": 746, "end": 4579 }
class TestClaimsHandling(TestCase):
    """End-to-end check that the OIDC `claims` request parameter is carried
    from the authorization request through the code and onto the bearer token.

    The mocked validator's side effects stash `request.claims` on the test
    instance, standing in for a real validator's persistence layer.
    """

    DEFAULT_REDIRECT_URI = 'http://i.b./path'

    def set_scopes(self, scopes):
        # Returns a validate-scopes side effect that forces the given scopes.
        def set_request_scopes(client_id, code, client, request):
            request.scopes = scopes
            return True
        return set_request_scopes

    def set_user(self, request):
        # Side effect for authenticating a resource owner on the request.
        request.user = 'foo'
        request.client_id = 'bar'
        request.client = mock.MagicMock()
        request.client.client_id = 'mocked'
        return True

    def set_client(self, request):
        # Side effect for authenticate_client: attach a mocked client.
        request.client = mock.MagicMock()
        request.client.client_id = 'mocked'
        return True

    def save_claims_with_code(self, client_id, code, request, *args, **kwargs):
        # a real validator would save the claims with the code during save_authorization_code()
        self.claims_from_auth_code_request = request.claims
        self.scopes = request.scopes.split()

    def retrieve_claims_saved_with_code(self, client_id, code, client, request, *args, **kwargs):
        # Replays the claims/scopes captured at authorization time, as a real
        # validator would during validate_code().
        request.claims = self.claims_from_auth_code_request
        request.scopes = self.scopes
        return True

    def save_claims_with_bearer_token(self, token, request, *args, **kwargs):
        # a real validator would save the claims with the access token during save_bearer_token()
        self.claims_saved_with_bearer_token = request.claims

    def setUp(self):
        self.validator = mock.MagicMock(spec=RequestValidator)
        self.validator.get_code_challenge.return_value = None
        self.validator.get_default_redirect_uri.return_value = TestClaimsHandling.DEFAULT_REDIRECT_URI
        self.validator.authenticate_client.side_effect = self.set_client
        self.validator.save_authorization_code.side_effect = self.save_claims_with_code
        self.validator.validate_code.side_effect = self.retrieve_claims_saved_with_code
        self.validator.save_token.side_effect = self.save_claims_with_bearer_token

        self.server = Server(self.validator)

    def test_claims_stored_on_code_creation(self):
        claims = {
            "id_token": {
                "claim_1": None,
                "claim_2": {
                    "essential": True
                }
            },
            "userinfo": {
                "claim_3": {
                    "essential": True
                },
                "claim_4": None
            }
        }

        # URL-encoded JSON form of the `claims` dict above.
        claims_urlquoted = '%7B%22id_token%22%3A%20%7B%22claim_2%22%3A%20%7B%22essential%22%3A%20true%7D%2C%20%22claim_1%22%3A%20null%7D%2C%20%22userinfo%22%3A%20%7B%22claim_4%22%3A%20null%2C%20%22claim_3%22%3A%20%7B%22essential%22%3A%20true%7D%7D%7D'
        uri = 'http://example.com/path?client_id=abc&scope=openid+test_scope&response_type=code&claims=%s'
        h, b, s = self.server.create_authorization_response(uri % claims_urlquoted, scopes='openid test_scope')

        # The decoded claims must have been seen on the authorization request.
        self.assertDictEqual(self.claims_from_auth_code_request, claims)

        code = get_query_credentials(h['Location'])['code'][0]
        token_uri = 'http://example.com/path'
        _, body, _ = self.server.create_token_response(
            token_uri,
            body='client_id=me&redirect_uri=http://back.to/me&grant_type=authorization_code&code=%s' % code
        )

        # ... and carried through to the bearer-token save step.
        self.assertDictEqual(self.claims_saved_with_bearer_token, claims)

    def test_invalid_claims(self):
        # A non-JSON claims parameter must produce an invalid_request redirect.
        uri = 'http://example.com/path?client_id=abc&scope=openid+test_scope&response_type=code&claims=this-is-not-json'
        h, b, s = self.server.create_authorization_response(uri, scopes='openid test_scope')

        error = get_query_credentials(h['Location'])['error'][0]
        error_desc = get_query_credentials(h['Location'])['error_description'][0]
        self.assertEqual(error, 'invalid_request')
        self.assertEqual(error_desc, "Malformed claims parameter")
TestClaimsHandling
python
tiangolo__fastapi
docs_src/security/tutorial005_an_py310.py
{ "start": 1475, "end": 5426 }
class UserInDB(User):
    # Stored credential hash; never returned to API clients (responses use `User`).
    hashed_password: str


password_hash = PasswordHash.recommended()

oauth2_scheme = OAuth2PasswordBearer(
    tokenUrl="token",
    scopes={"me": "Read information about the current user.", "items": "Read items."},
)

app = FastAPI()


def verify_password(plain_password, hashed_password):
    """Check a plaintext password against its stored hash."""
    return password_hash.verify(plain_password, hashed_password)


def get_password_hash(password):
    """Hash a plaintext password for storage."""
    return password_hash.hash(password)


def get_user(db, username: str):
    """Look up a user in the fake DB; returns None implicitly if unknown."""
    if username in db:
        user_dict = db[username]
        return UserInDB(**user_dict)


def authenticate_user(fake_db, username: str, password: str):
    """Return the user on success, or False for unknown user / bad password."""
    user = get_user(fake_db, username)
    if not user:
        return False
    if not verify_password(password, user.hashed_password):
        return False
    return user


def create_access_token(data: dict, expires_delta: timedelta | None = None):
    """Encode *data* as a signed JWT, expiring after *expires_delta*
    (15 minutes when not given)."""
    to_encode = data.copy()
    if expires_delta:
        expire = datetime.now(timezone.utc) + expires_delta
    else:
        expire = datetime.now(timezone.utc) + timedelta(minutes=15)
    to_encode.update({"exp": expire})
    encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
    return encoded_jwt


async def get_current_user(
    security_scopes: SecurityScopes, token: Annotated[str, Depends(oauth2_scheme)]
):
    """Decode the bearer token, load its user, and enforce every scope the
    calling dependency declared via `Security(..., scopes=[...])`."""
    if security_scopes.scopes:
        authenticate_value = f'Bearer scope="{security_scopes.scope_str}"'
    else:
        authenticate_value = "Bearer"
    credentials_exception = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Could not validate credentials",
        headers={"WWW-Authenticate": authenticate_value},
    )
    try:
        payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
        username = payload.get("sub")
        if username is None:
            raise credentials_exception
        # Scopes are stored as a single space-separated string in the token.
        scope: str = payload.get("scope", "")
        token_scopes = scope.split(" ")
        token_data = TokenData(scopes=token_scopes, username=username)
    except (InvalidTokenError, ValidationError):
        raise credentials_exception
    user = get_user(fake_users_db, username=token_data.username)
    if user is None:
        raise credentials_exception
    # Reject if the token is missing any scope the endpoint requires.
    for scope in security_scopes.scopes:
        if scope not in token_data.scopes:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED,
                detail="Not enough permissions",
                headers={"WWW-Authenticate": authenticate_value},
            )
    return user


async def get_current_active_user(
    current_user: Annotated[User, Security(get_current_user, scopes=["me"])],
):
    """Require an authenticated, non-disabled user with the "me" scope."""
    if current_user.disabled:
        raise HTTPException(status_code=400, detail="Inactive user")
    return current_user


@app.post("/token")
async def login_for_access_token(
    form_data: Annotated[OAuth2PasswordRequestForm, Depends()],
) -> Token:
    """Exchange username/password for a JWT carrying the requested scopes."""
    user = authenticate_user(fake_users_db, form_data.username, form_data.password)
    if not user:
        raise HTTPException(status_code=400, detail="Incorrect username or password")
    access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
    access_token = create_access_token(
        data={"sub": user.username, "scope": " ".join(form_data.scopes)},
        expires_delta=access_token_expires,
    )
    return Token(access_token=access_token, token_type="bearer")


@app.get("/users/me/", response_model=User)
async def read_users_me(
    current_user: Annotated[User, Depends(get_current_active_user)],
):
    return current_user


@app.get("/users/me/items/")
async def read_own_items(
    current_user: Annotated[User, Security(get_current_active_user, scopes=["items"])],
):
    # Requires both the "me" scope (via get_current_active_user) and "items".
    return [{"item_id": "Foo", "owner": current_user.username}]


@app.get("/status/")
async def read_system_status(current_user: Annotated[User, Depends(get_current_user)]):
    # Any valid token suffices: no scopes are declared for this dependency.
    return {"status": "ok"}
UserInDB
python
kamyu104__LeetCode-Solutions
Python/find-time-required-to-eliminate-bacterial-strains.py
{ "start": 63, "end": 444 }
class Solution(object):
    def minEliminationTime(self, timeReq, splitTime):
        """
        Greedy with a min-heap: repeatedly merge the two strains with the
        smallest remaining requirements; each merge (split) costs `splitTime`
        plus the larger of the two, since the two halves proceed in parallel.

        Note: mutates `timeReq` in place (heapify / pops / pushes).

        :type timeReq: List[int]
        :type splitTime: int
        :rtype: int
        """
        heapq.heapify(timeReq)
        # Fix: `xrange` is Python-2-only (NameError on Python 3);
        # `range` behaves identically here on both versions.
        for _ in range(len(timeReq) - 1):
            first = heapq.heappop(timeReq)
            second = heapq.heappop(timeReq)
            heapq.heappush(timeReq, max(first, second) + splitTime)
        return timeReq[0]
Solution
python
dagster-io__dagster
python_modules/dagster/dagster/_core/execution/plan/inputs.py
{ "start": 16268, "end": 16902 }
class FromDirectInputValue(
    StepInputSource,
):
    """This input source is for direct python values to be passed as inputs to ops."""

    # Name of the input whose directly-supplied value is looked up on the job.
    input_name: str

    def load_input_object(
        self, step_context: "StepExecutionContext", input_def: InputDefinition
    ) -> Iterator[object]:
        # Yield the single value registered on the job definition for this input.
        job_def = step_context.job_def
        yield job_def.get_direct_input_value(self.input_name)

    def required_resource_keys(
        self, _job_def: JobDefinition, op_handle: NodeHandle, op_input_name: str
    ) -> set[str]:
        # Direct python values require no resources to load.
        return set()


@whitelist_for_serdes(storage_field_names={"node_handle": "solid_handle"})
@record
FromDirectInputValue
python
numba__numba
numba/cuda/cudadrv/nvrtc.py
{ "start": 426, "end": 970 }
class NvrtcResult(IntEnum):
    """Status codes returned by NVRTC API calls.

    Values mirror the C `nvrtcResult` enumeration and must not be changed.
    """

    NVRTC_SUCCESS = 0
    NVRTC_ERROR_OUT_OF_MEMORY = 1
    NVRTC_ERROR_PROGRAM_CREATION_FAILURE = 2
    NVRTC_ERROR_INVALID_INPUT = 3
    NVRTC_ERROR_INVALID_PROGRAM = 4
    NVRTC_ERROR_INVALID_OPTION = 5
    NVRTC_ERROR_COMPILATION = 6
    NVRTC_ERROR_BUILTIN_OPERATION_FAILURE = 7
    NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION = 8
    NVRTC_ERROR_NO_LOWERED_NAMES_BEFORE_COMPILATION = 9
    NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID = 10
    NVRTC_ERROR_INTERNAL_ERROR = 11


# NOTE(review): appears to serialize use of the NVRTC bindings — confirm at
# the acquisition sites elsewhere in this module.
_nvrtc_lock = threading.Lock()
NvrtcResult
python
RaRe-Technologies__gensim
gensim/topic_coherence/text_analysis.py
{ "start": 14054, "end": 19974 }
class ParallelWordOccurrenceAccumulator(WindowedTextsAnalyzer):
    """Accumulate word occurrences in parallel.

    Attributes
    ----------
    processes : int
        Number of processes to use; must be at least two.
    args :
        Should include `relevant_ids` and `dictionary` (see :class:`~UsesDictionary.__init__`).
    kwargs :
        Can include `batch_size`, which is the number of docs to send to a worker at a time.
        If not included, it defaults to 64.

    """

    def __init__(self, processes, *args, **kwargs):
        super(ParallelWordOccurrenceAccumulator, self).__init__(*args)
        if processes < 2:
            raise ValueError(
                "Must have at least 2 processes to run in parallel; got %d" % processes)
        self.processes = processes
        self.batch_size = kwargs.get('batch_size', 64)

    def __str__(self):
        return "%s<processes=%s, batch_size=%s>" % (
            self.__class__.__name__, self.processes, self.batch_size)

    def accumulate(self, texts, window_size):
        """Fan texts out to worker processes, then collect and merge their stats."""
        workers, input_q, output_q = self.start_workers(window_size)
        try:
            self.queue_all_texts(input_q, texts, window_size)
            interrupted = False
        except KeyboardInterrupt:
            # NOTE(review): `logger.warn` is a deprecated alias of `logger.warning`.
            logger.warn("stats accumulation interrupted; <= %d documents processed", self._num_docs)
            interrupted = True

        accumulators = self.terminate_workers(input_q, output_q, workers, interrupted)
        return self.merge_accumulators(accumulators)

    def start_workers(self, window_size):
        """Set up an input and output queue and start processes for each worker.

        Notes
        -----
        The input queue is used to transmit batches of documents to the workers.
        The output queue is used by workers to transmit the WordOccurrenceAccumulator instances.

        Parameters
        ----------
        window_size : int

        Returns
        -------
        tuple
            Tuple of (list of workers, input queue, output queue).

        """
        # maxsize bounds producer lookahead so queueing applies backpressure.
        input_q = mp.Queue(maxsize=self.processes)
        output_q = mp.Queue()
        workers = []
        for _ in range(self.processes):
            accumulator = PatchedWordOccurrenceAccumulator(self.relevant_ids, self.dictionary)
            worker = AccumulatingWorker(input_q, output_q, accumulator, window_size)
            worker.start()
            workers.append(worker)

        return workers, input_q, output_q

    def yield_batches(self, texts):
        """Return a generator over the given texts that yields batches of
        `batch_size` texts at a time."""
        batch = []
        for text in self._iter_texts(texts):
            batch.append(text)
            if len(batch) == self.batch_size:
                yield batch
                batch = []

        if batch:
            yield batch

    def queue_all_texts(self, q, texts, window_size):
        """Sequentially place batches of texts on the given queue until `texts` is consumed.

        The texts are filtered so that only those with at least one relevant token are queued.
        """
        for batch_num, batch in enumerate(self.yield_batches(texts)):
            q.put(batch, block=True)
            # Log only when the running doc count crosses a `log_every` boundary.
            before = self._num_docs / self.log_every
            self._num_docs += sum(len(doc) - window_size + 1 for doc in batch)
            if before < (self._num_docs / self.log_every):
                logger.info(
                    "%d batches submitted to accumulate stats from %d documents (%d virtual)",
                    (batch_num + 1), (batch_num + 1) * self.batch_size, self._num_docs)

    def terminate_workers(self, input_q, output_q, workers, interrupted=False):
        """Wait until all workers have transmitted their WordOccurrenceAccumulator instances,
        then terminate each.

        Warnings
        --------
        We do not use join here because it has been shown to have some issues
        in Python 2.7 (and even in later versions). This method also closes both the input and output queue.
        If `interrupted` is False (normal execution), a None value is placed on the input queue for
        each worker. The workers are looking for this sentinel value and interpret it as a signal to
        terminate themselves. If `interrupted` is True, a KeyboardInterrupt occurred. The workers are
        programmed to recover from this and continue on to transmit their results before terminating.
        So in this instance, the sentinel values are not queued, but the rest of the execution
        continues as usual.

        """
        if not interrupted:
            for _ in workers:
                input_q.put(None, block=True)

        # Drain exactly one accumulator per worker before terminating any of them.
        accumulators = []
        while len(accumulators) != len(workers):
            accumulators.append(output_q.get())
        logger.info("%d accumulators retrieved from output queue", len(accumulators))

        for worker in workers:
            if worker.is_alive():
                worker.terminate()

        input_q.close()
        output_q.close()
        return accumulators

    def merge_accumulators(self, accumulators):
        """Merge the list of accumulators into a single `WordOccurrenceAccumulator` with all
        occurrence and co-occurrence counts, and a `num_docs` that reflects the total observed
        by all the individual accumulators.
        """
        accumulator = WordOccurrenceAccumulator(self.relevant_ids, self.dictionary)
        for other_accumulator in accumulators:
            accumulator.merge(other_accumulator)
        # Workers do partial accumulation, so none of the co-occurrence matrices are symmetrized.
        # This is by design, to avoid unnecessary matrix additions/conversions during accumulation.
        accumulator._symmetrize()
        logger.info("accumulated word occurrence stats for %d virtual documents", accumulator.num_docs)
        return accumulator
ParallelWordOccurrenceAccumulator
python
graphql-python__graphene
graphene/tests/issues/test_1394.py
{ "start": 59, "end": 947 }
class Query(ObjectType):
    """Root query with a `hello` field that requires a non-null String input."""

    hello = String(input=NonNull(String))

    def resolve_hello(self, info, input):
        # "nothing" is the sentinel used by the tests to force a null result.
        return None if input == "nothing" else f"Hello {input}!"


schema = Schema(query=Query)


def test_required_input_provided():
    """
    Test that a required argument works when provided.
    """
    result = schema.execute('{ hello(input: "Potato") }')
    assert not result.errors
    assert result.data == {"hello": "Hello Potato!"}


def test_required_input_missing():
    """
    Test that a required argument raised an error if not provided.
    """
    result = schema.execute("{ hello }")
    assert result.errors
    assert len(result.errors) == 1
    assert result.errors[0].message == (
        "Field 'hello' argument 'input' of type 'String!' is required, but it was not provided."
    )
Query
python
tornadoweb__tornado
tornado/test/httpclient_test.py
{ "start": 881, "end": 1087 }
class HelloWorldHandler(RequestHandler):
    """Plain-text greeting; the optional ?name= argument overrides "world"."""

    def get(self):
        who = self.get_argument("name", "world")
        self.set_header("Content-Type", "text/plain")
        self.finish("Hello %s!" % who)
HelloWorldHandler
python
pytorch__pytorch
torch/ao/quantization/fx/graph_module.py
{ "start": 3188, "end": 4541 }
class ObservedStandaloneGraphModule(ObservedGraphModule):
    """ObservedGraphModule variant that additionally preserves the standalone
    module's input/output quantized-index attributes across transformations."""

    def __init__(
        self,
        root: torch.nn.Module | dict[str, Any],
        graph: Graph,
        preserved_attr_names: set[str],
    ):
        # Always preserve the standalone-module bookkeeping attrs in addition
        # to whatever the caller requested.
        preserved_attr_names = preserved_attr_names.union(
            {
                "_standalone_module_input_quantized_idxs",
                "_standalone_module_output_quantized_idxs",
            }
        )
        super().__init__(root, graph, preserved_attr_names)

    def __deepcopy__(self, memo):
        # Copy state onto a plain Module, then rebuild this class from it
        # along with deep copies of the graph and preserved attr names.
        fake_mod = torch.nn.Module()
        fake_mod.__dict__ = copy.deepcopy(self.__dict__)
        return ObservedStandaloneGraphModule(
            fake_mod,
            copy.deepcopy(self.graph),
            copy.deepcopy(self.preserved_attr_names),
        )


def _is_observed_standalone_module(module: Any) -> bool:
    """True iff *module* is observed and flagged standalone in its meta."""
    return (
        _is_observed_module(module)
        and module.meta["_observed_graph_module_attrs"].is_observed_standalone_module
    )


def _save_packed_weight(self, destination, prefix, keep_vars):
    # NOTE(review): module-level function with a `self` parameter — presumably
    # installed on modules as a state-dict save hook; confirm at the
    # registration site. Copies every ScriptObject "_packed_weight" attribute
    # into the destination state dict under the given prefix.
    for attr_name in dir(self):
        if "_packed_weight" in attr_name and isinstance(
            getattr(self, attr_name), torch._C.ScriptObject
        ):  # type: ignore[attr-defined]
            packed_weight = getattr(self, attr_name)
            destination[prefix + attr_name] = packed_weight
ObservedStandaloneGraphModule
python
doocs__leetcode
solution/2400-2499/2441.Largest Positive Integer That Exists With Its Negative/Solution.py
{ "start": 0, "end": 144 }
class Solution:
    def findMaxK(self, nums: List[int]) -> int:
        """Return the largest k such that both k and -k occur in nums, else -1."""
        seen = set(nums)
        best = -1
        for value in seen:
            if value > best and -value in seen:
                best = value
        return best
Solution
python
bokeh__bokeh
src/bokeh/models/widgets/sliders.py
{ "start": 5124, "end": 5869 }
class ____(NumericalSlider): """ Slider-based number selection widget. """ # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) start = Required(Float, help=""" The minimum allowable value. """) end = Required(Float, help=""" The maximum allowable value. """) value = Required(Float, help=""" Initial or selected value. """) value_throttled = Readonly(Required(Float), help=""" Initial or selected value, throttled according to report only on mouseup. """) step = Float(default=1, help=""" The step between consecutive values. """) format = Override(default="0[.]00")
Slider
python
numba__numba
numba/core/generators.py
{ "start": 7985, "end": 8909 }
class GeneratorLower(BaseGeneratorLower):
    """
    Support class for lowering nopython generators.
    """

    def get_generator_type(self):
        # The function descriptor's declared restype is the generator type.
        return self.fndesc.restype

    def box_generator_struct(self, lower, gen_struct):
        # Nopython mode uses the raw struct directly; no boxing required.
        return gen_struct

    def lower_finalize_func_body(self, builder, genptr):
        """
        Lower the body of the generator's finalizer: decref all live
        state variables.
        """
        self.debug_print(builder, "# generator: finalize")
        if self.context.enable_nrt:
            # Always dereference all arguments
            # self.debug_print(builder, "# generator: clear args")
            args_ptr = self.get_args_ptr(builder, genptr)
            for ty, val in self.arg_packer.load(builder, args_ptr):
                self.context.nrt.decref(builder, ty, val)

        self.debug_print(builder, "# generator: finalize end")
        builder.ret_void()
GeneratorLower
python
walkccc__LeetCode
solutions/2941. Maximum GCD-Sum of a Subarray/2941.py
{ "start": 0, "end": 829 }
class Solution:
    def maxGcdSum(self, nums: list[int], k: int) -> int:
        """Return the max of (subarray sum) * (subarray gcd) over all
        subarrays of length >= k.

        For each end index we keep the distinct (start, gcd) candidates of
        subarrays ending there; gcds only shrink as a subarray extends, so
        there are O(log(max value)) distinct gcds per end index.
        """
        prefix_sums = list(itertools.accumulate(nums, initial=0))
        best = 0
        candidates = []  # (start index, gcd of nums[start..end]) pairs
        for end, value in enumerate(nums):
            merged = []
            for start, g in candidates:
                extended = math.gcd(g, value)
                # Collapse runs of equal gcds, keeping the earliest start.
                if not merged or merged[-1][1] != extended:
                    merged.append((start, extended))
            merged.append((end, value))
            candidates = merged
            for start, g in candidates:
                if end - start + 1 >= k:
                    best = max(best, (prefix_sums[end + 1] - prefix_sums[start]) * g)
        return best
Solution
python
coleifer__peewee
tests/base_models.py
{ "start": 1442, "end": 1523 }
class B(TestModel):
    # Many-to-one link to A; reverse accessor is `A.bs`.
    a = ForeignKeyField(A, backref='bs')
    b = TextField()
B
python
ray-project__ray
python/ray/_private/thirdparty/pynvml/pynvml.py
{ "start": 66218, "end": 66546 }
class c_nvmlVgpuProcessesUtilizationInfo_v1_t(_PrintableStructure):
    """ctypes mirror of NVML's nvmlVgpuProcessesUtilizationInfo_v1_t struct.

    Field order and types must match the C layout exactly.
    """
    _fields_ = [
        ('version', c_uint),
        ('vgpuProcessCount', c_uint),
        ('lastSeenTimeStamp', c_ulonglong),
        ('vgpuProcUtilArray', POINTER(c_nvmlVgpuProcessUtilizationInfo_v1_t)),
    ]


# NOTE(review): 0x01000018 looks like NVML's versioned-struct encoding
# (version 1, sizeof 0x18) — confirm against nvml.h before relying on it.
VgpuProcessesUtilizationInfo_v1 = 0x01000018
c_nvmlVgpuProcessesUtilizationInfo_v1_t
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/constructor24.py
{ "start": 1158, "end": 1801 }
class ContainerList(Generic[U]):
    # Type-checker conformance sample: the "This should generate an error"
    # comments below are assertions about expected diagnostics, not docs.
    def __init__(self) -> None:
        self.containers: List[Container[U]] = []

    def method1(self, a: U):
        Container[U](a)
        Container()
        Container(123)

        # This should generate an error if strictParameterNoneValue is true.
        Container[U]()

        # This should generate an error if strictParameterNoneValue is true.
        Container[U](None)

    def method2(self):
        Container[U].create()


def func1(obv: Container[T], default_value: T = None) -> None:
    # This should generate an error if strictParameterNoneValue is false.
    obv.on_next(default_value)
ContainerList
python
coleifer__peewee
tests/cockroachdb.py
{ "start": 424, "end": 517 }
class Arr(TestModel):
    title = TextField()
    # Array-of-text column; index explicitly disabled.
    tags = ArrayField(TextField, index=False)
Arr
python
jazzband__django-oauth-toolkit
tests/test_scopes.py
{ "start": 1708, "end": 2397 }
class BaseTest(TestCase):
    """Shared fixtures: a test user, a developer user, and a confidential
    authorization-code application owned by the developer."""

    factory = RequestFactory()

    @classmethod
    def setUpTestData(cls):
        cls.test_user = UserModel.objects.create_user("test_user", "test@example.com", "123456")
        cls.dev_user = UserModel.objects.create_user("dev_user", "dev@example.com", "123456")
        cls.application = Application.objects.create(
            name="Test Application",
            redirect_uris="http://localhost http://example.com http://example.org",
            user=cls.dev_user,
            client_type=Application.CLIENT_CONFIDENTIAL,
            authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE,
            client_secret=CLEARTEXT_SECRET,
        )
BaseTest
python
huggingface__transformers
tests/models/clipseg/test_modeling_clipseg.py
{ "start": 10507, "end": 12092 }
class CLIPSegTextModelTest(ModelTesterMixin, unittest.TestCase):
    """Common model tests specialized for the CLIPSeg text encoder."""

    all_model_classes = (CLIPSegTextModel,) if is_torch_available() else ()
    model_split_percents = [0.5, 0.8, 0.9]

    def setUp(self):
        self.model_tester = CLIPSegTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CLIPSegTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip
    def test_training(self):
        pass

    @unittest.skip
    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(
        reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant(self):
        pass

    @unittest.skip(
        reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
    )
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

    @unittest.skip(reason="CLIPSeg does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model_name = "CIDAS/clipseg-rd64-refined"
        model = CLIPSegTextModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
CLIPSegTextModelTest
python
getsentry__sentry
src/sentry/workflow_engine/migrations/0076_add_detector_group_table.py
{ "start": 329, "end": 2992 }
class Migration(CheckedMigration):
    """Recreates the workflow_engine_detectorgroup table, first dropping any
    remnant left by an earlier partially-applied attempt."""

    # This flag is used to mark that a migration shouldn't be automatically run in production.
    # This should only be used for operations where it's safe to run the migration after your
    # code has deployed. So this should not be used for most operations that alter the schema
    # of a table.
    # Here are some things that make sense to mark as post deployment:
    # - Large data migrations. Typically we want these to be run manually so that they can be
    #   monitored and not block the deploy for a long period of time while they run.
    # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
    #   run this outside deployments so that we don't block them. Note that while adding an index
    #   is a schema change, it's completely safe to run the operation after the code has deployed.
    # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
    is_post_deployment = False

    dependencies = [
        ("sentry", "0943_create_data_access_grant"),
        ("workflow_engine", "0075_add_index_to_dcg_action"),
    ]

    operations = [
        SafeRunSQL(
            """DROP TABLE IF EXISTS "workflow_engine_detectorgroup";""",
            reverse_sql=migrations.RunSQL.noop,
            hints={"tables": ["workflow_engine_detectorgroup"]},
        ),
        # this migration was successfully run in S4S and DE, failed in US
        migrations.CreateModel(
            name="DetectorGroup",
            fields=[
                (
                    "id",
                    sentry.db.models.fields.bounded.BoundedBigAutoField(
                        primary_key=True, serialize=False
                    ),
                ),
                ("date_updated", models.DateTimeField(auto_now=True)),
                ("date_added", models.DateTimeField(auto_now_add=True)),
                (
                    "detector",
                    sentry.db.models.fields.foreignkey.FlexibleForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="workflow_engine.detector",
                    ),
                ),
                (
                    "group",
                    sentry.db.models.fields.foreignkey.FlexibleForeignKey(
                        db_constraint=False,
                        on_delete=django.db.models.deletion.CASCADE,
                        to="sentry.group",
                    ),
                ),
            ],
            options={
                "db_table": "workflow_engine_detectorgroup",
            },
        ),
    ]
Migration
python
apache__airflow
providers/google/tests/unit/google/cloud/operators/test_dataprep.py
{ "start": 4255, "end": 4782 }
class TestDataprepRunJobGroupOperator:
    """Unit test for DataprepRunJobGroupOperator.execute."""

    @mock.patch("airflow.providers.google.cloud.operators.dataprep.GoogleDataprepHook")
    def test_execute(self, hook_mock):
        # Executing the operator must construct the hook with the configured
        # connection id and delegate the request body to run_job_group.
        operator = DataprepRunJobGroupOperator(
            dataprep_conn_id=DATAPREP_CONN_ID,
            body_request=DATA,
            task_id=TASK_ID,
        )
        operator.execute(context=None)

        hook_mock.assert_called_once_with(dataprep_conn_id="dataprep_default")
        hook_mock.return_value.run_job_group.assert_called_once_with(body_request=DATA)
TestDataprepRunJobGroupOperator
python
sqlalchemy__sqlalchemy
test/orm/test_relationships.py
{ "start": 132482, "end": 136799 }
class AmbiguousFKResolutionTest(_RelationshipErrors, fixtures.MappedTest):
    """Exercises relationship() foreign-key resolution when multiple candidate
    FKs / join paths exist, and the disambiguation options
    (foreign_keys, primaryjoin, foreign() annotation)."""

    @classmethod
    def define_tables(cls, metadata):
        Table("a", metadata, Column("id", Integer, primary_key=True))
        # b references a twice -> ambiguous one-to-many join by default.
        Table(
            "b",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("aid_1", Integer, ForeignKey("a.id")),
            Column("aid_2", Integer, ForeignKey("a.id")),
        )
        # secondary table with no foreign keys at all.
        Table("atob", metadata, Column("aid", Integer), Column("bid", Integer))
        # secondary table with two FKs to each side -> ambiguous many-to-many.
        Table(
            "atob_ambiguous",
            metadata,
            Column("aid1", Integer, ForeignKey("a.id")),
            Column("bid1", Integer, ForeignKey("b.id")),
            Column("aid2", Integer, ForeignKey("a.id")),
            Column("bid2", Integer, ForeignKey("b.id")),
        )

    @classmethod
    def setup_classes(cls):
        class A(cls.Basic):
            pass

        class B(cls.Basic):
            pass

    def test_ambiguous_fks_o2m(self):
        # Two FKs from b to a and no disambiguation -> configure must fail.
        A, B = self.classes.A, self.classes.B
        a, b = self.tables.a, self.tables.b
        self.mapper_registry.map_imperatively(
            A, a, properties={"bs": relationship(B)}
        )
        self.mapper_registry.map_imperatively(B, b)
        self._assert_raises_ambig_join(configure_mappers, "A.bs", None)

    def test_with_fks_o2m(self):
        # foreign_keys= resolves the ambiguity.
        A, B = self.classes.A, self.classes.B
        a, b = self.tables.a, self.tables.b
        self.mapper_registry.map_imperatively(
            A, a, properties={"bs": relationship(B, foreign_keys=b.c.aid_1)}
        )
        self.mapper_registry.map_imperatively(B, b)
        sa.orm.configure_mappers()
        assert A.bs.property.primaryjoin.compare(a.c.id == b.c.aid_1)
        eq_(A.bs.property._calculated_foreign_keys, {b.c.aid_1})

    def test_with_pj_o2m(self):
        # An explicit primaryjoin also resolves the ambiguity.
        A, B = self.classes.A, self.classes.B
        a, b = self.tables.a, self.tables.b
        self.mapper_registry.map_imperatively(
            A,
            a,
            properties={
                "bs": relationship(B, primaryjoin=a.c.id == b.c.aid_1)
            },
        )
        self.mapper_registry.map_imperatively(B, b)
        sa.orm.configure_mappers()
        assert A.bs.property.primaryjoin.compare(a.c.id == b.c.aid_1)
        eq_(A.bs.property._calculated_foreign_keys, {b.c.aid_1})

    def test_with_annotated_pj_o2m(self):
        # The foreign() annotation inside the primaryjoin works as well.
        A, B = self.classes.A, self.classes.B
        a, b = self.tables.a, self.tables.b
        self.mapper_registry.map_imperatively(
            A,
            a,
            properties={
                "bs": relationship(B, primaryjoin=a.c.id == foreign(b.c.aid_1))
            },
        )
        self.mapper_registry.map_imperatively(B, b)
        sa.orm.configure_mappers()
        assert A.bs.property.primaryjoin.compare(a.c.id == b.c.aid_1)
        eq_(A.bs.property._calculated_foreign_keys, {b.c.aid_1})

    def test_no_fks_m2m(self):
        # Secondary with no FKs -> no join can be determined.
        A, B = self.classes.A, self.classes.B
        a, b, a_to_b = self.tables.a, self.tables.b, self.tables.atob
        self.mapper_registry.map_imperatively(
            A, a, properties={"bs": relationship(B, secondary=a_to_b)}
        )
        self.mapper_registry.map_imperatively(B, b)
        self._assert_raises_no_join(sa.orm.configure_mappers, "A.bs", a_to_b)

    def test_ambiguous_fks_m2m(self):
        # Secondary with duplicate FKs to both sides -> ambiguous join.
        A, B = self.classes.A, self.classes.B
        a, b, a_to_b = self.tables.a, self.tables.b, self.tables.atob_ambiguous
        self.mapper_registry.map_imperatively(
            A, a, properties={"bs": relationship(B, secondary=a_to_b)}
        )
        self.mapper_registry.map_imperatively(B, b)

        self._assert_raises_ambig_join(
            configure_mappers, "A.bs", "atob_ambiguous"
        )

    def test_with_fks_m2m(self):
        # foreign_keys= selecting one FK per side resolves the m2m ambiguity.
        A, B = self.classes.A, self.classes.B
        a, b, a_to_b = self.tables.a, self.tables.b, self.tables.atob_ambiguous
        self.mapper_registry.map_imperatively(
            A,
            a,
            properties={
                "bs": relationship(
                    B,
                    secondary=a_to_b,
                    foreign_keys=[a_to_b.c.aid1, a_to_b.c.bid1],
                )
            },
        )
        self.mapper_registry.map_imperatively(B, b)
        sa.orm.configure_mappers()
AmbiguousFKResolutionTest
python
keras-team__keras
keras/src/ops/math_test.py
{ "start": 10630, "end": 16240 }
class ____(testing.TestCase): @parameterized.parameters([(kmath.segment_sum,), (kmath.segment_max,)]) @pytest.mark.skipif( backend.backend() == "jax", reason="JAX does not support `num_segments=None`.", ) def test_segment_reduce(self, segment_reduce_op): # 1D case data = KerasTensor((10, 4), dtype="float32") segment_ids = KerasTensor((10,), dtype="int32") outputs = segment_reduce_op(data, segment_ids) self.assertEqual(outputs.shape, (None, 4)) data = KerasTensor((10,), dtype="float32") segment_ids = KerasTensor((10,), dtype="int32") outputs = segment_reduce_op(data, segment_ids) self.assertEqual(outputs.shape, (None,)) @parameterized.parameters([(kmath.segment_sum,), (kmath.segment_max,)]) def test_segment_reduce_explicit_num_segments(self, segment_reduce_op): # 1D case data = KerasTensor((10, 4), dtype="float32") segment_ids = KerasTensor((10,), dtype="int32") outputs = segment_reduce_op(data, segment_ids, num_segments=5) self.assertEqual(outputs.shape, (5, 4)) data = KerasTensor((6,), dtype="float32") segment_ids = KerasTensor( (6,), dtype="int32", ) outputs = segment_reduce_op(data, segment_ids, num_segments=5) self.assertEqual(outputs.shape, (5,)) def test_topk(self): x = KerasTensor((1, 2, 3)) values, indices = kmath.top_k(x, k=1) self.assertEqual(values.shape, (1, 2, 1)) self.assertEqual(indices.shape, (1, 2, 1)) def test_in_top_k(self): targets = KerasTensor((5,)) predictions = KerasTensor((5, 10)) self.assertEqual(kmath.in_top_k(targets, predictions, k=1).shape, (5,)) def test_logsumexp(self): x = KerasTensor((1, 2, 3), dtype="float32") result = kmath.logsumexp(x) self.assertEqual(result.shape, ()) def test_extract_sequences(self): x = KerasTensor((10, 16), dtype="float32") sequence_length = 3 sequence_stride = 2 outputs = kmath.extract_sequences(x, sequence_length, sequence_stride) num_sequences = 1 + (x.shape[-1] - sequence_length) // sequence_stride self.assertEqual(outputs.shape, (10, num_sequences, sequence_length)) def test_fft(self): real = 
KerasTensor((2, 4, 3), dtype="float32") imag = KerasTensor((2, 4, 3), dtype="float32") real_output, imag_output = kmath.fft((real, imag)) ref = np.fft.fft(np.ones((2, 4, 3))) self.assertEqual(real_output.shape, ref.shape) self.assertEqual(imag_output.shape, ref.shape) def test_fft2(self): real = KerasTensor((2, 4, 3), dtype="float32") imag = KerasTensor((2, 4, 3), dtype="float32") real_output, imag_output = kmath.fft2((real, imag)) ref = np.fft.fft2(np.ones((2, 4, 3))) self.assertEqual(real_output.shape, ref.shape) self.assertEqual(imag_output.shape, ref.shape) def test_ifft2(self): real = KerasTensor((2, 4, 3), dtype="float32") imag = KerasTensor((2, 4, 3), dtype="float32") real_output, imag_output = kmath.ifft2((real, imag)) ref = np.fft.ifft2(np.ones((2, 4, 3))) self.assertEqual(real_output.shape, ref.shape) self.assertEqual(imag_output.shape, ref.shape) def test_rfft(self): x = KerasTensor((2, 4, 3), dtype="float32") real_output, imag_output = kmath.rfft(x) ref = np.fft.rfft(np.ones((2, 4, 3))) self.assertEqual(real_output.shape, ref.shape) self.assertEqual(imag_output.shape, ref.shape) def test_irfft(self): real = KerasTensor((2, 4, 3), dtype="float32") imag = KerasTensor((2, 4, 3), dtype="float32") output = kmath.irfft((real, imag)) ref = np.fft.irfft(np.ones((2, 4, 3))) self.assertEqual(output.shape, ref.shape) def test_rsqrt(self): x = KerasTensor([4, 3], dtype="float32") self.assertEqual(kmath.rsqrt(x).shape, (4, 3)) def test_stft(self): x = KerasTensor((2, 32), dtype="float32") sequence_length = 10 sequence_stride = 3 fft_length = 15 real_output, imag_output = kmath.stft( x, sequence_length, sequence_stride, fft_length ) real_ref, imag_ref = _stft( np.ones((2, 32)), sequence_length, sequence_stride, fft_length ) self.assertEqual(real_output.shape, real_ref.shape) self.assertEqual(imag_output.shape, imag_ref.shape) def test_istft(self): # sequence_stride must <= x[0].shape[-1] # sequence_stride must >= fft_length / num_sequences sequence_length = 10 
sequence_stride = 3 fft_length = 15 num_sequences = fft_length // sequence_stride + 1 real = KerasTensor((num_sequences, 32), dtype="float32") imag = KerasTensor((num_sequences, 32), dtype="float32") output = kmath.istft( (real, imag), sequence_length, sequence_stride, fft_length ) ref = _istft( (np.ones((num_sequences, 32)), np.ones((num_sequences, 32))), sequence_length, sequence_stride, fft_length, ) self.assertEqual(output.shape, ref.shape) def test_logdet(self): x = KerasTensor((3, 3)) out = kmath.logdet(x) self.assertEqual(out.shape, ()) x = KerasTensor((2, 4, 3, 3)) out = kmath.logdet(x) self.assertEqual(out.shape, (2, 4))
MathOpsStaticShapeTest
python
readthedocs__readthedocs.org
readthedocs/search/documents.py
{ "start": 628, "end": 1048 }
class ____: def update(self, *args, **kwargs): # Hack a fix to our broken connection pooling # This creates a new connection on every request, # but actually works :) log.debug("Hacking Elastic indexing to fix connection pooling") self.using = Elasticsearch(**settings.ELASTICSEARCH_DSL["default"]) super().update(*args, **kwargs) @project_index.document
RTDDocTypeMixin
python
ray-project__ray
python/ray/serve/tests/unit/test_proxy.py
{ "start": 5524, "end": 6028 }
class ____: def __init__(self): self.messages = [] async def __call__(self, message): self.messages.append(message) async def _consume_proxy_generator( gen: ResponseGenerator, ) -> Tuple[ResponseStatus, List]: status = None messages = [] async for message in gen: if isinstance(message, ResponseStatus): status = message else: messages.append(message) assert status is not None return status, messages
FakeHttpSend
python
great-expectations__great_expectations
great_expectations/metrics/metric.py
{ "start": 1487, "end": 2524 }
class ____(ModelMetaclass): """Metaclass for Metric classes that maintains a registry of all concrete Metric types.""" _registry: dict[str, type["Metric"]] = {} def __new__(cls, name, bases, attrs, **kwargs): register_cls = super().__new__(cls, name, bases, attrs) # Don't register the base Metric class if name != "Metric": metric_name = attrs.get("name") # Some subclasses of metric may not have a name # Those classes will not be registered if metric_name: MetaMetric._registry[metric_name] = register_cls return register_cls @classmethod def get_registered_metric_class_from_metric_name(cls, metric_name: str) -> type["Metric"]: """Returns the registered Metric class for a given metric name.""" try: return cls._registry[metric_name] except KeyError: raise UnregisteredMetricError(metric_name) _MetricResult = TypeVar("_MetricResult", bound=MetricResult)
MetaMetric
python
tiangolo__fastapi
scripts/notify_translations.py
{ "start": 2906, "end": 2998 }
class ____(BaseModel): nodes: List[AllDiscussionsDiscussionNode]
AllDiscussionsDiscussions
python
scipy__scipy
scipy/optimize/_nonlin.py
{ "start": 19328, "end": 26554 }
class ____: r""" A matrix represented as .. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger However, if the rank of the matrix reaches the dimension of the vectors, full matrix representation will be used thereon. """ # generic type compatibility with scipy-stubs __class_getitem__ = classmethod(GenericAlias) def __init__(self, alpha, n, dtype): self.alpha = alpha self.cs = [] self.ds = [] self.n = n self.dtype = dtype self.collapsed = None @staticmethod def _matvec(v, alpha, cs, ds): axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'], cs[:1] + [v]) w = alpha * v for c, d in zip(cs, ds): a = dotc(d, v) w = axpy(c, w, w.size, a) return w @staticmethod def _solve(v, alpha, cs, ds): """Evaluate w = M^-1 v""" if len(cs) == 0: return v/alpha # (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1 axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v]) c0 = cs[0] A = alpha * np.identity(len(cs), dtype=c0.dtype) for i, d in enumerate(ds): for j, c in enumerate(cs): A[i,j] += dotc(d, c) q = np.zeros(len(cs), dtype=c0.dtype) for j, d in enumerate(ds): q[j] = dotc(d, v) q /= alpha q = solve(A, q) w = v/alpha for c, qc in zip(cs, q): w = axpy(c, w, w.size, -qc) return w def matvec(self, v): """Evaluate w = M v""" if self.collapsed is not None: return np.dot(self.collapsed, v) return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds) def rmatvec(self, v): """Evaluate w = M^H v""" if self.collapsed is not None: return np.dot(self.collapsed.T.conj(), v) return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs) def solve(self, v, tol=0): """Evaluate w = M^-1 v""" if self.collapsed is not None: return solve(self.collapsed, v) return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds) def rsolve(self, v, tol=0): """Evaluate w = M^-H v""" if self.collapsed is not None: return solve(self.collapsed.T.conj(), v) return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs) def append(self, c, d): if self.collapsed is not None: 
self.collapsed += c[:,None] * d[None,:].conj() return self.cs.append(c) self.ds.append(d) if len(self.cs) > c.size: self.collapse() def __array__(self, dtype=None, copy=None): if dtype is not None: warnings.warn("LowRankMatrix is scipy-internal code, `dtype` " f"should only be None but was {dtype} (not handled)", stacklevel=3) if copy is not None: warnings.warn("LowRankMatrix is scipy-internal code, `copy` " f"should only be None but was {copy} (not handled)", stacklevel=3) if self.collapsed is not None: return self.collapsed Gm = self.alpha*np.identity(self.n, dtype=self.dtype) for c, d in zip(self.cs, self.ds): Gm += c[:,None]*d[None,:].conj() return Gm def collapse(self): """Collapse the low-rank matrix to a full-rank one.""" self.collapsed = np.array(self, copy=copy_if_needed) self.cs = None self.ds = None self.alpha = None def restart_reduce(self, rank): """ Reduce the rank of the matrix by dropping all vectors. """ if self.collapsed is not None: return assert rank > 0 if len(self.cs) > rank: del self.cs[:] del self.ds[:] def simple_reduce(self, rank): """ Reduce the rank of the matrix by dropping oldest vectors. """ if self.collapsed is not None: return assert rank > 0 while len(self.cs) > rank: del self.cs[0] del self.ds[0] def svd_reduce(self, max_rank, to_retain=None): """ Reduce the rank of the matrix by retaining some SVD components. This corresponds to the \"Broyden Rank Reduction Inverse\" algorithm described in [1]_. Note that the SVD decomposition can be done by solving only a problem whose size is the effective rank of this matrix, which is viable even for large problems. Parameters ---------- max_rank : int Maximum rank of this matrix after reduction. to_retain : int, optional Number of SVD components to retain when reduction is done (ie. rank > max_rank). Default is ``max_rank - 2``. References ---------- .. [1] B.A. van der Rotten, PhD thesis, \"A limited memory Broyden method to solve high-dimensional systems of nonlinear equations\". 
Mathematisch Instituut, Universiteit Leiden, The Netherlands (2003). https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf """ if self.collapsed is not None: return p = max_rank if to_retain is not None: q = to_retain else: q = p - 2 if self.cs: p = min(p, len(self.cs[0])) q = max(0, min(q, p-1)) m = len(self.cs) if m < p: # nothing to do return C = np.array(self.cs).T D = np.array(self.ds).T D, R = qr(D, mode='economic') C = dot(C, R.T.conj()) U, S, WH = svd(C, full_matrices=False) C = dot(C, inv(WH)) D = dot(D, WH.T.conj()) for k in range(q): self.cs[k] = C[:,k].copy() self.ds[k] = D[:,k].copy() del self.cs[q:] del self.ds[q:] _doc_parts['broyden_params'] = _dedent_for_py313(""" alpha : float, optional Initial guess for the Jacobian is ``(-1/alpha)``. reduction_method : str or tuple, optional Method used in ensuring that the rank of the Broyden matrix stays low. Can either be a string giving the name of the method, or a tuple of the form ``(method, param1, param2, ...)`` that gives the name of the method and values for additional parameters. Methods available: - ``restart``: drop all matrix columns. Has no extra parameters. - ``simple``: drop oldest matrix column. Has no extra parameters. - ``svd``: keep only the most significant SVD components. Takes an extra parameter, ``to_retain``, which determines the number of SVD components to retain when rank reduction is done. Default is ``max_rank - 2``. max_rank : int, optional Maximum rank for the Broyden matrix. Default is infinity (i.e., no rank reduction). """).strip()
LowRankMatrix
python
spyder-ide__spyder
spyder/plugins/findinfiles/widgets/search_thread.py
{ "start": 816, "end": 14874 }
class ____(QThread): """Find in files search thread.""" PYTHON_EXTENSIONS = ['.py', '.pyw', '.pyx', '.ipy', '.pyi', '.pyt'] USEFUL_EXTENSIONS = [ '.ipynb', '.md', '.c', '.cpp', '.h', '.cxx', '.f', '.f03', '.f90', '.json', '.dat', '.csv', '.tsv', '.txt', '.md', '.rst', '.yml', '.yaml', '.ini', '.bat', '.sh', '.ui' ] SKIPPED_EXTENSIONS = ['.svg'] sig_finished = Signal(bool) sig_current_file = Signal(str) sig_current_folder = Signal(str) sig_file_match = Signal(object) sig_line_match = Signal(object, object) sig_out_print = Signal(object) # Batch power sizes (2**power) power = 0 # 0**1 = 1 max_power = 9 # 2**9 = 512 def __init__(self, parent, search_text, text_color, max_results=1000): super().__init__(parent) self.search_text = search_text self.text_color = text_color self.max_results = max_results self.mutex = QMutex() self.stopped = None self.pathlist = None self.total_matches = None self.error_flag = None self.rootpath = None self.exclude = None self.texts = None self.text_re = None self.completed = None self.case_sensitive = True self.total_matches = 0 self.is_file = False self.results = {} self.num_files = 0 self.files = [] self.partial_results = [] self.total_items = 0 def initialize(self, path, is_file, exclude, texts, text_re, case_sensitive): self.rootpath = path if exclude: self.exclude = re.compile(exclude) self.texts = texts self.text_re = text_re self.is_file = is_file self.stopped = False self.completed = False self.case_sensitive = case_sensitive def run(self): try: self.filenames = [] if self.is_file: self.find_string_in_file(self.rootpath) else: self.find_files_in_path(self.rootpath) except Exception: # Important note: we have to handle unexpected exceptions by # ourselves because they won't be catched by the main thread # (known QThread limitation/bug) traceback.print_exc() self.error_flag = _("Unexpected error: see internal console") self.stop() self.sig_finished.emit(self.completed) def stop(self): with QMutexLocker(self.mutex): self.stopped = 
True def find_files_in_path(self, path): if self.pathlist is None: self.pathlist = [] self.pathlist.append(path) for path, dirs, files in os.walk(path): with QMutexLocker(self.mutex): if self.stopped: return False try: # For directories for d in dirs[:]: with QMutexLocker(self.mutex): if self.stopped: return False dirname = os.path.join(path, d) # Only search in regular directories. # The try/except is necessary to catch an error when Python # can't access a directory with junctions on Windows. # Fixes spyder-ide/spyder#24898 try: st_dir_mode = os.stat(dirname).st_mode if not stat.S_ISDIR(st_dir_mode): dirs.remove(d) except OSError: dirs.remove(d) if (self.exclude and re.search(self.exclude, dirname + os.sep)): # Exclude patterns defined by the user dirs.remove(d) elif d.startswith('.'): # Exclude all dot dirs. dirs.remove(d) # For files for f in files: with QMutexLocker(self.mutex): if self.stopped: return False filename = os.path.join(path, f) ext = osp.splitext(filename)[1] # Only search in regular files (i.e. not pipes). # The try/except is necessary to catch an error when # Python can't get the file status due to too many levels # of symbolic links. # Fixes spyder-ide/spyder#20798 try: st_file_mode = os.stat(filename).st_mode if not stat.S_ISREG(st_file_mode): continue except OSError: continue # Exclude patterns defined by the user if self.exclude and re.search(self.exclude, filename): continue # Don't search in plain text files with skipped extensions # (e.g .svg) if ext in self.SKIPPED_EXTENSIONS: continue # It's much faster to check for extension first before # validating if the file is plain text. 
if ( ext in self.PYTHON_EXTENSIONS or ext in self.USEFUL_EXTENSIONS or ext in EDIT_EXTENSIONS or is_text_file(filename) ): self.find_string_in_file(filename) except re.error: self.error_flag = _("invalid regular expression") return False except FileNotFoundError: return False # Process any pending results if self.partial_results: self.process_results() return True def find_string_in_file(self, fname): self.error_flag = False self.sig_current_file.emit(fname) try: for lineno, line in enumerate(open(fname, 'rb')): for text, enc in self.texts: with QMutexLocker(self.mutex): if self.stopped: return False line_search = line if not self.case_sensitive: line_search = line_search.lower() if self.text_re: found = re.search(text, line_search) if found is not None: break else: found = line_search.find(text) if found > -1: break try: line_dec = line.decode(enc) except UnicodeDecodeError: line_dec = line if not self.case_sensitive: line = line.lower() if self.text_re: for match in re.finditer(text, line): with QMutexLocker(self.mutex): if self.stopped: return False self.total_matches += 1 bstart, bend = match.start(), match.end() try: # Go from binary position to utf8 position start = len(line[:bstart].decode(enc)) end = start + len(line[bstart:bend].decode(enc)) except UnicodeDecodeError: start = bstart end = bend self.partial_results.append((osp.abspath(fname), lineno + 1, start, end, line_dec)) if len(self.partial_results) > (2**self.power): self.process_results() if self.power < self.max_power: self.power += 1 else: found = line.find(text) while found > -1: with QMutexLocker(self.mutex): if self.stopped: return False self.total_matches += 1 try: # Go from binary position to utf8 position start = len(line[:found].decode(enc)) end = start + len(text.decode(enc)) except UnicodeDecodeError: start = found end = found + len(text) self.partial_results.append((osp.abspath(fname), lineno + 1, start, end, line_dec)) if len(self.partial_results) > (2**self.power): 
self.process_results() if self.power < self.max_power: self.power += 1 for text, enc in self.texts: found = line.find(text, found + 1) if found > -1: break except IOError as xxx_todo_changeme: (_errno, _strerror) = xxx_todo_changeme.args self.error_flag = _("permission denied errors were encountered") # Process any pending results if self.is_file and self.partial_results: self.process_results() self.completed = True def process_results(self): """ Process all matches found inside a file. Creates the necessary files and emits signal for the creation of file item. Creates the necessary data for lines found and emits signal for the creation of line items in batch. Creates the title based on the last entry of the lines batch. """ items = [] num_matches = self.total_matches for result in self.partial_results: if self.total_items < self.max_results: filename, lineno, colno, match_end, line = result if filename not in self.files: self.files.append(filename) self.sig_file_match.emit(filename) self.num_files += 1 line = self.truncate_result(line, colno, match_end) item = (filename, lineno, colno, line, match_end) items.append(item) self.total_items += 1 # Process title title = "'%s' - " % self.search_text nb_files = self.num_files if nb_files == 0: text = _('String not found') else: text_matches = _('matches in') text_files = _('file') if nb_files > 1: text_files += 's' text = "%d %s %d %s" % (num_matches, text_matches, nb_files, text_files) title = title + text self.partial_results = [] self.sig_line_match.emit(items, title) def truncate_result(self, line, start, end): """ Shorten text on line to display the match within `max_line_length`. 
""" html_escape_table = { "&": "&amp;", '"': "&quot;", "'": "&apos;", ">": "&gt;", "<": "&lt;", } def html_escape(text): """Produce entities within text.""" return "".join(html_escape_table.get(c, c) for c in text) line = str(line) left, match, right = line[:start], line[start:end], line[end:] if len(line) > MAX_RESULT_LENGTH: offset = (len(line) - len(match)) // 2 left = left.split(' ') num_left_words = len(left) if num_left_words == 1: left = left[0] if len(left) > MAX_NUM_CHAR_FRAGMENT: left = ELLIPSIS + left[-offset:] left = [left] right = right.split(' ') num_right_words = len(right) if num_right_words == 1: right = right[0] if len(right) > MAX_NUM_CHAR_FRAGMENT: right = right[:offset] + ELLIPSIS right = [right] left = left[-4:] right = right[:4] if len(left) < num_left_words: left = [ELLIPSIS] + left if len(right) < num_right_words: right = right + [ELLIPSIS] left = ' '.join(left) right = ' '.join(right) if len(left) > MAX_NUM_CHAR_FRAGMENT: left = ELLIPSIS + left[-30:] if len(right) > MAX_NUM_CHAR_FRAGMENT: right = right[:30] + ELLIPSIS match_color = SpyderPalette.COLOR_OCCURRENCE_4 trunc_line = dict( text=''.join([left, match, right]), formatted_text=( f'<span style="color:{self.text_color}">' f'{html_escape(left)}' f'<span style="background-color:{match_color}">' f'{html_escape(match)}' f'</span>' f'{html_escape(right)}' f'</span>' ) ) return trunc_line def get_results(self): return self.results, self.pathlist, self.total_matches, self.error_flag
SearchThread
python
pytorch__pytorch
benchmarks/transformer/score_mod.py
{ "start": 4901, "end": 5677 }
class ____: shape: tuple[int, ...] # [B, Hq, M, Hkv, N, D] attn_type: str dtype: torch.dtype calculate_bwd_time: bool cal_bandwidth: bool backends: list[str] max_autotune: bool def __post_init__(self): assert len(self.shape) == 6, ( "Shape must be of length 6" ) # [B, Hq, M, Hkv, N, D] def asdict(self): # Convert the dataclass instance to a dictionary d = asdict(self) # Remove the 'calculate_bwd_time' and `cal_bandwidth` key d.pop("calculate_bwd_time", None) d.pop("cal_bandwidth", None) d["shape(B,Hq,M,Hkv,N,D)"] = d.pop("shape") d.pop("backends", None) d.pop("max_autotune", False) return d @dataclass(frozen=True)
ExperimentConfig
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pylint/no_method_decorator.py
{ "start": 27, "end": 493 }
class ____: COLORS = [] def __init__(self, color): self.color = color def pick_colors(cls, *args): # [no-classmethod-decorator] """classmethod to pick fruit colors""" cls.COLORS = args pick_colors = classmethod(pick_colors) def pick_one_color(): # [no-staticmethod-decorator] """staticmethod to pick one fruit color""" return choice(Fruit.COLORS) pick_one_color = staticmethod(pick_one_color)
Fruit
python
viewflow__viewflow
viewflow/workflow/managers.py
{ "start": 1950, "end": 2476 }
class ____(ModelIterable): def __iter__(self): base_iterator = super().__iter__() if getattr(self.queryset, "_coerced", False): for process in base_iterator: if isinstance(process, self.queryset.model): process = coerce_to_related_instance( process, process.flow_class.process_class ) yield process else: for process in base_iterator: yield process
ProcessIterable
python
encode__django-rest-framework
rest_framework/templatetags/rest_framework.py
{ "start": 807, "end": 9820 }
class ____(template.Node): style = 'emacs' def __init__(self, lang, code): self.lang = lang self.nodelist = code def render(self, context): text = self.nodelist.render(context) return pygments_highlight(text, self.lang, self.style) @register.filter() def with_location(fields, location): return [ field for field in fields if field.location == location ] @register.simple_tag def form_for_link(link): import coreschema properties = { field.name: field.schema or coreschema.String() for field in link.fields } required = [ field.name for field in link.fields if field.required ] schema = coreschema.Object(properties=properties, required=required) return mark_safe(coreschema.render_to_form(schema)) @register.simple_tag def render_markdown(markdown_text): if apply_markdown is None: return markdown_text return mark_safe(apply_markdown(markdown_text)) @register.simple_tag def get_pagination_html(pager): return pager.to_html() @register.simple_tag def render_form(serializer, template_pack=None): style = {'template_pack': template_pack} if template_pack else {} renderer = HTMLFormRenderer() return renderer.render(serializer.data, None, {'style': style}) @register.simple_tag def render_field(field, style): renderer = style.get('renderer', HTMLFormRenderer()) return renderer.render_field(field, style) @register.simple_tag def optional_login(request): """ Include a login snippet if REST framework's login view is in the URLconf. """ try: login_url = reverse('rest_framework:login') except NoReverseMatch: return '' snippet = "<li><a href='{href}?next={next}'>Log in</a></li>" snippet = format_html(snippet, href=login_url, next=escape(request.path)) return mark_safe(snippet) @register.simple_tag def optional_docs_login(request): """ Include a login snippet if REST framework's login view is in the URLconf. 
""" try: login_url = reverse('rest_framework:login') except NoReverseMatch: return 'log in' snippet = "<a href='{href}?next={next}'>log in</a>" snippet = format_html(snippet, href=login_url, next=escape(request.path)) return mark_safe(snippet) @register.simple_tag def optional_logout(request, user, csrf_token): """ Include a logout snippet if REST framework's logout view is in the URLconf. """ try: logout_url = reverse('rest_framework:logout') except NoReverseMatch: snippet = format_html('<li class="navbar-text">{user}</li>', user=escape(user)) return mark_safe(snippet) snippet = """<li class="dropdown"> <a href="#" class="dropdown-toggle" data-toggle="dropdown"> {user} <b class="caret"></b> </a> <ul class="dropdown-menu"> <form id="logoutForm" method="post" action="{href}?next={next}"> <input type="hidden" name="csrfmiddlewaretoken" value="{csrf_token}"> </form> <li> <a href="#" onclick='document.getElementById("logoutForm").submit()'>Log out</a> </li> </ul> </li>""" snippet = format_html(snippet, user=escape(user), href=logout_url, next=escape(request.path), csrf_token=csrf_token) return mark_safe(snippet) @register.simple_tag def add_query_param(request, key, val): """ Add a query parameter to the current request url, and return the new url. """ iri = request.get_full_path() uri = iri_to_uri(iri) return escape(replace_query_param(uri, key, val)) @register.filter def as_string(value): if value is None: return '' return '%s' % value @register.filter def as_list_of_strings(value): return [ '' if (item is None) else ('%s' % item) for item in value ] @register.filter def add_class(value, css_class): """ https://stackoverflow.com/questions/4124220/django-adding-css-classes-when-rendering-form-fields-in-a-template Inserts classes into template variables that contain HTML tags, useful for modifying forms without needing to change the Form objects. 
Usage: {{ field.label_tag|add_class:"control-label" }} In the case of REST Framework, the filter is used to add Bootstrap-specific classes to the forms. """ html = str(value) match = class_re.search(html) if match: m = re.search(r'^%s$|^%s\s|\s%s\s|\s%s$' % (css_class, css_class, css_class, css_class), match.group(1)) if not m: return mark_safe(class_re.sub(match.group(1) + " " + css_class, html)) else: return mark_safe(html.replace('>', ' class="%s">' % css_class, 1)) return value @register.filter def format_value(value): if getattr(value, 'is_hyperlink', False): name = str(value.obj) return mark_safe('<a href=%s>%s</a>' % (value, escape(name))) if value is None or isinstance(value, bool): return mark_safe('<code>%s</code>' % {True: 'true', False: 'false', None: 'null'}[value]) elif isinstance(value, list): if any(isinstance(item, (list, dict)) for item in value): template = loader.get_template('rest_framework/admin/list_value.html') else: template = loader.get_template('rest_framework/admin/simple_list_value.html') context = {'value': value} return template.render(context) elif isinstance(value, dict): template = loader.get_template('rest_framework/admin/dict_value.html') context = {'value': value} return template.render(context) elif isinstance(value, str): if ( (value.startswith('http:') or value.startswith('https:') or value.startswith('/')) and not re.search(r'\s', value) ): return mark_safe('<a href="{value}">{value}</a>'.format(value=escape(value))) elif '@' in value and not re.search(r'\s', value): return mark_safe('<a href="mailto:{value}">{value}</a>'.format(value=escape(value))) elif '\n' in value: return mark_safe('<pre>%s</pre>' % escape(value)) return str(value) @register.filter def items(value): """ Simple filter to return the items of the dict. Useful when the dict may have a key 'items' which is resolved first in Django template dot-notation lookup. 
See issue #4931 Also see: https://stackoverflow.com/questions/15416662/django-template-loop-over-dictionary-items-with-items-as-key """ if value is None: # `{% for k, v in value.items %}` doesn't raise when value is None or # not in the context, so neither should `{% for k, v in value|items %}` return [] return value.items() @register.filter def data(value): """ Simple filter to access `data` attribute of object, specifically coreapi.Document. As per `items` filter above, allows accessing `document.data` when Document contains Link keyed-at "data". See issue #5395 """ return value.data @register.filter def schema_links(section, sec_key=None): """ Recursively find every link in a schema, even nested. """ NESTED_FORMAT = '%s > %s' # this format is used in docs/js/api.js:normalizeKeys links = section.links if section.data: data = section.data.items() for sub_section_key, sub_section in data: new_links = schema_links(sub_section, sec_key=sub_section_key) links.update(new_links) if sec_key is not None: new_links = {} for link_key, link in links.items(): new_key = NESTED_FORMAT % (sec_key, link_key) new_links.update({new_key: link}) return new_links return links @register.filter def add_nested_class(value): if isinstance(value, dict): return 'class=nested' if isinstance(value, list) and any(isinstance(item, (list, dict)) for item in value): return 'class=nested' return '' # Bunch of stuff cloned from urlize TRAILING_PUNCTUATION = ['.', ',', ':', ';', '.)', '"', "']", "'}", "'"] WRAPPING_PUNCTUATION = [('(', ')'), ('<', '>'), ('[', ']'), ('&lt;', '&gt;'), ('"', '"'), ("'", "'")] word_split_re = re.compile(r'(\s+)') simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE) simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)$', re.IGNORECASE) simple_email_re = re.compile(r'^\S+@\S+\.\S+$') def smart_urlquote_wrapper(matched_url): """ Simple wrapper for smart_urlquote. 
ValueError("Invalid IPv6 URL") can be raised here, see issue #1386 """ try: return smart_urlquote(matched_url) except ValueError: return None
CodeNode
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/engine/base.py
{ "start": 120013, "end": 122283 }
class ____(log.Identified): _sa_propagate_class_events = False dispatch: dispatcher[ConnectionEventsTarget] _compiled_cache: Optional[CompiledCacheType] dialect: Dialect pool: Pool url: URL hide_parameters: bool echo: log.echo_property def __init__( self, proxied: Engine, execution_options: CoreExecuteOptionsParameter ): self._proxied = proxied self.url = proxied.url self.dialect = proxied.dialect self.logging_name = proxied.logging_name self.echo = proxied.echo self._compiled_cache = proxied._compiled_cache self.hide_parameters = proxied.hide_parameters log.instance_logger(self, echoflag=self.echo) # note: this will propagate events that are assigned to the parent # engine after this OptionEngine is created. Since we share # the events of the parent we also disallow class-level events # to apply to the OptionEngine class directly. # # the other way this can work would be to transfer existing # events only, using: # self.dispatch._update(proxied.dispatch) # # that might be more appropriate however it would be a behavioral # change for logic that assigns events to the parent engine and # would like it to take effect for the already-created sub-engine. self.dispatch = self.dispatch._join(proxied.dispatch) self._execution_options = proxied._execution_options self.update_execution_options(**execution_options) def update_execution_options(self, **opt: Any) -> None: raise NotImplementedError() if not typing.TYPE_CHECKING: # https://github.com/python/typing/discussions/1095 @property def pool(self) -> Pool: return self._proxied.pool @pool.setter def pool(self, pool: Pool) -> None: self._proxied.pool = pool @property def _has_events(self) -> bool: return self._proxied._has_events or self.__dict__.get( "_has_events", False ) @_has_events.setter def _has_events(self, value: bool) -> None: self.__dict__["_has_events"] = value
OptionEngineMixin
python
openai__openai-python
tests/test_transform.py
{ "start": 8326, "end": 8911 }
class ____(TypedDict, total=False): required_prop: Required[Annotated[date, PropertyInfo(format="iso8601", alias="prop")]] @parametrize @pytest.mark.asyncio async def test_datetime_with_alias(use_async: bool) -> None: assert await transform({"required_prop": None}, DateDictWithRequiredAlias, use_async) == {"prop": None} # type: ignore[comparison-overlap] assert await transform( {"required_prop": date.fromisoformat("2023-02-23")}, DateDictWithRequiredAlias, use_async ) == {"prop": "2023-02-23"} # type: ignore[comparison-overlap]
DateDictWithRequiredAlias
python
pytorch__pytorch
benchmarks/tensorexpr/broadcast.py
{ "start": 2021, "end": 2251 }
class ____(BroadcastMulBench): def __init__(self, mode, device, dtype, M, N, K): super().__init__(mode, device, dtype, "row", M, N, K) @staticmethod def module(): return "broadcast_row"
BroadcastRowBench
python
tornadoweb__tornado
tornado/test/ioloop_test.py
{ "start": 20513, "end": 22181 }
class ____(unittest.TestCase): def setUp(self): self.io_loop = IOLoop(make_current=False) def tearDown(self): self.io_loop.close() def test_sync_result(self): with self.assertRaises(gen.BadYieldError): self.io_loop.run_sync(lambda: 42) def test_sync_exception(self): with self.assertRaises(ZeroDivisionError): self.io_loop.run_sync(lambda: 1 / 0) def test_async_result(self): @gen.coroutine def f(): yield gen.moment raise gen.Return(42) self.assertEqual(self.io_loop.run_sync(f), 42) def test_async_exception(self): @gen.coroutine def f(): yield gen.moment 1 / 0 with self.assertRaises(ZeroDivisionError): self.io_loop.run_sync(f) def test_current(self): def f(): self.assertIs(IOLoop.current(), self.io_loop) self.io_loop.run_sync(f) def test_timeout(self): @gen.coroutine def f(): yield gen.sleep(1) self.assertRaises(TimeoutError, self.io_loop.run_sync, f, timeout=0.01) def test_native_coroutine(self): @gen.coroutine def f1(): yield gen.moment async def f2(): await f1() self.io_loop.run_sync(f2) def test_stop_no_timeout(self): async def f(): await asyncio.sleep(0.1) IOLoop.current().stop() await asyncio.sleep(10) with self.assertRaises(RuntimeError) as cm: self.io_loop.run_sync(f) assert "Event loop stopped" in str(cm.exception)
TestIOLoopRunSync
python
pandas-dev__pandas
pandas/io/parsers/readers.py
{ "start": 3722, "end": 4025 }
class ____(TypedDict): na_filter: Literal[True] low_memory: Literal[True] memory_map: Literal[False] float_precision: None _c_parser_defaults: _C_Parser_Defaults = { "na_filter": True, "low_memory": True, "memory_map": False, "float_precision": None, }
_C_Parser_Defaults
python
Pylons__pyramid
tests/test_security.py
{ "start": 1906, "end": 2507 }
class ____(unittest.TestCase): def _getTargetClass(self): from pyramid.security import Denied return Denied def _makeOne(self, *arg, **kw): klass = self._getTargetClass() return klass(*arg, **kw) def test_it(self): denied = self._makeOne('hello') self.assertEqual(denied.msg, 'hello') self.assertEqual(denied, False) self.assertFalse(denied) self.assertEqual(str(denied), 'hello') self.assertTrue('<Denied instance at ' in repr(denied)) self.assertTrue("with msg 'hello'>" in repr(denied))
TestDenied
python
spack__spack
var/spack/test_repos/spack_repo/builtin_mock/packages/symly/package.py
{ "start": 240, "end": 1250 }
class ____(Package): """A toy package full of symlinks.""" homepage = "https://www.example.com" has_code = False version("3.0.0") def install(self, spec, prefix): symly_c = """ #include <stdio.h> int main() { printf("I'm just here to give the build system something to do..."); return 0; } """ mkdirp("%s/symly" % self.stage.source_path) with open("%s/symly/symly.c" % self.stage.source_path, "w", encoding="utf-8") as f: f.write(symly_c) gcc = which("/usr/bin/gcc") if sys.platform == "darwin": gcc = which("/usr/bin/clang") mkdirp(prefix.bin) mkdirp(prefix.lib64) gcc("-o", "symly.bin", "symly/symly.c") print("prefix.bin", prefix.bin) copy("symly.bin", "%s/symly" % prefix.bin) # create a symlinked file. os.symlink("%s/symly" % prefix.bin, "%s/symly" % prefix.lib64) # Create a symlinked directory. os.symlink(prefix.bin, prefix.include)
Symly
python
pandas-dev__pandas
pandas/tests/indexes/datetimes/methods/test_fillna.py
{ "start": 66, "end": 2004 }
class ____: @pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"]) def test_fillna_datetime64(self, tz): # GH 11343 idx = pd.DatetimeIndex(["2011-01-01 09:00", pd.NaT, "2011-01-01 11:00"]) exp = pd.DatetimeIndex( ["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"] ) tm.assert_index_equal(idx.fillna(pd.Timestamp("2011-01-01 10:00")), exp) # tz mismatch exp = pd.Index( [ pd.Timestamp("2011-01-01 09:00"), pd.Timestamp("2011-01-01 10:00", tz=tz), pd.Timestamp("2011-01-01 11:00"), ], dtype=object, ) tm.assert_index_equal(idx.fillna(pd.Timestamp("2011-01-01 10:00", tz=tz)), exp) # object exp = pd.Index( [pd.Timestamp("2011-01-01 09:00"), "x", pd.Timestamp("2011-01-01 11:00")], dtype=object, ) tm.assert_index_equal(idx.fillna("x"), exp) idx = pd.DatetimeIndex(["2011-01-01 09:00", pd.NaT, "2011-01-01 11:00"], tz=tz) exp = pd.DatetimeIndex( ["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"], tz=tz ) tm.assert_index_equal(idx.fillna(pd.Timestamp("2011-01-01 10:00", tz=tz)), exp) exp = pd.Index( [ pd.Timestamp("2011-01-01 09:00", tz=tz), pd.Timestamp("2011-01-01 10:00"), pd.Timestamp("2011-01-01 11:00", tz=tz), ], dtype=object, ) tm.assert_index_equal(idx.fillna(pd.Timestamp("2011-01-01 10:00")), exp) # object exp = pd.Index( [ pd.Timestamp("2011-01-01 09:00", tz=tz), "x", pd.Timestamp("2011-01-01 11:00", tz=tz), ], dtype=object, ) tm.assert_index_equal(idx.fillna("x"), exp)
TestDatetimeIndexFillNA
python
kamyu104__LeetCode-Solutions
Python/find-subarrays-with-equal-sum.py
{ "start": 42, "end": 372 }
class ____(object): def findSubarrays(self, nums): """ :type nums: List[int] :rtype: bool """ lookup = set() for i in xrange(len(nums)-1): if nums[i]+nums[i+1] in lookup: return True lookup.add(nums[i]+nums[i+1]) return False
Solution
python
getsentry__sentry
src/sentry/codecov/endpoints/repository_tokens/repository_tokens.py
{ "start": 976, "end": 4375 }
class ____(CodecovEndpoint): owner = ApiOwner.CODECOV publish_status = { "GET": ApiPublishStatus.PUBLIC, } @extend_schema( operation_id="Retrieves a paginated list of repository tokens for a given owner", parameters=[ GlobalParams.ORG_ID_OR_SLUG, PreventParams.OWNER, PreventParams.LIMIT, PreventParams.NAVIGATION, PreventParams.CURSOR, PreventParams.TOKENS_SORT_BY, ], request=None, responses={ 200: RepositoryTokensSerializer, 400: RESPONSE_BAD_REQUEST, 403: RESPONSE_FORBIDDEN, 404: RESPONSE_NOT_FOUND, }, ) def get(self, request: Request, owner: RpcIntegration, **kwargs) -> Response: """ Retrieves a paginated list of repository tokens for a given owner. """ navigation = request.query_params.get("navigation", NavigationParameter.NEXT.value) limit_param = request.query_params.get("limit", MAX_RESULTS_PER_PAGE) cursor = request.query_params.get("cursor") sort_by = request.query_params.get("sortBy", "-COMMIT_DATE") # Validate sort parameters valid_sort_fields = {"COMMIT_DATE", "NAME"} if sort_by.startswith("-"): sort_field = sort_by[1:] ordering_direction = OrderingDirection.DESC.value else: sort_field = sort_by ordering_direction = OrderingDirection.ASC.value if sort_field not in valid_sort_fields: return Response( status=status.HTTP_400_BAD_REQUEST, data={ "details": f"Invalid sortBy parameter. Allowed values: {', '.join(sorted(valid_sort_fields))}" }, ) sort_by = sort_field owner_slug = owner.name # When calling request.query_params, the URL is decoded so + is replaced with spaces. We need to change them back so Codecov can properly fetch the next page. 
if cursor: cursor = cursor.replace(" ", "+") try: limit = int(limit_param) except ValueError: return Response( status=status.HTTP_400_BAD_REQUEST, data={"details": "provided `limit` parameter must be a positive integer"}, ) if limit <= 0: return Response( status=status.HTTP_400_BAD_REQUEST, data={"details": "provided `limit` parameter must be a positive integer"}, ) variables = { "owner": owner_slug, "direction": ordering_direction, "ordering": sort_by, "first": limit if navigation != NavigationParameter.PREV.value else None, "last": limit if navigation == NavigationParameter.PREV.value else None, "before": cursor if cursor and navigation == NavigationParameter.PREV.value else None, "after": cursor if cursor and navigation == NavigationParameter.NEXT.value else None, } client = CodecovApiClient(git_provider_org=owner_slug) graphql_response = client.query(query=query, variables=variables) repository_tokens = RepositoryTokensSerializer().to_representation(graphql_response.json()) return Response(repository_tokens)
RepositoryTokensEndpoint
python
joke2k__faker
faker/providers/color/hr_HR/__init__.py
{ "start": 98, "end": 6247 }
class ____(ColorProvider): """Implement color provider for ``hr_HR`` locale.""" all_colors = OrderedDict( ( ("Akvamarin", "#7FFFD4"), ("Antikna bijela", "#FAEBD7"), ("Azurna", "#F0FFFF"), ("Bež", "#F5F5DC"), ("Bijela", "#FFFFFF"), ("Bijelo bilje", "#FFFAF0"), ("Bjelokost", "#FFFFF0"), ("Blijeda kudelja", "#EEE8AA"), ("Blijedi badem", "#FFEBCD"), ("Blijedoljubičasta", "#DB7093"), ("Blijedotirkizna", "#AFEEEE"), ("Blijedozelena", "#98FB98"), ("Breskva", "#FFDAB9"), ("Brončana", "#D2B48C"), ("Čeličnoplava", "#4682B4"), ("Čičak", "#D8BFD8"), ("Cijan", "#00FFFF"), ("Čipka", "#FDF5E6"), ("Čokoladna", "#D2691E"), ("Crna", "#000000"), ("Crvena", "#FF0000"), ("Dim", "#F5F5F5"), ("Dodger plava", "#1E90FF"), ("Duboko ružičasta", "#FF1493"), ("Fuksija", "#FF00FF"), ("Gainsboro", "#DCDCDC"), ("Grimizna", "#DC143C"), ("Indigo", "#4B0082"), ("Jelenska koža", "#FFE4B5"), ("Kadetski plava", "#5F9EA0"), ("Kestenjasta", "#800000"), ("Koraljna", "#FF7F50"), ("Kraljevski plava", "#4169E1"), ("Kudelja", "#DAA520"), ("Lan", "#FAF0E6"), ("Lavanda", "#E6E6FA"), ("Limun", "#FFFACD"), ("Lipa", "#00FF00"), ("Ljubičasta", "#EE82EE"), ("Magenta", "#FF00FF"), ("Maslinasta", "#808000"), ("Medljika", "#F0FFF0"), ("Menta", "#F5FFFA"), ("Modro nebo", "#00BFFF"), ("Modrozelena", "#008080"), ("Mornarska", "#000080"), ("Morskozelena", "#2E8B57"), ("Mračno siva", "#696969"), ("Narančasta", "#FFA500"), ("Narančastocrvena", "#FF4500"), ("Narančastoružičasta", "#FA8072"), ("Noćno plava", "#191970"), ("Orhideja", "#DA70D6"), ("Papaja", "#FFEFD5"), ("Peru", "#CD853F"), ("Plava", "#0000FF"), ("Plavi prah", "#B0E0E6"), ("Plavi škriljevac", "#6A5ACD"), ("Plavkasta", "#F0F8FF"), ("Plavo cvijeće", "#6495ED"), ("Plavo nebo", "#87CEEB"), ("Plavoljubičasta", "#8A2BE2"), ("Porculanska", "#FFE4C4"), ("Prljavomaslinasta", "#6B8E23"), ("Proljetnozelena", "#00FF7F"), ("Prozirno bijela", "#F8F8FF"), ("Pšenica", "#F5DEB3"), ("Purpurna", "#800080"), ("Rajčica", "#FF6347"), ("Rumena lavanda", "#FFF0F5"), ("Ružičasta", 
"#FFC0CB"), ("Ružičastosmeđa", "#BC8F8F"), ("Siva", "#808080"), ("Sivi škriljevac", "#708090"), ("Sivožuta", "#F0E68C"), ("Smeđa", "#A52A2A"), ("Smeđe sedlo", "#8B4513"), ("Smeđi pijesak", "#F4A460"), ("Smeđkasto bijela", "#FFDEAD"), ("Snijeg", "#FFFAFA"), ("Srebrna", "#C0C0C0"), ("Srednja akvamarin", "#66CDAA"), ("Srednja crvenoljubičasta", "#C71585"), ("Srednja morskozelena", "#3CB371"), ("Srednja orhideja", "#BA55D3"), ("Srednja plava", "#0000CD"), ("Srednja proljetnozelena", "#00FA9A"), ("Srednja purpurna", "#9370DB"), ("Srednja tirkizna", "#48D1CC"), ("Srednje plavi škriljevac", "#7B68EE"), ("Svijetla čeličnoplava", "#B0C4DE"), ("Svijetla narančastoružičasta", "#FFA07A"), ("Svijetli cijan", "#E0FFFF"), ("Svijetlo drvo", "#DEB887"), ("Svijetlokoraljna", "#F08080"), ("Svijetlomorskozelena", "#20B2AA"), ("Svijetloplava", "#ADD8E6"), ("Svijetloružičasta", "#FFB6C1"), ("Svijetlosiva", "#D3D3D3"), ("Svijetlosivi škriljevac", "#778899"), ("Svijetlozelena", "#90EE90"), ("Svijetložuta kudelja", "#FAFAD2"), ("Svijetložuta", "#FFFFE0"), ("Šamotna opeka", "#B22222"), ("Školjka", "#FFF5EE"), ("Šljiva", "#DDA0DD"), ("Tamna kudelja", "#B8860B"), ("Tamna magenta", "#8B008B"), ("Tamna narančastoružičasta", "#E9967A"), ("Tamna orhideja", "#9932CC"), ("Tamna sivožuta", "#BDB76B"), ("Tamni cijan", "#008B8B"), ("Tamno zelena", "#006400"), ("Tamnocrvena", "#8B0000"), ("Tamnoljubičasta", "#9400D3"), ("Tamnomaslinasta", "#556B2F"), ("Tamnonarančasta", "#FF8C00"), ("Tamnoplava", "#00008B"), ("Tamnoplavi škriljevac", "#483D8B"), ("Tamnosiva", "#A9A9A9"), ("Tamnosivi škriljevac", "#2F4F4F"), ("Tamnotirkizna", "#00CED1"), ("Tamnozelena", "#8FBC8F"), ("Tirkizna", "#40E0D0"), ("Topla ružičasta", "#FF69B4"), ("Vedro nebo", "#87CEFA"), ("Voda", "#00FFFF"), ("Zelena lipa", "#32CD32"), ("Zelena šuma", "#228B22"), ("Zelena tratina", "#7CFC00"), ("Zelena", "#008000"), ("Zeleni liker", "#7FFF00"), ("Zelenožuta", "#ADFF2F"), ("Zlatna", "#FFD700"), ("Žućkastocrvena zemlja", "#CD5C5C"), 
("Žućkastoružičasta", "#FFE4E1"), ("Žućkastosmeđa glina", "#A0522D"), ("Žuta svila", "#FFF8DC"), ("Žuta", "#FFFF00"), ("Žutozelena", "#9ACD32"), ) ) safe_colors = ( "crna", "kestenjasta", "zelena", "mornarska", "maslinasta", "purpurna", "modrozelena", "lipa", "plava", "srebrna", "siva", "žuta", "fuksija", "voda", "bijela", )
Provider
python
coleifer__peewee
tests/sqlite.py
{ "start": 2894, "end": 2976 }
class ____(TestModel): rowid = RowIDField() data = IntegerField()
RowIDModel
python
tensorflow__tensorflow
tensorflow/python/distribute/distribute_lib.py
{ "start": 42264, "end": 87156 }
class ____(object): """A state & compute distribution policy on a list of devices. See [the guide](https://www.tensorflow.org/guide/distributed_training) for overview and examples. See `tf.distribute.StrategyExtended` and [`tf.distribute`](https://www.tensorflow.org/api_docs/python/tf/distribute) for a glossary of concepts mentioned on this page such as "per-replica", _replica_, and _reduce_. In short: * To use it with Keras `compile`/`fit`, [please read](https://www.tensorflow.org/guide/distributed_training#using_tfdistributestrategy_with_keras). * Otherwise, use `tf.distribute.Strategy.scope` to specify that a strategy should be used when building an executing your model. (This puts you in the "cross-replica context" for this strategy, which means the strategy is put in control of things like variable placement.) * If you are writing a custom training loop, you will need to call a few more methods, [see the guide](https://www.tensorflow.org/guide/distributed_training#using_tfdistributestrategy_with_custom_training_loops): * Start by creating a `tf.data.Dataset` normally. * Use `tf.distribute.Strategy.experimental_distribute_dataset` to convert a `tf.data.Dataset` to something that produces "per-replica" values. If you want to manually specify how the dataset should be partitioned across replicas, use `tf.distribute.Strategy.distribute_datasets_from_function` instead. * Use `tf.distribute.Strategy.run` to run a function once per replica, taking values that may be "per-replica" (e.g. from a `tf.distribute.DistributedDataset` object) and returning "per-replica" values. This function is executed in "replica context", which means each operation is performed separately on each replica. * Finally use a method (such as `tf.distribute.Strategy.reduce`) to convert the resulting "per-replica" values into ordinary `Tensor`s. 
A custom training loop can be as simple as: ``` with my_strategy.scope(): @tf.function def distribute_train_epoch(dataset): def replica_fn(input): # process input and return result return result total_result = 0 for x in dataset: per_replica_result = my_strategy.run(replica_fn, args=(x,)) total_result += my_strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_result, axis=None) return total_result dist_dataset = my_strategy.experimental_distribute_dataset(dataset) for _ in range(EPOCHS): train_result = distribute_train_epoch(dist_dataset) ``` This takes an ordinary `dataset` and `replica_fn` and runs it distributed using a particular `tf.distribute.Strategy` named `my_strategy` above. Any variables created in `replica_fn` are created using `my_strategy`'s policy, and library functions called by `replica_fn` can use the `get_replica_context()` API to implement distributed-specific behavior. You can use the `reduce` API to aggregate results across replicas and use this as a return value from one iteration over a `tf.distribute.DistributedDataset`. Or you can use `tf.keras.metrics` (such as loss, accuracy, etc.) to accumulate metrics across steps in a given epoch. See the [custom training loop tutorial](https://www.tensorflow.org/tutorials/distribute/custom_training) for a more detailed example. Note: `tf.distribute.Strategy` currently does not support TensorFlow's partitioned variables (where a single variable is split across multiple devices) at this time. """ # pylint: enable=line-too-long # TODO(josh11b): Partitioned computations, state; sharding # TODO(josh11b): Model parallelism: "replicas" with multiple devices; shuffling def __init__(self, extended): self._extended = extended self._scale_loss_for_estimator = False if not hasattr(extended, "_retrace_functions_for_each_device"): # pylint: disable=protected-access # `extended._retrace_functions_for_each_device` dictates # whether the same function will be retraced when it is called on # different devices. 
try: extended._retrace_functions_for_each_device = ( len(extended.worker_devices) > 1) distribution_strategy_replica_gauge.get_cell("num_replicas").set( self.num_replicas_in_sync) except: # pylint: disable=bare-except # Default for the case where extended.worker_devices can't return # a sensible value. extended._retrace_functions_for_each_device = True # Below are the dicts of axis(int) -> `tf.function`. self._mean_reduce_helper_fns = {} self._reduce_sum_fns = {} # Whether this strategy is designed to work with `ClusterCoordinator`. self._should_use_with_coordinator = False @property def extended(self): """`tf.distribute.StrategyExtended` with additional methods.""" return self._extended @tf_contextlib.contextmanager def _scale_loss_for_estimator_enabled(self): """Scope which sets a flag used for scaling losses in optimizer. Yields: `_scale_loss_for_estimator_enabled` is a context manager with a side effect, but doesn't return a value. """ self._scale_loss_for_estimator = True try: yield finally: self._scale_loss_for_estimator = False # pylint: disable=line-too-long def scope(self): """Context manager to make the strategy current and distribute variables. This method returns a context manager, and is used as follows: >>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"]) >>> # Variable created inside scope: >>> with strategy.scope(): ... mirrored_variable = tf.Variable(1.) >>> mirrored_variable MirroredVariable:{ 0: <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=1.0>, 1: <tf.Variable 'Variable/replica_1:0' shape=() dtype=float32, numpy=1.0> } >>> # Variable created outside scope: >>> regular_variable = tf.Variable(1.) >>> regular_variable <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=1.0> _What happens when Strategy.scope is entered?_ * `strategy` is installed in the global context as the "current" strategy. Inside this scope, `tf.distribute.get_strategy()` will now return this strategy. 
Outside this scope, it returns the default no-op strategy. * Entering the scope also enters the "cross-replica context". See `tf.distribute.StrategyExtended` for an explanation on cross-replica and replica contexts. * Variable creation inside `scope` is intercepted by the strategy. Each strategy defines how it wants to affect the variable creation. Sync strategies like `MirroredStrategy`, `TPUStrategy` and `MultiWorkerMiroredStrategy` create variables replicated on each replica, whereas `ParameterServerStrategy` creates variables on the parameter servers. This is done using a custom `tf.variable_creator_scope`. * In some strategies, a default device scope may also be entered: in `MultiWorkerMiroredStrategy`, a default device scope of "/CPU:0" is entered on each worker. Note: Entering a scope does not automatically distribute a computation, except in the case of high level training framework like keras `model.fit`. If you're not using `model.fit`, you need to use `strategy.run` API to explicitly distribute that computation. See an example in the [custom training loop tutorial](https://www.tensorflow.org/tutorials/distribute/custom_training). _What should be in scope and what should be outside?_ There are a number of requirements on what needs to happen inside the scope. However, in places where we have information about which strategy is in use, we often enter the scope for the user, so they don't have to do it explicitly (i.e. calling those either inside or outside the scope is OK). * Anything that creates variables that should be distributed variables must be called in a `strategy.scope`. This can be accomplished either by directly calling the variable creating function within the scope context, or by relying on another API like `strategy.run` or `keras.Model.fit` to automatically enter it for you. Any variable that is created outside scope will not be distributed and may have performance implications. 
Some common objects that create variables in TF are Models, Optimizers, Metrics. Such objects should always be initialized in the scope, and any functions that may lazily create variables (e.g., `Model.__call__()`, tracing a `tf.function`, etc.) should similarly be called within scope. Another source of variable creation can be a checkpoint restore - when variables are created lazily. Note that any variable created inside a strategy captures the strategy information. So reading and writing to these variables outside the `strategy.scope` can also work seamlessly, without the user having to enter the scope. * Some strategy APIs (such as `strategy.run` and `strategy.reduce`) which require to be in a strategy's scope, enter the scope automatically, which means when using those APIs you don't need to explicitly enter the scope yourself. * When a `tf.keras.Model` is created inside a `strategy.scope`, the Model object captures the scope information. When high level training framework methods such as `model.compile`, `model.fit`, etc. are then called, the captured scope will be automatically entered, and the associated strategy will be used to distribute the training etc. See a detailed example in [distributed keras tutorial](https://www.tensorflow.org/tutorials/distribute/keras). WARNING: Simply calling `model(..)` does not automatically enter the captured scope -- only high level training framework APIs support this behavior: `model.compile`, `model.fit`, `model.evaluate`, `model.predict` and `model.save` can all be called inside or outside the scope. * The following can be either inside or outside the scope: * Creating the input datasets * Defining `tf.function`s that represent your training step * Saving APIs such as `tf.saved_model.save`. Loading creates variables, so that should go inside the scope if you want to train the model in a distributed way. * Checkpoint saving. 
As mentioned above - `checkpoint.restore` may sometimes need to be inside scope if it creates variables. Returns: A context manager. """ return self._extended._scope(self) # pylint: disable=protected-access # pylint: enable=line-too-long @doc_controls.do_not_doc_inheritable # DEPRECATED, moving to `extended` @deprecated(None, "use extended.colocate_vars_with() instead.") def colocate_vars_with(self, colocate_with_variable): """DEPRECATED: use extended.colocate_vars_with() instead.""" return self._extended.colocate_vars_with(colocate_with_variable) @doc_controls.do_not_generate_docs # DEPRECATED: TF 1.x only def make_dataset_iterator(self, dataset): """DEPRECATED TF 1.x ONLY.""" return self._extended._make_dataset_iterator(dataset) # pylint: disable=protected-access @doc_controls.do_not_generate_docs # DEPRECATED: TF 1.x only def make_input_fn_iterator(self, input_fn, replication_mode=InputReplicationMode.PER_WORKER): """DEPRECATED TF 1.x ONLY.""" if replication_mode != InputReplicationMode.PER_WORKER: raise ValueError( "Input replication mode not supported: %r" % replication_mode) with self.scope(): return self.extended._make_input_fn_iterator( # pylint: disable=protected-access input_fn, replication_mode=replication_mode) @doc_controls.do_not_generate_docs # DEPRECATED: TF 1.x only @deprecated(None, "use run() instead") def experimental_run(self, fn, input_iterator=None): """DEPRECATED TF 1.x ONLY.""" with self.scope(): args = (input_iterator.get_next(),) if input_iterator is not None else () return self.run(fn, args=args) def experimental_distribute_dataset(self, dataset, options=None): # pylint: disable=line-too-long """Creates `tf.distribute.DistributedDataset` from `tf.data.Dataset`. The returned `tf.distribute.DistributedDataset` can be iterated over similar to regular datasets. NOTE: The user cannot add any more transformations to a `tf.distribute.DistributedDataset`. You can only create an iterator or examine the `tf.TypeSpec` of the data generated by it. 
See API docs of `tf.distribute.DistributedDataset` to learn more. The following is an example: >>> global_batch_size = 2 >>> # Passing the devices is optional. ... strategy = tf.distribute.MirroredStrategy(devices=["GPU:0", "GPU:1"]) >>> # Create a dataset ... dataset = tf.data.Dataset.range(4).batch(global_batch_size) >>> # Distribute that dataset ... dist_dataset = strategy.experimental_distribute_dataset(dataset) >>> @tf.function ... def replica_fn(input): ... return input*2 >>> result = [] >>> # Iterate over the `tf.distribute.DistributedDataset` ... for x in dist_dataset: ... # process dataset elements ... result.append(strategy.run(replica_fn, args=(x,))) >>> print(result) [PerReplica:{ 0: <tf.Tensor: shape=(1,), dtype=int64, numpy=array([0])>, 1: <tf.Tensor: shape=(1,), dtype=int64, numpy=array([2])> }, PerReplica:{ 0: <tf.Tensor: shape=(1,), dtype=int64, numpy=array([4])>, 1: <tf.Tensor: shape=(1,), dtype=int64, numpy=array([6])> }] Three key actions happening under the hood of this method are batching, sharding, and prefetching. In the code snippet above, `dataset` is batched by `global_batch_size`, and calling `experimental_distribute_dataset` on it rebatches `dataset` to a new batch size that is equal to the global batch size divided by the number of replicas in sync. We iterate through it using a Pythonic for loop. `x` is a `tf.distribute.DistributedValues` containing data for all replicas, and each replica gets data of the new batch size. `tf.distribute.Strategy.run` will take care of feeding the right per-replica data in `x` to the right `replica_fn` executed on each replica. Sharding contains autosharding across multiple workers and within every worker. First, in multi-worker distributed training (i.e. 
when you use `tf.distribute.experimental.MultiWorkerMirroredStrategy` or `tf.distribute.TPUStrategy`), autosharding a dataset over a set of workers means that each worker is assigned a subset of the entire dataset (if the right `tf.data.experimental.AutoShardPolicy` is set). This is to ensure that at each step, a global batch size of non-overlapping dataset elements will be processed by each worker. Autosharding has a couple of different options that can be specified using `tf.data.experimental.DistributeOptions`. Then, sharding within each worker means the method will split the data among all the worker devices (if more than one a present). This will happen regardless of multi-worker autosharding. Note: for autosharding across multiple workers, the default mode is `tf.data.experimental.AutoShardPolicy.AUTO`. This mode will attempt to shard the input dataset by files if the dataset is being created out of reader datasets (e.g. `tf.data.TFRecordDataset`, `tf.data.TextLineDataset`, etc.) or otherwise shard the dataset by data, where each of the workers will read the entire dataset and only process the shard assigned to it. However, if you have less than one input file per worker, we suggest that you disable dataset autosharding across workers by setting the `tf.data.experimental.DistributeOptions.auto_shard_policy` to be `tf.data.experimental.AutoShardPolicy.OFF`. By default, this method adds a prefetch transformation at the end of the user provided `tf.data.Dataset` instance. The argument to the prefetch transformation which is `buffer_size` is equal to the number of replicas in sync. If the above batch splitting and dataset sharding logic is undesirable, please use `tf.distribute.Strategy.distribute_datasets_from_function` instead, which does not do any automatic batching or sharding for you. 
Note: If you are using TPUStrategy, the order in which the data is processed by the workers when using `tf.distribute.Strategy.experimental_distribute_dataset` or `tf.distribute.Strategy.distribute_datasets_from_function` is not guaranteed. This is typically required if you are using `tf.distribute` to scale prediction. You can however insert an index for each element in the batch and order outputs accordingly. Refer to [this snippet](https://www.tensorflow.org/tutorials/distribute/input#caveats) for an example of how to order outputs. Note: Stateful dataset transformations are currently not supported with `tf.distribute.experimental_distribute_dataset` or `tf.distribute.distribute_datasets_from_function`. Any stateful ops that the dataset may have are currently ignored. For example, if your dataset has a `map_fn` that uses `tf.random.uniform` to rotate an image, then you have a dataset graph that depends on state (i.e the random seed) on the local machine where the python process is being executed. For a tutorial on more usage and properties of this method, refer to the [tutorial on distributed input](https://www.tensorflow.org/tutorials/distribute/input#tfdistributestrategyexperimental_distribute_dataset). If you are interested in last partial batch handling, read [this section](https://www.tensorflow.org/tutorials/distribute/input#partial_batches). Args: dataset: `tf.data.Dataset` that will be sharded across all replicas using the rules stated above. options: `tf.distribute.InputOptions` used to control options on how this dataset is distributed. Returns: A `tf.distribute.DistributedDataset`. 
""" distribution_strategy_input_api_counter.get_cell( self.__class__.__name__, "distribute_dataset").increase_by(1) # pylint: enable=line-too-long return self._extended._experimental_distribute_dataset(dataset, options) # pylint: disable=protected-access def distribute_datasets_from_function(self, dataset_fn, options=None): # pylint: disable=line-too-long """Distributes `tf.data.Dataset` instances created by calls to `dataset_fn`. The argument `dataset_fn` that users pass in is an input function that has a `tf.distribute.InputContext` argument and returns a `tf.data.Dataset` instance. It is expected that the returned dataset from `dataset_fn` is already batched by per-replica batch size (i.e. global batch size divided by the number of replicas in sync) and sharded. `tf.distribute.Strategy.distribute_datasets_from_function` does not batch or shard the `tf.data.Dataset` instance returned from the input function. `dataset_fn` will be called on the CPU device of each of the workers and each generates a dataset where every replica on that worker will dequeue one batch of inputs (i.e. if a worker has two replicas, two batches will be dequeued from the `Dataset` every step). This method can be used for several purposes. First, it allows you to specify your own batching and sharding logic. (In contrast, `tf.distribute.experimental_distribute_dataset` does batching and sharding for you.) For example, where `experimental_distribute_dataset` is unable to shard the input files, this method might be used to manually shard the dataset (avoiding the slow fallback behavior in `experimental_distribute_dataset`). In cases where the dataset is infinite, this sharding can be done by creating dataset replicas that differ only in their random seed. The `dataset_fn` should take an `tf.distribute.InputContext` instance where information about batching and input replication can be accessed. 
You can use `element_spec` property of the `tf.distribute.DistributedDataset` returned by this API to query the `tf.TypeSpec` of the elements returned by the iterator. This can be used to set the `input_signature` property of a `tf.function`. Follow `tf.distribute.DistributedDataset.element_spec` to see an example. IMPORTANT: The `tf.data.Dataset` returned by `dataset_fn` should have a per-replica batch size, unlike `experimental_distribute_dataset`, which uses the global batch size. This may be computed using `input_context.get_per_replica_batch_size`. Note: If you are using TPUStrategy, the order in which the data is processed by the workers when using `tf.distribute.Strategy.experimental_distribute_dataset` or `tf.distribute.Strategy.distribute_datasets_from_function` is not guaranteed. This is typically required if you are using `tf.distribute` to scale prediction. You can however insert an index for each element in the batch and order outputs accordingly. Refer to [this snippet](https://www.tensorflow.org/tutorials/distribute/input#caveats) for an example of how to order outputs. Note: Stateful dataset transformations are currently not supported with `tf.distribute.experimental_distribute_dataset` or `tf.distribute.distribute_datasets_from_function`. Any stateful ops that the dataset may have are currently ignored. For example, if your dataset has a `map_fn` that uses `tf.random.uniform` to rotate an image, then you have a dataset graph that depends on state (i.e the random seed) on the local machine where the python process is being executed. For a tutorial on more usage and properties of this method, refer to the [tutorial on distributed input](https://www.tensorflow.org/tutorials/distribute/input#tfdistributestrategyexperimental_distribute_datasets_from_function)). If you are interested in last partial batch handling, read [this section](https://www.tensorflow.org/tutorials/distribute/input#partial_batches). 
Args: dataset_fn: A function taking a `tf.distribute.InputContext` instance and returning a `tf.data.Dataset`. options: `tf.distribute.InputOptions` used to control options on how this dataset is distributed. Returns: A `tf.distribute.DistributedDataset`. """ distribution_strategy_input_api_counter.get_cell( self.__class__.__name__, "distribute_datasets_from_function").increase_by(1) # pylint: enable=line-too-long return self._extended._distribute_datasets_from_function( # pylint: disable=protected-access dataset_fn, options) # TODO(b/162776748): Remove deprecated symbol. @doc_controls.do_not_doc_inheritable @deprecation.deprecated(None, "rename to distribute_datasets_from_function") def experimental_distribute_datasets_from_function(self, dataset_fn, options=None): return self.distribute_datasets_from_function(dataset_fn, options) def run(self, fn, args=(), kwargs=None, options=None): """Invokes `fn` on each replica, with the given arguments. This method is the primary way to distribute your computation with a tf.distribute object. It invokes `fn` on each replica. If `args` or `kwargs` have `tf.distribute.DistributedValues`, such as those produced by a `tf.distribute.DistributedDataset` from `tf.distribute.Strategy.experimental_distribute_dataset` or `tf.distribute.Strategy.distribute_datasets_from_function`, when `fn` is executed on a particular replica, it will be executed with the component of `tf.distribute.DistributedValues` that correspond to that replica. `fn` is invoked under a replica context. `fn` may call `tf.distribute.get_replica_context()` to access members such as `all_reduce`. Please see the module-level docstring of tf.distribute for the concept of replica context. All arguments in `args` or `kwargs` can be a nested structure of tensors, e.g. a list of tensors, in which case `args` and `kwargs` will be passed to the `fn` invoked on each replica. 
Or `args` or `kwargs` can be `tf.distribute.DistributedValues` containing tensors or composite tensors, i.e. `tf.compat.v1.TensorInfo.CompositeTensor`, in which case each `fn` call will get the component of a `tf.distribute.DistributedValues` corresponding to its replica. Note that arbitrary Python values that are not of the types above are not supported. IMPORTANT: Depending on the implementation of `tf.distribute.Strategy` and whether eager execution is enabled, `fn` may be called one or more times. If `fn` is annotated with `tf.function` or `tf.distribute.Strategy.run` is called inside a `tf.function` (eager execution is disabled inside a `tf.function` by default), `fn` is called once per replica to generate a Tensorflow graph, which will then be reused for execution with new inputs. Otherwise, if eager execution is enabled, `fn` will be called once per replica every step just like regular python code. Example usage: 1. Constant tensor input. >>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"]) >>> tensor_input = tf.constant(3.0) >>> @tf.function ... def replica_fn(input): ... return input*2.0 >>> result = strategy.run(replica_fn, args=(tensor_input,)) >>> result PerReplica:{ 0: <tf.Tensor: shape=(), dtype=float32, numpy=6.0>, 1: <tf.Tensor: shape=(), dtype=float32, numpy=6.0> } 2. DistributedValues input. {: value=2} >>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"]) >>> @tf.function ... def run(): ... def value_fn(value_context): ... return value_context.num_replicas_in_sync ... distributed_values = ( ... strategy.experimental_distribute_values_from_function( ... value_fn)) ... def replica_fn2(input): ... return input*2 ... return strategy.run(replica_fn2, args=(distributed_values,)) >>> result = run() >>> result <tf.Tensor: shape=(), dtype=int32, numpy=4> 3. Use `tf.distribute.ReplicaContext` to allreduce values. {: value=3} >>> strategy = tf.distribute.MirroredStrategy(["gpu:0", "gpu:1"]) >>> @tf.function ... def run(): ... 
def value_fn(value_context): ... return tf.constant(value_context.replica_id_in_sync_group) ... distributed_values = ( ... strategy.experimental_distribute_values_from_function( ... value_fn)) ... def replica_fn(input): ... return tf.distribute.get_replica_context().all_reduce( ... "sum", input) ... return strategy.run(replica_fn, args=(distributed_values,)) >>> result = run() >>> result PerReplica:{ 0: <tf.Tensor: shape=(), dtype=int32, numpy=1>, 1: <tf.Tensor: shape=(), dtype=int32, numpy=1> } Args: fn: The function to run on each replica. args: Optional positional arguments to `fn`. Its element can be a tensor, a nested structure of tensors or a `tf.distribute.DistributedValues`. kwargs: Optional keyword arguments to `fn`. Its element can be a tensor, a nested structure of tensors or a `tf.distribute.DistributedValues`. options: An optional instance of `tf.distribute.RunOptions` specifying the options to run `fn`. Returns: Merged return value of `fn` across replicas. The structure of the return value is the same as the return value from `fn`. Each element in the structure can either be `tf.distribute.DistributedValues`, `Tensor` objects, or `Tensor`s (for example, if running on a single replica). """ del options if not isinstance(args, (list, tuple)): raise ValueError( "positional args must be a list or tuple, got {}".format(type(args))) with self.scope(): # tf.distribute supports Eager functions, so AutoGraph should not be # applied when the caller is also in Eager mode. fn = autograph.tf_convert( fn, autograph_ctx.control_status_ctx(), convert_by_default=False) return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs) def reduce(self, reduce_op, value, axis): """Reduce `value` across replicas and return result on current device. >>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"]) >>> def step_fn(): ... i = tf.distribute.get_replica_context().replica_id_in_sync_group ... 
return tf.identity(i) >>> >>> per_replica_result = strategy.run(step_fn) >>> total = strategy.reduce("SUM", per_replica_result, axis=None) >>> total <tf.Tensor: shape=(), dtype=int32, numpy=1> To see how this would look with multiple replicas, consider the same example with MirroredStrategy with 2 GPUs: ```python strategy = tf.distribute.MirroredStrategy(devices=["GPU:0", "GPU:1"]) def step_fn(): i = tf.distribute.get_replica_context().replica_id_in_sync_group return tf.identity(i) per_replica_result = strategy.run(step_fn) # Check devices on which per replica result is: strategy.experimental_local_results(per_replica_result)[0].device # /job:localhost/replica:0/task:0/device:GPU:0 strategy.experimental_local_results(per_replica_result)[1].device # /job:localhost/replica:0/task:0/device:GPU:1 total = strategy.reduce("SUM", per_replica_result, axis=None) # Check device on which reduced result is: total.device # /job:localhost/replica:0/task:0/device:CPU:0 ``` This API is typically used for aggregating the results returned from different replicas, for reporting etc. For example, loss computed from different replicas can be averaged using this API before printing. Note: The result is copied to the "current" device - which would typically be the CPU of the worker on which the program is running. For `TPUStrategy`, it is the first TPU host. For multi client `MultiWorkerMirroredStrategy`, this is CPU of each worker. There are a number of different tf.distribute APIs for reducing values across replicas: * `tf.distribute.ReplicaContext.all_reduce`: This differs from `Strategy.reduce` in that it is for replica context and does not copy the results to the host device. `all_reduce` should be typically used for reductions inside the training step such as gradients. * `tf.distribute.StrategyExtended.reduce_to` and `tf.distribute.StrategyExtended.batch_reduce_to`: These APIs are more advanced versions of `Strategy.reduce` as they allow customizing the destination of the result. 
They are also called in cross replica context. _What should axis be?_ Given a per-replica value returned by `run`, say a per-example loss, the batch will be divided across all the replicas. This function allows you to aggregate across replicas and optionally also across batch elements by specifying the axis parameter accordingly. For example, if you have a global batch size of 8 and 2 replicas, values for examples `[0, 1, 2, 3]` will be on replica 0 and `[4, 5, 6, 7]` will be on replica 1. With `axis=None`, `reduce` will aggregate only across replicas, returning `[0+4, 1+5, 2+6, 3+7]`. This is useful when each replica is computing a scalar or some other value that doesn't have a "batch" dimension (like a gradient or loss). ``` strategy.reduce("sum", per_replica_result, axis=None) ``` Sometimes, you will want to aggregate across both the global batch _and_ all replicas. You can get this behavior by specifying the batch dimension as the `axis`, typically `axis=0`. In this case it would return a scalar `0+1+2+3+4+5+6+7`. ``` strategy.reduce("sum", per_replica_result, axis=0) ``` If there is a last partial batch, you will need to specify an axis so that the resulting shape is consistent across replicas. So if the last batch has size 6 and it is divided into [0, 1, 2, 3] and [4, 5], you would get a shape mismatch unless you specify `axis=0`. If you specify `tf.distribute.ReduceOp.MEAN`, using `axis=0` will use the correct denominator of 6. Contrast this with computing `reduce_mean` to get a scalar value on each replica and this function to average those means, which will weigh some values `1/8` and others `1/4`. Args: reduce_op: a `tf.distribute.ReduceOp` value specifying how values should be combined. Allows using string representation of the enum such as "SUM", "MEAN". value: a `tf.distribute.DistributedValues` instance, e.g. returned by `Strategy.run`, to be combined into a single tensor. 
It can also be a regular tensor when used with `OneDeviceStrategy` or default strategy. axis: specifies the dimension to reduce along within each replica's tensor. Should typically be set to the batch dimension, or `None` to only reduce across replicas (e.g. if the tensor has no batch dimension). Returns: A `Tensor`. """ # TODO(josh11b): support `value` being a nest. _require_cross_replica_or_default_context_extended(self._extended) if isinstance(reduce_op, six.string_types): reduce_op = reduce_util.ReduceOp(reduce_op.upper()) if axis is None: return self._extended._reduce(reduce_op, value) # pylint: disable=protected-access if reduce_op == reduce_util.ReduceOp.SUM: def reduce_sum(v): return math_ops.reduce_sum(v, axis=axis) if eager_context.executing_eagerly(): # As some strategies (e.g. TPUStrategy) doesn't support pure eager # execution, wrap the `reduce_sum_fn` with a `tf.function` so it can be # run from eager mode. Cache the tf.function by `axis` to avoid the # same function to be traced again. if axis not in self._reduce_sum_fns: self._reduce_sum_fns[axis] = def_function.function(reduce_sum) value = self.run(self._reduce_sum_fns[axis], args=(value,)) else: value = self.run(reduce_sum, args=(value,)) return self._extended._reduce(reduce_op, value) # pylint: disable=protected-access if reduce_op != reduce_util.ReduceOp.MEAN: raise TypeError("Expected `reduce_op` to be a `tf.distribute.ReduceOp`, " "not: %r" % reduce_op) def mean_reduce_helper(v, axes=axis): """Computes the numerator and denominator on each replica.""" numer = math_ops.reduce_sum(v, axis=axes) def dimension(axis): if v.shape.rank is not None: # Note(joshl): We support axis < 0 to be consistent with the # tf.math.reduce_* operations. 
if axis < 0: if axis + v.shape.rank < 0: raise ValueError( "`axis` = %r out of range for `value` with rank %d" % (axis, v.shape.rank)) axis += v.shape.rank elif axis >= v.shape.rank: raise ValueError( "`axis` = %r out of range for `value` with rank %d" % (axis, v.shape.rank)) # TF v2 returns `None` for unknown dimensions and an integer for # known dimension, whereas TF v1 returns tensor_shape.Dimension(None) # or tensor_shape.Dimension(integer). `dimension_value` hides this # difference, always returning `None` or an integer. dim = tensor_shape.dimension_value(v.shape[axis]) if dim is not None: # By returning a python value in the static shape case, we can # maybe get a fast path for reducing the denominator. # TODO(b/151871486): Remove array_ops.identity after we fallback to # simple reduction if inputs are all on CPU. return array_ops.identity( constant_op.constant(dim, dtype=dtypes.int64)) elif axis < 0: axis = axis + array_ops.rank(v) # TODO(b/151871486): Remove array_ops.identity after we fallback to # simple reduction if inputs are all on CPU. return array_ops.identity( array_ops.shape_v2(v, out_type=dtypes.int64)[axis]) if isinstance(axis, six.integer_types): denom = dimension(axis) elif isinstance(axis, (tuple, list)): denom = math_ops.reduce_prod([dimension(a) for a in axes]) else: raise TypeError( "Expected `axis` to be an integer, tuple or list not: %r" % axis) # TODO(josh11b): Should we cast denom to v.dtype here instead of after the # reduce is complete? return numer, denom if eager_context.executing_eagerly(): # As some strategies (e.g. TPUStrategy) doesn't support pure eager # execution, wrap the `mean_reduce_helper` with a `tf.function` so it can # be run from eager mode. Cache the tf.function by `axis` to avoid the # same function to be traced again. 
if axis not in self._mean_reduce_helper_fns: self._mean_reduce_helper_fns[axis] = def_function.function( mean_reduce_helper) numer, denom = self.run(self._mean_reduce_helper_fns[axis], args=(value,)) else: numer, denom = self.run(mean_reduce_helper, args=(value,)) # TODO(josh11b): Should batch reduce here instead of doing two. numer = self._extended._reduce(reduce_util.ReduceOp.SUM, numer) # pylint: disable=protected-access denom = self._extended._reduce(reduce_util.ReduceOp.SUM, denom) # pylint: disable=protected-access denom = math_ops.cast(denom, numer.dtype) return math_ops.truediv(numer, denom) @doc_controls.do_not_doc_inheritable # DEPRECATED @deprecated(None, "use `experimental_local_results` instead.") def unwrap(self, value): """Returns the list of all local per-replica values contained in `value`. DEPRECATED: Please use `experimental_local_results` instead. Note: This only returns values on the workers initiated by this client. When using a `tf.distribute.Strategy` like `tf.distribute.experimental.MultiWorkerMirroredStrategy`, each worker will be its own client, and this function will only return values computed on that worker. Args: value: A value returned by `experimental_run()`, `extended.call_for_each_replica()`, or a variable created in `scope`. Returns: A tuple of values contained in `value`. If `value` represents a single value, this returns `(value,).` """ return self._extended._local_results(value) # pylint: disable=protected-access def experimental_local_results(self, value): """Returns the list of all local per-replica values contained in `value`. Note: This only returns values on the worker initiated by this client. When using a `tf.distribute.Strategy` like `tf.distribute.experimental.MultiWorkerMirroredStrategy`, each worker will be its own client, and this function will only return values computed on that worker. Args: value: A value returned by `experimental_run()`, `run(), or a variable created in `scope`. 
Returns: A tuple of values contained in `value` where ith element corresponds to ith replica. If `value` represents a single value, this returns `(value,).` """ return self._extended._local_results(value) # pylint: disable=protected-access @doc_controls.do_not_doc_inheritable # DEPRECATED: TF v1.x only def group(self, value, name=None): """Shortcut for `tf.group(self.experimental_local_results(value))`.""" return self._extended._group(value, name) # pylint: disable=protected-access @property def num_replicas_in_sync(self): """Returns number of replicas over which gradients are aggregated.""" return self._extended._num_replicas_in_sync # pylint: disable=protected-access @doc_controls.do_not_doc_inheritable # DEPRECATED: see doc string @deprecated(None, "use `update_config_proto` instead.") def configure(self, session_config=None, cluster_spec=None, task_type=None, task_id=None): # pylint: disable=g-doc-return-or-yield,g-doc-args """DEPRECATED: use `update_config_proto` instead. Configures the strategy class. DEPRECATED: This method's functionality has been split into the strategy constructor and `update_config_proto`. In the future, we will allow passing cluster and config_proto to the constructor to configure the strategy. And `update_config_proto` can be used to update the config_proto based on the specific strategy. """ return self._extended._configure( # pylint: disable=protected-access session_config, cluster_spec, task_type, task_id) @doc_controls.do_not_generate_docs # DEPRECATED def update_config_proto(self, config_proto): """DEPRECATED TF 1.x ONLY.""" return self._extended._update_config_proto(config_proto) # pylint: disable=protected-access def __deepcopy__(self, memo): # First do a regular deepcopy of `self`. cls = self.__class__ result = cls.__new__(cls) memo[id(self)] = result for k, v in self.__dict__.items(): setattr(result, k, copy.deepcopy(v, memo)) # One little fix-up: we want `result._extended` to reference `result` # instead of `self`. 
result._extended._container_strategy_weakref = weakref.ref(result) # pylint: disable=protected-access return result def __copy__(self): raise RuntimeError("Must only deepcopy DistributionStrategy.") @property def cluster_resolver(self): """Returns the cluster resolver associated with this strategy. In general, when using a multi-worker `tf.distribute` strategy such as `tf.distribute.experimental.MultiWorkerMirroredStrategy` or `tf.distribute.TPUStrategy()`, there is a `tf.distribute.cluster_resolver.ClusterResolver` associated with the strategy used, and such an instance is returned by this property. Strategies that intend to have an associated `tf.distribute.cluster_resolver.ClusterResolver` must set the relevant attribute, or override this property; otherwise, `None` is returned by default. Those strategies should also provide information regarding what is returned by this property. Single-worker strategies usually do not have a `tf.distribute.cluster_resolver.ClusterResolver`, and in those cases this property will return `None`. The `tf.distribute.cluster_resolver.ClusterResolver` may be useful when the user needs to access information such as the cluster spec, task type or task id. For example, ```python os.environ['TF_CONFIG'] = json.dumps({ 'cluster': { 'worker': ["localhost:12345", "localhost:23456"], 'ps': ["localhost:34567"] }, 'task': {'type': 'worker', 'index': 0} }) # This implicitly uses TF_CONFIG for the cluster and current task info. strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() ... if strategy.cluster_resolver.task_type == 'worker': # Perform something that's only applicable on workers. Since we set this # as a worker above, this block will run on this particular instance. elif strategy.cluster_resolver.task_type == 'ps': # Perform something that's only applicable on parameter servers. Since we # set this as a worker above, this block will not run on this particular # instance. 
``` For more information, please see `tf.distribute.cluster_resolver.ClusterResolver`'s API docstring. Returns: The cluster resolver associated with this strategy. Returns `None` if a cluster resolver is not applicable or available in this strategy. """ if hasattr(self.extended, "_cluster_resolver"): return self.extended._cluster_resolver # pylint: disable=protected-access return None @tf_export("distribute.Strategy", v1=[]) # pylint: disable=g-missing-docstring
StrategyBase
python
openai__openai-python
src/openai/types/chat/chat_completion_custom_tool_param.py
{ "start": 529, "end": 775 }
class ____(TypedDict, total=False): definition: Required[str] """The grammar definition.""" syntax: Required[Literal["lark", "regex"]] """The syntax of the grammar definition. One of `lark` or `regex`."""
CustomFormatGrammarGrammar
python
great-expectations__great_expectations
great_expectations/data_context/store/configuration_store.py
{ "start": 1070, "end": 5466 }
class ____(Store): """ Configuration Store provides a way to store any Marshmallow Schema compatible Configuration (using the YAML format). """ # noqa: E501 # FIXME CoP _key_class = ConfigurationIdentifier _configuration_class = BaseYamlConfig def __init__( self, store_name: str, store_backend: Optional[dict] = None, overwrite_existing: bool = False, runtime_environment: Optional[dict] = None, ) -> None: if not issubclass(self._configuration_class, BaseYamlConfig): raise gx_exceptions.DataContextError( # noqa: TRY003 # FIXME CoP "Invalid configuration: A configuration_class needs to inherit from the BaseYamlConfig class." # noqa: E501 # FIXME CoP ) if store_backend is not None: store_backend_module_name = store_backend.get( "module_name", "great_expectations.data_context.store" ) store_backend_class_name = store_backend.get("class_name", "InMemoryStoreBackend") verify_dynamic_loading_support(module_name=store_backend_module_name) store_backend_class = load_class(store_backend_class_name, store_backend_module_name) # Store Backend Class was loaded successfully; verify that it is of a correct subclass. if issubclass(store_backend_class, TupleStoreBackend): # Provide defaults for this common case store_backend["filepath_suffix"] = store_backend.get("filepath_suffix", ".yml") super().__init__( store_backend=store_backend, runtime_environment=runtime_environment, store_name=store_name, ) # Gather the call arguments of the present function (include the "module_name" and add the "class_name"), filter # noqa: E501 # FIXME CoP # out the Falsy values, and set the instance "_config" variable equal to the resulting dictionary. 
# noqa: E501 # FIXME CoP self._config = { "store_name": store_name, "store_backend": store_backend, "overwrite_existing": overwrite_existing, "runtime_environment": runtime_environment, "module_name": self.__class__.__module__, "class_name": self.__class__.__name__, } filter_properties_dict(properties=self._config, clean_falsy=True, inplace=True) self._overwrite_existing = overwrite_existing def serialize(self, value): # type: ignore[explicit-override] # FIXME if self.cloud_mode: # GXCloudStoreBackend expects a json str config_schema = value.get_schema_class()() return config_schema.dump(value) return value.to_yaml_str() def deserialize(self, value): # type: ignore[explicit-override] # FIXME config = value if isinstance(value, str): config: CommentedMap = yaml.load(value) try: return self._configuration_class.from_commented_map(commented_map=config) except gx_exceptions.InvalidBaseYamlConfigError: # Just to be explicit about what we intended to catch raise except marshmallow.ValidationError as e: raise gx_exceptions.InvalidBaseYamlConfigError( # noqa: TRY003 # FIXME CoP f"Deserialized configuration failed validation: {e}" ) @property def overwrite_existing(self) -> bool: return self._overwrite_existing @overwrite_existing.setter def overwrite_existing(self, overwrite_existing: bool) -> None: self._overwrite_existing = overwrite_existing @property @override def config(self) -> dict: return self._config def get_key( self, name: Optional[str] = None, id: Optional[str] = None ) -> Union[GXCloudIdentifier, ConfigurationIdentifier]: assert bool(name) ^ bool(id), "Must provide either name or id." key: Union[GXCloudIdentifier, ConfigurationIdentifier] if id or self.cloud_mode: key = GXCloudIdentifier( resource_type=GXCloudRESTResource.CHECKPOINT, id=id, resource_name=name, ) else: key = ConfigurationIdentifier(configuration_key=name) # type: ignore[arg-type] # FIXME CoP return key
ConfigurationStore
python
pypa__warehouse
tests/common/db/base.py
{ "start": 115, "end": 546 }
class ____(SQLAlchemyModelFactory): class Meta: abstract = True sqlalchemy_session = Session @classmethod def _setup_next_sequence(cls, *args, **kwargs): return 0 @classmethod def _create(cls, *args, **kwargs): r = super()._create(*args, **kwargs) session = cls._meta.sqlalchemy_session session.flush() session.expire_all() return r
WarehouseFactory
python
ZoranPandovski__al-go-rithms
data_structures/Tree/python/tree_utils.py
{ "start": 179, "end": 1449 }
class ____(object): def in_order_traversal(self, root): if root: self.in_order_traversal(root.left) print(str(root.data)) self.in_order_traversal(root.right) def pre_order_traversal(self, root): if root: print(str(root.data)) self.pre_order_traversal(root.left) self.pre_order_traversal(root.right) def post_order_traversal(self, root): if root: self.post_order_traversal(root.left) self.post_order_traversal(root.right) print(str(root.data)) def level_order(self, root): level_queue = list() level_queue.append(root) while level_queue: current_node = level_queue.pop(0) print(current_node.data) if current_node.left: level_queue.append(current_node.left) if current_node.right: level_queue.append(current_node.right) def test(): l1 = TreeNode(4, None, None) l2 = TreeNode(5, None, None) l3 = TreeNode(6, None, None) l4 = TreeNode(7, None, None) l11 = TreeNode(2, l1, l2) l21 = TreeNode(3, l3, l4) root = TreeNode(1, l11, l21) traversal = Traversals() print("In-Order:") traversal.in_order_traversal(root) print("Pre-Order:") traversal.pre_order_traversal(root) print("Post-Order:") traversal.post_order_traversal(root) print("Level Order") traversal.level_order(root) test()
Traversals
python
doocs__leetcode
solution/1200-1299/1210.Minimum Moves to Reach Target with Rotations/Solution.py
{ "start": 0, "end": 1364 }
class ____: def minimumMoves(self, grid: List[List[int]]) -> int: def move(i1, j1, i2, j2): if 0 <= i1 < n and 0 <= j1 < n and 0 <= i2 < n and 0 <= j2 < n: a, b = i1 * n + j1, i2 * n + j2 status = 0 if i1 == i2 else 1 if (a, status) not in vis and grid[i1][j1] == 0 and grid[i2][j2] == 0: q.append((a, b)) vis.add((a, status)) n = len(grid) target = (n * n - 2, n * n - 1) q = deque([(0, 1)]) vis = {(0, 0)} ans = 0 while q: for _ in range(len(q)): a, b = q.popleft() if (a, b) == target: return ans i1, j1 = a // n, a % n i2, j2 = b // n, b % n # 尝试向右平移(保持身体水平/垂直状态) move(i1, j1 + 1, i2, j2 + 1) # 尝试向下平移(保持身体水平/垂直状态) move(i1 + 1, j1, i2 + 1, j2) # 当前处于水平状态,且 grid[i1 + 1][j2] 无障碍,尝试顺时针旋转90° if i1 == i2 and i1 + 1 < n and grid[i1 + 1][j2] == 0: move(i1, j1, i1 + 1, j1) # 当前处于垂直状态,且 grid[i2][j1 + 1] 无障碍,尝试逆时针旋转90° if j1 == j2 and j1 + 1 < n and grid[i2][j1 + 1] == 0: move(i1, j1, i1, j1 + 1) ans += 1 return -1
Solution
python
oauthlib__oauthlib
oauthlib/oauth2/rfc6749/errors.py
{ "start": 12227, "end": 12934 }
class ____(OAuth2Error): """ This error is a placeholder for all custom errors not described by the RFC. Some of the popular OAuth2 providers are using custom errors. """ def __init__(self, error, *args, **kwargs): self.error = error super().__init__(*args, **kwargs) def raise_from_error(error, params=None): kwargs = { 'description': params.get('error_description'), 'uri': params.get('error_uri'), 'state': params.get('state') } for _, cls in inspect.getmembers(sys.modules[__name__], inspect.isclass): if cls.error == error: raise cls(**kwargs) raise CustomOAuth2Error(error=error, **kwargs)
CustomOAuth2Error
python
astropy__astropy
astropy/timeseries/periodograms/lombscargle/core.py
{ "start": 664, "end": 28092 }
class ____(BasePeriodogram): """Compute the Lomb-Scargle Periodogram. This implementations here are based on code presented in [1]_ and [2]_; if you use this functionality in an academic application, citation of those works would be appreciated. Parameters ---------- t : array-like or `~astropy.units.Quantity` ['time'] sequence of observation times y : array-like or `~astropy.units.Quantity` sequence of observations associated with times t dy : float, array-like, or `~astropy.units.Quantity`, optional error or sequence of observational errors associated with times t fit_mean : bool, optional if True, include a constant offset as part of the model at each frequency. This can lead to more accurate results, especially in the case of incomplete phase coverage. center_data : bool, optional if True, pre-center the data by subtracting the weighted mean of the input data. This is especially important if fit_mean = False nterms : int, optional number of terms to use in the Fourier fit normalization : {'standard', 'model', 'log', 'psd'}, optional Normalization to use for the periodogram. 
Examples -------- Generate noisy periodic data: >>> rand = np.random.default_rng(42) >>> t = 100 * rand.random(100) >>> y = np.sin(2 * np.pi * t) + rand.standard_normal(100) Compute the Lomb-Scargle periodogram on an automatically-determined frequency grid & find the frequency of max power: >>> frequency, power = LombScargle(t, y).autopower() >>> frequency[np.argmax(power)] # doctest: +FLOAT_CMP np.float64(1.0007641728995051) Compute the Lomb-Scargle periodogram at a user-specified frequency grid: >>> freq = np.arange(0.8, 1.3, 0.1) >>> LombScargle(t, y).power(freq) # doctest: +FLOAT_CMP array([0.0792948 , 0.01778874, 0.25328167, 0.01064157, 0.01471387]) If the inputs are astropy Quantities with units, the units will be validated and the outputs will also be Quantities with appropriate units: >>> from astropy import units as u >>> t = t * u.s >>> y = y * u.mag >>> frequency, power = LombScargle(t, y).autopower() >>> frequency.unit Unit("1 / s") >>> power.unit Unit(dimensionless) Note here that the Lomb-Scargle power is always a unitless quantity, because it is related to the :math:`\\chi^2` of the best-fit periodic model at each frequency. References ---------- .. [1] Vanderplas, J., Connolly, A. Ivezic, Z. & Gray, A. *Introduction to astroML: Machine learning for astrophysics*. Proceedings of the Conference on Intelligent Data Understanding (2012) .. [2] VanderPlas, J. & Ivezic, Z. *Periodograms for Multiband Astronomical Time Series*. ApJ 812.1:18 (2015) """ available_methods = available_methods() def __init__( self, t, y, dy=None, fit_mean=True, center_data=True, nterms=1, normalization="standard", ): # If t is a TimeDelta, convert it to a quantity. The units we convert # to don't really matter since the user gets a Quantity back at the end # so can convert to any units they like. 
if isinstance(t, TimeDelta): t = t.to("day") # We want to expose self.t as being the times the user passed in, but # if the times are absolute, we need to convert them to relative times # internally, so we use self._trel and self._tstart for this. self.t = t if isinstance(self.t, Time): self._tstart = self.t[0] trel = (self.t - self._tstart).to(u.day) else: self._tstart = None trel = self.t self._trel, self.y, self.dy = self._validate_inputs(trel, y, dy) self.fit_mean = fit_mean self.center_data = center_data self.nterms = nterms self.normalization = normalization def _validate_inputs(self, t, y, dy): # Validate shapes of inputs if dy is None: t, y = np.broadcast_arrays(t, y, subok=True) else: t, y, dy = np.broadcast_arrays(t, y, dy, subok=True) if t.ndim != 1: raise ValueError("Inputs (t, y, dy) must be 1-dimensional") # validate units of inputs if any is a Quantity if any(has_units(arr) for arr in (t, y, dy)): t, y = map(units.Quantity, (t, y)) if dy is not None: dy = units.Quantity(dy) try: dy = units.Quantity(dy, unit=y.unit) except units.UnitConversionError: raise ValueError("Units of dy not equivalent to units of y") return t, y, dy def _validate_frequency(self, frequency): frequency = np.asanyarray(frequency) if has_units(self._trel): frequency = units.Quantity(frequency) try: frequency = units.Quantity(frequency, unit=1.0 / self._trel.unit) except units.UnitConversionError: raise ValueError("Units of frequency not equivalent to units of 1/t") else: if has_units(frequency): raise ValueError("frequency have units while 1/t doesn't.") return frequency def _validate_t(self, t): t = np.asanyarray(t) if has_units(self._trel): t = units.Quantity(t) try: t = units.Quantity(t, unit=self._trel.unit) except units.UnitConversionError: raise ValueError("Units of t not equivalent to units of input self.t") return t def _power_unit(self, norm): if has_units(self.y): if self.dy is None and norm == "psd": return self.y.unit**2 else: return units.dimensionless_unscaled else: 
return 1 def autofrequency( self, samples_per_peak=5, nyquist_factor=5, minimum_frequency=None, maximum_frequency=None, return_freq_limits=False, ): """Determine a suitable frequency grid for data. Note that this assumes the peak width is driven by the observational baseline, which is generally a good assumption when the baseline is much larger than the oscillation period. If you are searching for periods longer than the baseline of your observations, this may not perform well. Even with a large baseline, be aware that the maximum frequency returned is based on the concept of "average Nyquist frequency", which may not be useful for irregularly-sampled data. The maximum frequency can be adjusted via the nyquist_factor argument, or through the maximum_frequency argument. Parameters ---------- samples_per_peak : float, optional The approximate number of desired samples across the typical peak nyquist_factor : float, optional The multiple of the average nyquist frequency used to choose the maximum frequency if maximum_frequency is not provided. minimum_frequency : float, optional If specified, then use this minimum frequency rather than one chosen based on the size of the baseline. maximum_frequency : float, optional If specified, then use this maximum frequency rather than one chosen based on the average nyquist frequency. return_freq_limits : bool, optional if True, return only the frequency limits rather than the full frequency grid. 
Returns ------- frequency : ndarray or `~astropy.units.Quantity` ['frequency'] The heuristically-determined optimal frequency bin """ baseline = self._trel.max() - self._trel.min() n_samples = self._trel.size df = 1.0 / baseline / samples_per_peak if minimum_frequency is None: minimum_frequency = 0.5 * df if maximum_frequency is None: avg_nyquist = 0.5 * n_samples / baseline maximum_frequency = nyquist_factor * avg_nyquist Nf = 1 + int(np.round((maximum_frequency - minimum_frequency) / df)) if return_freq_limits: return minimum_frequency, minimum_frequency + df * (Nf - 1) else: return minimum_frequency + df * np.arange(Nf) def autopower( self, method="auto", method_kwds=None, normalization=None, samples_per_peak=5, nyquist_factor=5, minimum_frequency=None, maximum_frequency=None, ): """Compute Lomb-Scargle power at automatically-determined frequencies. Parameters ---------- method : str, optional specify the lomb scargle implementation to use. Options are: - 'auto': choose the best method based on the input - 'fast': use the O[N log N] fast method. Note that this requires evenly-spaced frequencies: by default this will be checked unless ``assume_regular_frequency`` is set to True. - 'slow': use the O[N^2] pure-python implementation - 'cython': use the O[N^2] cython implementation. This is slightly faster than method='slow', but much more memory efficient. - 'chi2': use the O[N^2] chi2/linear-fitting implementation - 'fastchi2': use the O[N log N] chi2 implementation. Note that this requires evenly-spaced frequencies: by default this will be checked unless ``assume_regular_frequency`` is set to True. - 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2] implementation written in C. Note that this does not support heteroskedastic errors. method_kwds : dict, optional additional keywords to pass to the lomb-scargle method normalization : {'standard', 'model', 'log', 'psd'}, optional If specified, override the normalization specified at instantiation. 
samples_per_peak : float, optional The approximate number of desired samples across the typical peak nyquist_factor : float, optional The multiple of the average nyquist frequency used to choose the maximum frequency if maximum_frequency is not provided. minimum_frequency : float or `~astropy.units.Quantity` ['frequency'], optional If specified, then use this minimum frequency rather than one chosen based on the size of the baseline. Should be `~astropy.units.Quantity` if inputs to LombScargle are `~astropy.units.Quantity`. maximum_frequency : float or `~astropy.units.Quantity` ['frequency'], optional If specified, then use this maximum frequency rather than one chosen based on the average nyquist frequency. Should be `~astropy.units.Quantity` if inputs to LombScargle are `~astropy.units.Quantity`. Returns ------- frequency, power : ndarray The frequency and Lomb-Scargle power """ frequency = self.autofrequency( samples_per_peak=samples_per_peak, nyquist_factor=nyquist_factor, minimum_frequency=minimum_frequency, maximum_frequency=maximum_frequency, ) power = self.power( frequency, normalization=normalization, method=method, method_kwds=method_kwds, assume_regular_frequency=True, ) return frequency, power def power( self, frequency, normalization=None, method="auto", assume_regular_frequency=False, method_kwds=None, ): """Compute the Lomb-Scargle power at the given frequencies. Parameters ---------- frequency : array-like or `~astropy.units.Quantity` ['frequency'] frequencies (not angular frequencies) at which to evaluate the periodogram. Note that in order to use method='fast', frequencies must be regularly-spaced. method : str, optional specify the lomb scargle implementation to use. Options are: - 'auto': choose the best method based on the input - 'fast': use the O[N log N] fast method. Note that this requires evenly-spaced frequencies: by default this will be checked unless ``assume_regular_frequency`` is set to True. 
- 'slow': use the O[N^2] pure-python implementation - 'cython': use the O[N^2] cython implementation. This is slightly faster than method='slow', but much more memory efficient. - 'chi2': use the O[N^2] chi2/linear-fitting implementation - 'fastchi2': use the O[N log N] chi2 implementation. Note that this requires evenly-spaced frequencies: by default this will be checked unless ``assume_regular_frequency`` is set to True. - 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2] implementation written in C. Note that this does not support heteroskedastic errors. assume_regular_frequency : bool, optional if True, assume that the input frequency is of the form freq = f0 + df * np.arange(N). Only referenced if method is 'auto' or 'fast'. normalization : {'standard', 'model', 'log', 'psd'}, optional If specified, override the normalization specified at instantiation. method_kwds : dict, optional additional keywords to pass to the lomb-scargle method Returns ------- power : ndarray The Lomb-Scargle power at the specified frequency """ if normalization is None: normalization = self.normalization frequency = self._validate_frequency(frequency) power = lombscargle( *strip_units(self._trel, self.y, self.dy), frequency=strip_units(frequency), center_data=self.center_data, fit_mean=self.fit_mean, nterms=self.nterms, normalization=normalization, method=method, method_kwds=method_kwds, assume_regular_frequency=assume_regular_frequency, ) return power * self._power_unit(normalization) def _as_relative_time(self, name, times): """ Convert the provided times (if absolute) to relative times using the current _tstart value. If the times provided are relative, they are returned without conversion (though we still do some checks). """ if isinstance(times, TimeDelta): times = times.to("day") if self._tstart is None: if isinstance(times, Time): raise TypeError( f"{name} was provided as an absolute time but " "the LombScargle class was initialized " "with relative times." 
) else: if isinstance(times, Time): times = (times - self._tstart).to(u.day) else: raise TypeError( f"{name} was provided as a relative time but " "the LombScargle class was initialized " "with absolute times." ) return times def model(self, t, frequency): """Compute the Lomb-Scargle model at the given frequency. The model at a particular frequency is a linear model: model = offset + dot(design_matrix, model_parameters) Parameters ---------- t : array-like or `~astropy.units.Quantity` ['time'] Times (length ``n_samples``) at which to compute the model. frequency : float the frequency for the model Returns ------- y : np.ndarray The model fit corresponding to the input times (will have length ``n_samples``). See Also -------- design_matrix offset model_parameters """ frequency = self._validate_frequency(frequency) t = self._validate_t(self._as_relative_time("t", t)) y_fit = periodic_fit( *strip_units(self._trel, self.y, self.dy), frequency=strip_units(frequency), t_fit=strip_units(t), center_data=self.center_data, fit_mean=self.fit_mean, nterms=self.nterms, ) return y_fit * get_unit(self.y) def offset(self): """Return the offset of the model. The offset of the model is the (weighted) mean of the y values. Note that if self.center_data is False, the offset is 0 by definition. Returns ------- offset : scalar See Also -------- design_matrix model model_parameters """ y, dy = strip_units(self.y, self.dy) if dy is None: dy = 1 dy = np.broadcast_to(dy, y.shape) if self.center_data: w = dy**-2.0 y_mean = np.dot(y, w) / w.sum() else: y_mean = 0 return y_mean * get_unit(self.y) def model_parameters(self, frequency, units=True): r"""Compute the best-fit model parameters at the given frequency. The model described by these parameters is: .. math:: y(t; f, \vec{\theta}) = \theta_0 + \sum_{n=1}^{\tt nterms} [\theta_{2n-1}\sin(2\pi n f t) + \theta_{2n}\cos(2\pi n f t)] where :math:`\vec{\theta}` is the array of parameters returned by this function. 
Parameters ---------- frequency : float the frequency for the model units : bool If True (default), return design matrix with data units. Returns ------- theta : np.ndarray (n_parameters,) The best-fit model parameters at the given frequency. See Also -------- design_matrix model offset """ frequency = self._validate_frequency(frequency) t, y, dy = strip_units(self._trel, self.y, self.dy) if self.center_data: y = y - strip_units(self.offset()) dy = np.ones_like(y) if dy is None else np.asarray(dy) X = self.design_matrix(frequency) parameters = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, y / dy)) if units: parameters = get_unit(self.y) * parameters return parameters def design_matrix(self, frequency, t=None): """Compute the design matrix for a given frequency. Parameters ---------- frequency : float the frequency for the model t : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time` (optional) Times (length ``n_samples``) at which to compute the model. If not specified, then the times and uncertainties of the input data are used. Returns ------- X : array The design matrix for the model at the given frequency. This should have a shape of (``len(t)``, ``n_parameters``). See Also -------- model model_parameters offset """ if t is None: t, dy = strip_units(self._trel, self.dy) else: t, dy = strip_units(self._validate_t(self._as_relative_time("t", t)), None) return design_matrix(t, frequency, dy, nterms=self.nterms, bias=self.fit_mean) def distribution(self, power, cumulative=False): """Expected periodogram distribution under the null hypothesis. This computes the expected probability distribution or cumulative probability distribution of periodogram power, under the null hypothesis of a non-varying signal with Gaussian noise. Note that this is not the same as the expected distribution of peak values; for that see the ``false_alarm_probability()`` method. Parameters ---------- power : array-like The periodogram power at which to compute the distribution. 
cumulative : bool, optional If True, then return the cumulative distribution. See Also -------- false_alarm_probability false_alarm_level Returns ------- dist : np.ndarray The probability density or cumulative probability associated with the provided powers. """ dH = 1 if self.fit_mean or self.center_data else 0 dK = dH + 2 * self.nterms dist = _statistics.cdf_single if cumulative else _statistics.pdf_single return dist(power, len(self._trel), self.normalization, dH=dH, dK=dK) def false_alarm_probability( self, power, method="baluev", samples_per_peak=5, nyquist_factor=5, minimum_frequency=None, maximum_frequency=None, method_kwds=None, ): """False alarm probability of periodogram maxima under the null hypothesis. This gives an estimate of the false alarm probability given the height of the largest peak in the periodogram, based on the null hypothesis of non-varying data with Gaussian noise. Parameters ---------- power : array-like The periodogram value. method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional The approximation method to use. maximum_frequency : float The maximum frequency of the periodogram. method_kwds : dict, optional Additional method-specific keywords. Returns ------- false_alarm_probability : np.ndarray The false alarm probability Notes ----- The true probability distribution for the largest peak cannot be determined analytically, so each method here provides an approximation to the value. The available methods are: - "baluev" (default): the upper-limit to the alias-free probability, using the approach of Baluev (2008) [1]_. - "davies" : the Davies upper bound from Baluev (2008) [1]_. - "naive" : the approximate probability based on an estimated effective number of independent frequencies. - "bootstrap" : the approximate probability based on bootstrap resamplings of the input data. Note also that for normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. 
See Also -------- distribution false_alarm_level References ---------- .. [1] Baluev, R.V. MNRAS 385, 1279 (2008) """ if self.nterms != 1: raise NotImplementedError( "false alarm probability is not implemented for multiterm periodograms." ) if not (self.fit_mean or self.center_data): raise NotImplementedError( "false alarm probability is implemented " "only for periodograms of centered data." ) fmin, fmax = self.autofrequency( samples_per_peak=samples_per_peak, nyquist_factor=nyquist_factor, minimum_frequency=minimum_frequency, maximum_frequency=maximum_frequency, return_freq_limits=True, ) return _statistics.false_alarm_probability( power, fmax=fmax, t=self._trel, y=self.y, dy=self.dy, normalization=self.normalization, method=method, method_kwds=method_kwds, ) def false_alarm_level( self, false_alarm_probability, method="baluev", samples_per_peak=5, nyquist_factor=5, minimum_frequency=None, maximum_frequency=None, method_kwds=None, ): """Level of maximum at a given false alarm probability. This gives an estimate of the periodogram level corresponding to a specified false alarm probability for the largest peak, assuming a null hypothesis of non-varying data with Gaussian noise. Parameters ---------- false_alarm_probability : array-like The false alarm probability (0 < fap < 1). maximum_frequency : float The maximum frequency of the periodogram. method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional The approximation method to use; default='baluev'. method_kwds : dict, optional Additional method-specific keywords. Returns ------- power : np.ndarray The periodogram peak height corresponding to the specified false alarm probability. Notes ----- The true probability distribution for the largest peak cannot be determined analytically, so each method here provides an approximation to the value. The available methods are: - "baluev" (default): the upper-limit to the alias-free probability, using the approach of Baluev (2008) [1]_. 
- "davies" : the Davies upper bound from Baluev (2008) [1]_. - "naive" : the approximate probability based on an estimated effective number of independent frequencies. - "bootstrap" : the approximate probability based on bootstrap resamplings of the input data. The number of samples can be set with the method-specific keyword "n_bootstraps" (default=1000). Note also that for normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. See Also -------- distribution false_alarm_probability References ---------- .. [1] Baluev, R.V. MNRAS 385, 1279 (2008) """ if self.nterms != 1: raise NotImplementedError( "false alarm probability is not implemented for multiterm periodograms." ) if not (self.fit_mean or self.center_data): raise NotImplementedError( "false alarm probability is implemented " "only for periodograms of centered data." ) fmin, fmax = self.autofrequency( samples_per_peak=samples_per_peak, nyquist_factor=nyquist_factor, minimum_frequency=minimum_frequency, maximum_frequency=maximum_frequency, return_freq_limits=True, ) return _statistics.false_alarm_level( false_alarm_probability, fmax=fmax, t=self._trel, y=self.y, dy=self.dy, normalization=self.normalization, method=method, method_kwds=method_kwds, )
LombScargle
python
numba__numba
numba/tests/test_optimisation_pipelines.py
{ "start": 163, "end": 1692 }
class ____(TestCase): """ Tests that pass manager is not overriding the intended optimization level. """ def _get_llvmir(self, fn, sig): with override_config('OPT', 0): fn.compile(sig) return fn.inspect_llvm(sig) def test_override_config(self): @njit(debug=True, error_model='numpy') def foo(a): b = a + 1.23 c = b * 2.34 d = b / c print(d) return d sig = (types.float64,) full_ir = self._get_llvmir(foo, sig=sig) module = llvm.parse_assembly(full_ir) name = foo.overloads[foo.signatures[0]].fndesc.mangled_name funcs = [x for x in module.functions if x.name == name] self.assertEqual(len(funcs), 1) func = funcs[0] blocks = [x for x in func.blocks] self.assertGreater(len(blocks), 1) block = blocks[0] # Find sequence with non-debug instructions instrs = [x for x in block.instructions if x.opcode != 'call'] op_expect = {'fadd', 'fmul', 'fdiv'} started = False for x in instrs: if x.opcode in op_expect: op_expect.remove(x.opcode) if not started: started = True elif op_expect and started: break self.assertGreater(len(op_expect), 0, "Function was optimized unexpectedly") if __name__ == '__main__': unittest.main()
TestPassManagerOptimization
python
pytorch__pytorch
torch/_inductor/codegen/simd_kernel_features.py
{ "start": 22180, "end": 22903 }
class ____: """Memory usage stats that are collected for both persistent and looped kernels""" reads: StatsForReadsOrWrites writes: StatsForReadsOrWrites memory: StatsForReadsOrWrites @classmethod def compute( cls, loops: list[MemoryEstimate], estimator: MemoryEstimator ) -> typing.Self: reads = StatsForReadsOrWrites.compute( [loop.reads for loop in loops], estimator.symbols ) writes = StatsForReadsOrWrites.compute( [loop.writes for loop in loops], estimator.symbols ) return cls( reads=reads, writes=writes, memory=reads + writes, ) @dataclasses.dataclass
StatsForKernelType
python
langchain-ai__langchain
libs/core/langchain_core/tools/base.py
{ "start": 8064, "end": 12126 }
class ____: """Configuration for Pydantic models generated from function signatures.""" extra: str = "forbid" """Whether to allow extra fields in the model.""" arbitrary_types_allowed: bool = True """Whether to allow arbitrary types in the model.""" def create_schema_from_function( model_name: str, func: Callable, *, filter_args: Sequence[str] | None = None, parse_docstring: bool = False, error_on_invalid_docstring: bool = False, include_injected: bool = True, ) -> type[BaseModel]: """Create a Pydantic schema from a function's signature. Args: model_name: Name to assign to the generated Pydantic schema. func: Function to generate the schema from. filter_args: Optional list of arguments to exclude from the schema. Defaults to `FILTERED_ARGS`. parse_docstring: Whether to parse the function's docstring for descriptions for each argument. error_on_invalid_docstring: if `parse_docstring` is provided, configure whether to raise `ValueError` on invalid Google Style docstrings. include_injected: Whether to include injected arguments in the schema. Defaults to `True`, since we want to include them in the schema when *validating* tool inputs. Returns: A Pydantic model with the same arguments as the function. """ sig = inspect.signature(func) if _function_annotations_are_pydantic_v1(sig, func): validated = validate_arguments_v1(func, config=_SchemaConfig) # type: ignore[call-overload] else: # https://docs.pydantic.dev/latest/usage/validation_decorator/ with warnings.catch_warnings(): # We are using deprecated functionality here. # This code should be re-written to simply construct a Pydantic model # using inspect.signature and create_model. 
warnings.simplefilter("ignore", category=PydanticDeprecationWarning) validated = validate_arguments(func, config=_SchemaConfig) # type: ignore[operator] # Let's ignore `self` and `cls` arguments for class and instance methods # If qualified name has a ".", then it likely belongs in a class namespace in_class = bool(func.__qualname__ and "." in func.__qualname__) has_args = False has_kwargs = False for param in sig.parameters.values(): if param.kind == param.VAR_POSITIONAL: has_args = True elif param.kind == param.VAR_KEYWORD: has_kwargs = True inferred_model = validated.model if filter_args: filter_args_ = filter_args else: # Handle classmethods and instance methods existing_params: list[str] = list(sig.parameters.keys()) if existing_params and existing_params[0] in {"self", "cls"} and in_class: filter_args_ = [existing_params[0], *list(FILTERED_ARGS)] else: filter_args_ = list(FILTERED_ARGS) for existing_param in existing_params: if not include_injected and _is_injected_arg_type( sig.parameters[existing_param].annotation ): filter_args_.append(existing_param) description, arg_descriptions = _infer_arg_descriptions( func, parse_docstring=parse_docstring, error_on_invalid_docstring=error_on_invalid_docstring, ) # Pydantic adds placeholder virtual fields we need to strip valid_properties = [] for field in get_fields(inferred_model): if not has_args and field == "args": continue if not has_kwargs and field == "kwargs": continue if field == "v__duplicate_kwargs": # Internal pydantic field continue if field not in filter_args_: valid_properties.append(field) return _create_subset_model( model_name, inferred_model, list(valid_properties), descriptions=arg_descriptions, fn_description=description, )
_SchemaConfig
python
huggingface__transformers
tests/models/dbrx/test_modeling_dbrx.py
{ "start": 2645, "end": 3560 }
class ____(CausalLMModelTest, unittest.TestCase): model_tester_class = DbrxModelTester @slow def test_model_from_pretrained(self): model_name = "trl-internal-testing/tiny-DbrxForCausalLM" model = DbrxModel.from_pretrained(model_name) self.assertIsNotNone(model) # Offload does not work with Dbrx models because of the forward of DbrxExperts where we chunk the experts. # The issue is that the offloaded weights of the mlp layer are still on meta device (w1_chunked, v1_chunked, w2_chunked) @unittest.skip(reason="Dbrx models do not work with offload") def test_cpu_offload(self): pass @unittest.skip(reason="Dbrx models do not work with offload") def test_disk_offload_safetensors(self): pass @unittest.skip(reason="Dbrx models do not work with offload") def test_disk_offload_bin(self): pass @require_torch
DbrxModelTest
python
apache__airflow
airflow-core/src/airflow/serialization/definitions/param.py
{ "start": 2984, "end": 5549 }
class ____(collections.abc.Mapping[str, Any]): """Server-side ParamsDict class for deserialization.""" __dict: dict[str, SerializedParam] def __init__(self, d: Mapping[str, Any] | None = None) -> None: self.__dict = dict(_collect_params(d)) def __eq__(self, other: Any) -> bool: """Compare params dicts using their dumped content, matching SDK behavior.""" if hasattr(other, "dump"): # ParamsDict or SerializedParamsDict return self.dump() == other.dump() if isinstance(other, collections.abc.Mapping): return self.dump() == other return NotImplemented def __hash__(self): return hash(self.dump()) def __contains__(self, key: object) -> bool: return key in self.__dict def __len__(self) -> int: return len(self.__dict) def __iter__(self) -> Iterator[str]: return iter(self.__dict) def __getitem__(self, key: str) -> Any: """ Get the resolved value for this key. This matches SDK ParamsDict behavior. """ return self.__dict[key].value def get_param(self, key: str) -> SerializedParam: """Get the internal SerializedParam object for this key.""" return self.__dict[key] def items(self): return collections.abc.ItemsView(self.__dict) def values(self): return collections.abc.ValuesView(self.__dict) def validate(self) -> dict[str, Any]: """Validate & returns all the params stored in the dictionary.""" def _validate_one(k: str, v: SerializedParam): try: return v.resolve(raises=True) except Exception as e: raise ValueError(f"Invalid input for param {k}: {e}") from None return {k: _validate_one(k, v) for k, v in self.__dict.items()} def dump(self) -> Mapping[str, Any]: """Dump the resolved values as a mapping.""" return {k: v.resolve() for k, v in self.__dict.items()} def deep_merge(self, data: Mapping[str, Any] | None) -> SerializedParamsDict: """Create a new params dict by merging incoming data into this params dict.""" params = copy.deepcopy(self) if not data: return params for k, v in data.items(): if k not in params: params.__dict[k] = _coerce_param(v) elif isinstance(v, 
SerializedParam): params.__dict[k] = v else: params.__dict[k].value = v return params
SerializedParamsDict
python
sphinx-doc__sphinx
sphinx/domains/c/_ast.py
{ "start": 19071, "end": 20008 }
class ____(ASTExpression): def __init__(self, typ: ASTType) -> None: self.typ = typ def __eq__(self, other: object) -> bool: if not isinstance(other, ASTAlignofExpr): return NotImplemented return self.typ == other.typ def __hash__(self) -> int: return hash(self.typ) def _stringify(self, transform: StringifyTransform) -> str: return 'alignof(' + transform(self.typ) + ')' def describe_signature( self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol ) -> None: signode += addnodes.desc_sig_keyword('alignof', 'alignof') signode += addnodes.desc_sig_punctuation('(', '(') self.typ.describe_signature(signode, mode, env, symbol) signode += addnodes.desc_sig_punctuation(')', ')') # Other expressions ################################################################################
ASTAlignofExpr
python
allegroai__clearml
clearml/backend_api/services/v2_13/queues.py
{ "start": 25271, "end": 26132 }
class ____(Response): """ Response of queues.create endpoint. :param id: New queue ID :type id: str """ _service = "queues" _action = "create" _version = "2.13" _schema = { "definitions": {}, "properties": {"id": {"description": "New queue ID", "type": ["string", "null"]}}, "type": "object", } def __init__(self, id: Optional[str] = None, **kwargs: Any) -> None: super(CreateResponse, self).__init__(**kwargs) self.id = id @schema_property("id") def id(self) -> Optional[str]: return self._property_id @id.setter def id(self, value: Optional[str]) -> None: if value is None: self._property_id = None return self.assert_isinstance(value, "id", six.string_types) self._property_id = value
CreateResponse
python
Lightning-AI__lightning
src/lightning/pytorch/_graveyard/tpu.py
{ "start": 2019, "end": 2427 }
class ____(XLAAccelerator): """Legacy class. Use :class:`~lightning.pytorch.accelerators.xla.XLAAccelerator` instead. """ def __init__(self, *args: Any, **kwargs: Any) -> None: rank_zero_deprecation( "The `TPUAccelerator` class is deprecated. Use `lightning.pytorch.accelerators.XLAAccelerator` instead." ) super().__init__(*args, **kwargs)
TPUAccelerator
python
pytorch__pytorch
test/distributed/test_c10d_nccl.py
{ "start": 181734, "end": 197893 }
class ____(test_c10d_common.AbstractLargeCommTest, MultiProcessTestCase): def setUp(self): super().setUp() # TORCH_NCCL_BLOCKING_WAIT overrides TORCH_NCCL_ASYNC_ERROR_HANDLING hence tests # that use TORCH_NCCL_BLOCKING_WAIT will test it as expected. os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "1" self._spawn_processes() def tearDown(self): super().tearDown() try: os.remove(self.file_name) except OSError: pass @property def device(self): return self.rank @requires_nccl() @skip_if_lt_x_gpu(4) def test_new_group_local_sync(self): self._test_new_group_local_sync(backend="nccl") @requires_nccl() @skip_if_lt_x_gpu(4) def test_new_group_local_sync_sanity_check(self): self._test_new_group_local_sync_sanity_check(backend="nccl") @requires_nccl() @skip_if_lt_x_gpu(4) def test_new_group_local_sync_duplicated_pg(self): self._test_new_group_local_sync_duplicate_pg(backend="nccl") def _init_two_pg2_subgroups(self, world_size: int = 4): if world_size != 4: raise NotImplementedError( f"need world size of 4 to get 2 subgroup PGs, but got world size of {world_size}" ) store = c10d.FileStore(self.file_name, world_size) c10d.init_process_group( backend="nccl", store=store, rank=self.rank, world_size=world_size ) # every rank creates the same sub groups # including unused sub groups in the current rank a_group = c10d.new_group([0, 1]) b_group = c10d.new_group([2, 3]) return a_group if self.rank < 2 else b_group @requires_nccl() @skip_if_lt_x_gpu(4) @parametrize("group_rank", [True, False]) def test_gather_subgroup(self, group_rank): world_size = 4 if self.rank >= world_size: # just easier to write the test for exactly 4 gpus, even if this test class increased to 8gpu later return subgroup = self._init_two_pg2_subgroups(world_size) device = torch.device(f"cuda:{self.rank:d}") input = torch.ones((10,), device=device) * self.rank if self.rank == 0 or self.rank == 2: gather_list = [torch.empty_like(input) for _ in range(subgroup.size())] if group_rank: # global_dst=0 group_dst=0 
my_global_rank=2 gather_list is not None=True torch.distributed.gather( input, gather_list=gather_list, group_dst=0, group=subgroup, async_op=False, ) else: torch.distributed.gather( input, gather_list=gather_list, dst=self.rank, group=subgroup, async_op=False, ) for src in range(len(gather_list)): expected = (torch.ones_like(input) * self.rank) + src self.assertEqual(gather_list[src], expected) else: if group_rank: torch.distributed.gather( input, gather_list=None, group_dst=0, group=subgroup, async_op=False, ) else: torch.distributed.gather( input, gather_list=None, dst=self.rank - 1, group=subgroup, async_op=False, ) @requires_nccl() @skip_if_lt_x_gpu(4) @parametrize("group_rank", [True, False]) def test_gather_object_subgroup(self, group_rank): world_size = 4 if self.rank >= world_size: # just easier to write the test for exactly 4 gpus, even if this test class increased to 8gpu later return subgroup = self._init_two_pg2_subgroups(world_size) # discrepancy #1 # have to set device or else gather_object gets wrong device from 'current_device = _get_pg_default_device(group) torch.cuda.set_device(self.rank) input = {"rank": self.rank} if self.rank == 0 or self.rank == 2: # discrepancy #2 # another weird thing- what's the point of making me specify some empty objects in my list? # empty list should be valid imo. 
(but it throws an error) gather_list = [{}, {}] if group_rank: torch.distributed.gather_object( input, object_gather_list=gather_list, group_dst=0, group=subgroup ) else: torch.distributed.gather_object( input, object_gather_list=gather_list, dst=self.rank, group=subgroup ) for src in range(len(gather_list)): self.assertEqual(gather_list[src]["rank"], self.rank + src) else: if group_rank: torch.distributed.gather_object( input, object_gather_list=None, group_dst=0, group=subgroup ) else: torch.distributed.gather_object( input, object_gather_list=None, dst=self.rank - 1, group=subgroup ) @requires_nccl() @skip_if_lt_x_gpu(4) @parametrize("group_rank", [True, False]) def test_reduce_subgroup(self, group_rank): world_size = 4 if self.rank >= world_size: return subgroup = self._init_two_pg2_subgroups(world_size) device = torch.device(f"cuda:{self.rank:d}") x = torch.ones((10,), device=device) * self.rank if self.rank == 0 or self.rank == 2: expected = x + torch.ones((10,), device=device) * (self.rank + 1) if group_rank: c10d.reduce(x, group_dst=0, group=subgroup, async_op=False) else: c10d.reduce(x, dst=self.rank, group=subgroup, async_op=False) self.assertEqual(x, expected) else: if group_rank: c10d.reduce(x, group_dst=0, group=subgroup, async_op=False) else: c10d.reduce(x, dst=self.rank - 1, group=subgroup, async_op=False) @requires_nccl() @skip_if_lt_x_gpu(4) @parametrize("group_rank", [True, False]) @parametrize("async_op", [True, False]) def test_send_recv_subgroup(self, async_op, group_rank): world_size = 4 if self.rank >= world_size: return subgroup = self._init_two_pg2_subgroups(world_size) device = torch.device(f"cuda:{self.rank:d}") if self.rank == 0 or self.rank == 2: x = torch.empty((10,), device=device) if async_op: if group_rank: c10d.irecv(x, group_src=1, group=subgroup).wait() else: c10d.irecv(x, src=self.rank + 1, group=subgroup).wait() else: if group_rank: c10d.recv(x, group_src=1, group=subgroup) else: c10d.recv(x, src=self.rank + 1, group=subgroup) 
expected = torch.ones((10,), device=device) * (self.rank + 1) self.assertEqual(x, expected) else: x = torch.ones((10,), device=device) * self.rank if async_op: if group_rank: c10d.isend(x, group_dst=0, group=subgroup).wait() else: c10d.isend(x, dst=self.rank - 1, group=subgroup).wait() else: if group_rank: c10d.send(x, group_dst=0, group=subgroup) else: c10d.send(x, dst=self.rank - 1, group=subgroup) @requires_nccl() @skip_if_lt_x_gpu(4) @parametrize("group_rank", [True, False]) def test_batch_send_recv_subgroup(self, group_rank): world_size = 4 if self.rank >= world_size: return subgroup = self._init_two_pg2_subgroups(world_size) device = torch.device(f"cuda:{self.rank:d}") ops = [] if self.rank == 0 or self.rank == 2: x = torch.empty((10,), device=device) if group_rank: ops.append(c10d.P2POp(dist.irecv, x, group=subgroup, group_peer=1)) else: ops.append( c10d.P2POp(dist.irecv, x, peer=self.rank + 1, group=subgroup) ) for work in dist.batch_isend_irecv(ops): work.wait() expected = torch.ones((10,), device=device) * (self.rank + 1) self.assertEqual(x, expected) else: x = torch.ones((10,), device=device) * self.rank if group_rank: ops.append(c10d.P2POp(dist.isend, x, group=subgroup, group_peer=0)) else: ops.append( c10d.P2POp(dist.isend, x, peer=self.rank - 1, group=subgroup) ) for work in dist.batch_isend_irecv(ops): work.wait() @requires_nccl() @skip_if_lt_x_gpu(4) @parametrize("group_rank", [True, False]) def test_broadcast_subgroup(self, group_rank): world_size = 4 if self.rank >= world_size: return subgroup = self._init_two_pg2_subgroups(world_size) device = torch.device(f"cuda:{self.rank:d}") if self.rank == 0 or self.rank == 2: x = torch.empty((10,), device=device) if group_rank: c10d.broadcast(x, group_src=1, group=subgroup) else: c10d.broadcast(x, src=self.rank + 1, group=subgroup) expected = torch.ones((10,), device=device) * (self.rank + 1) self.assertEqual(x, expected) else: x = torch.ones((10,), device=device) * self.rank if group_rank: 
c10d.broadcast(x, group_src=1, group=subgroup) else: c10d.broadcast(x, src=self.rank, group=subgroup) @requires_nccl() @skip_if_lt_x_gpu(4) @parametrize( "set_device", [SetDeviceMethod.TORCH_CUDA_SET, SetDeviceMethod.COLLECTIVE_ARGUMENT], ) @parametrize("group_rank", [True, False]) def test_send_recv_object_list_subgroup( self, set_device: SetDeviceMethod, group_rank ): world_size = 4 if self.rank >= world_size: return subgroup = self._init_two_pg2_subgroups(world_size) if set_device == SetDeviceMethod.TORCH_CUDA_SET: torch.cuda.set_device(self.rank) device = None else: device = torch.device(f"cuda:{self.rank:d}") if self.rank == 0 or self.rank == 2: x = [{}] if group_rank: c10d.recv_object_list(x, group_src=1, group=subgroup, device=device) else: c10d.recv_object_list( x, src=self.rank + 1, group=subgroup, device=device ) expected = [{"rank": self.rank + 1}] self.assertEqual(x, expected) else: x = [{"rank": self.rank}] if group_rank: c10d.send_object_list(x, group_dst=0, group=subgroup, device=device) else: c10d.send_object_list( x, dst=self.rank - 1, group=subgroup, device=device ) @requires_nccl() @skip_if_lt_x_gpu(4) @parametrize( "set_device", [SetDeviceMethod.TORCH_CUDA_SET, SetDeviceMethod.COLLECTIVE_ARGUMENT], ) @parametrize("group_rank", [True, False]) def test_broadcast_object_list_subgroup( self, set_device: SetDeviceMethod, group_rank ): world_size = 4 if self.rank >= world_size: return subgroup = self._init_two_pg2_subgroups(world_size) if set_device == SetDeviceMethod.TORCH_CUDA_SET: torch.cuda.set_device(self.rank) device = None else: device = torch.device(f"cuda:{self.rank:d}") if self.rank == 0 or self.rank == 2: x = [{}] if group_rank: c10d.broadcast_object_list( x, group_src=1, group=subgroup, device=device ) else: c10d.broadcast_object_list( x, src=self.rank + 1, group=subgroup, device=device ) expected = [{"rank": self.rank + 1}] self.assertEqual(x, expected) else: x = [{"rank": self.rank}] if group_rank: c10d.broadcast_object_list( x, 
group_src=1, group=subgroup, device=device ) else: c10d.broadcast_object_list( x, src=self.rank, group=subgroup, device=device ) @requires_nccl() @skip_if_lt_x_gpu(4) @parametrize("group_rank", [True, False]) def test_scatter_subgroup(self, group_rank): world_size = 4 if self.rank >= world_size: return subgroup = self._init_two_pg2_subgroups(world_size) device = torch.device(f"cuda:{self.rank:d}") x = torch.empty((10,), device=device) expected = torch.ones((10,), device=device) * self.rank if self.rank == 0 or self.rank == 2: if group_rank: c10d.scatter(x, scatter_list=None, group_src=1, group=subgroup) else: c10d.scatter(x, scatter_list=None, src=self.rank + 1, group=subgroup) else: scatter_list = [ torch.ones((10,), device=device) * (self.rank - 1), torch.ones((10,), device=device) * self.rank, ] if group_rank: c10d.scatter(x, scatter_list=scatter_list, group_src=1, group=subgroup) else: c10d.scatter( x, scatter_list=scatter_list, src=self.rank, group=subgroup ) self.assertEqual(x, expected) @requires_nccl() @skip_if_lt_x_gpu(4) @parametrize("group_rank", [True, False]) def test_scatter_object_list_subgroup(self, group_rank): world_size = 4 if self.rank >= world_size: return subgroup = self._init_two_pg2_subgroups(world_size) torch.cuda.set_device(self.rank) scatter_object_output_list = [None] expected = [{"rank": self.rank}] if self.rank == 0 or self.rank == 2: if group_rank: c10d.scatter_object_list( scatter_object_output_list=scatter_object_output_list, scatter_object_input_list=None, group_src=1, group=subgroup, ) else: c10d.scatter_object_list( scatter_object_output_list=scatter_object_output_list, scatter_object_input_list=None, src=self.rank + 1, group=subgroup, ) else: scatter_object_input_list = [ {"rank": self.rank - 1}, {"rank": self.rank}, ] if group_rank: c10d.scatter_object_list( scatter_object_output_list=scatter_object_output_list, scatter_object_input_list=scatter_object_input_list, group_src=1, group=subgroup, ) else: c10d.scatter_object_list( 
scatter_object_output_list=scatter_object_output_list, scatter_object_input_list=scatter_object_input_list, src=self.rank, group=subgroup, ) self.assertEqual(scatter_object_output_list, expected) instantiate_parametrized_tests(LargeCommTest)
LargeCommTest
python
apache__airflow
airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_monitor.py
{ "start": 1262, "end": 1434 }
class ____: @pytest.fixture(autouse=True) def setup(self) -> None: clear_db_jobs() def teardown_method(self): clear_db_jobs()
TestMonitorEndpoint
python
encode__django-rest-framework
tests/schemas/views.py
{ "start": 5359, "end": 5518 }
class ____(generics.GenericAPIView): serializer_class = ExampleSerializerModel def get(self, *args, **kwargs): pass
ExampleOperationIdDuplicate1
python
sympy__sympy
sympy/series/formal.py
{ "start": 42161, "end": 44077 }
class ____(FiniteFormalPowerSeries): """Represents the product of two formal power series of two functions. Explanation =========== No computation is performed. Terms are calculated using a term by term logic, instead of a point by point logic. There are two differences between a :obj:`FormalPowerSeries` object and a :obj:`FormalPowerSeriesProduct` object. The first argument contains the two functions involved in the product. Also, the coefficient sequence contains both the coefficient sequence of the formal power series of the involved functions. See Also ======== sympy.series.formal.FormalPowerSeries sympy.series.formal.FiniteFormalPowerSeries """ def __init__(self, *args): ffps, gfps = self.ffps, self.gfps k = ffps.ak.variables[0] self.coeff1 = sequence(ffps.ak.formula, (k, 0, oo)) k = gfps.ak.variables[0] self.coeff2 = sequence(gfps.ak.formula, (k, 0, oo)) @property def function(self): """Function of the product of two formal power series.""" return self.f * self.g def _eval_terms(self, n): """ Returns the first ``n`` terms of the product formal power series. Term by term logic is implemented here. Examples ======== >>> from sympy import fps, sin, exp >>> from sympy.abc import x >>> f1 = fps(sin(x)) >>> f2 = fps(exp(x)) >>> fprod = f1.product(f2, x) >>> fprod._eval_terms(4) x**3/3 + x**2 + x See Also ======== sympy.series.formal.FormalPowerSeries.product """ coeff1, coeff2 = self.coeff1, self.coeff2 aks = convolution(coeff1[:n], coeff2[:n]) terms = [] for i in range(0, n): terms.append(aks[i] * self.ffps.xk.coeff(i)) return Add(*terms)
FormalPowerSeriesProduct