Column schema:
  language     stringclasses   1 value
  repo         stringclasses   346 values
  path         stringlengths   6 to 201 characters
  class_span   dict
  source       stringlengths   21 characters to 2.38M
  target       stringlengths   1 to 96 characters
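Each record below follows this schema: a Python class definition flattened to one line in source, with the class name replaced by the placeholder ____, and the name to recover in target; repo, path, and class_span locate the snippet in its repository. A minimal sketch of re-inserting the masked name, assuming rows are plain dicts keyed by the column names above and that the placeholder is the literal four-underscore marker shown in the examples (restore_class_name and example_row are hypothetical, for illustration only):

def restore_class_name(row: dict) -> str:
    """Put the masked class name back into the flattened source snippet.

    Assumes the name was replaced by the literal marker "____" exactly once,
    as in the rows shown below.
    """
    return row["source"].replace("____", row["target"], 1)


# Hypothetical row shaped like the first example below.
example_row = {
    "language": "python",
    "repo": "cherrypy__cherrypy",
    "path": "cherrypy/process/win32.py",
    "class_span": {"start": 196, "end": 2464},
    "source": "class ____(plugins.SimplePlugin): ...",
    "target": "ConsoleCtrlHandler",
}
print(restore_class_name(example_row))
# class ConsoleCtrlHandler(plugins.SimplePlugin): ...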
python
cherrypy__cherrypy
cherrypy/process/win32.py
{ "start": 196, "end": 2464 }
class ____(plugins.SimplePlugin): """A WSPBus plugin for handling Win32 console events (like Ctrl-C).""" def __init__(self, bus): """Initialize the console control handler.""" self.is_set = False plugins.SimplePlugin.__init__(self, bus) def start(self): """Register handling of the console control events.""" if self.is_set: self.bus.log('Handler for console events already set.', level=20) return result = win32api.SetConsoleCtrlHandler(self.handle, 1) if result == 0: self.bus.log( 'Could not SetConsoleCtrlHandler (error %r)' % win32api.GetLastError(), level=40, ) else: self.bus.log('Set handler for console events.', level=20) self.is_set = True def stop(self): """Unregister the console control handlers.""" if not self.is_set: self.bus.log('Handler for console events already off.', level=20) return try: result = win32api.SetConsoleCtrlHandler(self.handle, 0) except ValueError: # "ValueError: The object has not been registered" result = 1 if result == 0: self.bus.log( 'Could not remove SetConsoleCtrlHandler (error %r)' % win32api.GetLastError(), level=40, ) else: self.bus.log('Removed handler for console events.', level=20) self.is_set = False def handle(self, event): """Handle console control events (like Ctrl-C).""" if event in ( win32con.CTRL_C_EVENT, win32con.CTRL_LOGOFF_EVENT, win32con.CTRL_BREAK_EVENT, win32con.CTRL_SHUTDOWN_EVENT, win32con.CTRL_CLOSE_EVENT, ): self.bus.log('Console event %s: shutting down bus' % event) # Remove self immediately so repeated Ctrl-C doesn't re-call it. try: self.stop() except ValueError: pass self.bus.exit() # 'First to return True stops the calls' return 1 return 0
ConsoleCtrlHandler
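For orientation, SimplePlugin subclasses like the one above are activated by subscribing them to a process bus, so the bus's start/stop transitions call their start()/stop() methods. A minimal sketch, assuming the class is importable from the path shown, that the code runs on Windows with pywin32 installed, and using the global cherrypy.engine bus:

import cherrypy
from cherrypy.process.win32 import ConsoleCtrlHandler

# Register the handler; bus start/stop will now install and remove the
# Win32 console control hook defined above.
ConsoleCtrlHandler(cherrypy.engine).subscribe()

cherrypy.engine.start()
cherrypy.engine.block()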
python
dagster-io__dagster
python_modules/dagster-test/dagster_test/components/simple_pipes_script_asset.py
{ "start": 1746, "end": 2556 }
class ____(Component): """A simple asset that runs a Python script with the Pipes subprocess client. Because it is a pipes asset, no value is returned. """ @classmethod def get_model_cls(cls): return SimplePipesScriptComponentModel def __init__(self, asset_key: AssetKey, script_path: Path): self._asset_key = asset_key self._script_path = script_path def build_defs(self, context: ComponentLoadContext) -> Definitions: @asset(key=self._asset_key) def _asset(context: AssetExecutionContext, pipes_client: PipesSubprocessClient): cmd = [shutil.which("python"), self._script_path] return pipes_client.run(command=cmd, context=context).get_results() return Definitions(assets=[_asset])
SimplePipesScriptComponent
python
great-expectations__great_expectations
tests/data_context/test_data_context_state_management.py
{ "start": 2367, "end": 6516 }
class ____(EphemeralDataContext): """ Simply wraps around EphemeralDataContext but keeps tabs on specific method calls around state management. """ # noqa: E501 # FIXME CoP def __init__( self, project_config: DataContextConfig, ) -> None: # expectation store is required for initializing the base DataContext self._expectations_store = ExpectationsStoreSpy() self._checkpoint_store = CheckpointStoreSpy() super().__init__(project_config) self.save_count = 0 self._datasource_store = DatasourceStoreSpy() @property def datasource_store(self): return self._datasource_store @property def expectations_store(self): return self._expectations_store @property def checkpoint_store(self): return self._checkpoint_store def _save_project_config(self): """ No-op our persistence mechanism but increment an internal counter to ensure it was used. """ self.save_count += 1 BLOCK_CONFIG_DATASOURCE_NAME = "my_pandas_datasource" @pytest.fixture def in_memory_data_context( fluent_datasource_config: dict, ) -> EphemeralDataContextSpy: config = DataContextConfig( store_backend_defaults=InMemoryStoreBackendDefaults(), ) context = EphemeralDataContextSpy(project_config=config) ds_type = DataSourceManager.type_lookup[fluent_datasource_config["type"]] fluent_datasources = { fluent_datasource_config["name"]: ds_type(**fluent_datasource_config), } context.data_sources.all().update(fluent_datasources) set_context(context) return context @pytest.mark.unit def test_add_store(in_memory_data_context: EphemeralDataContextSpy): context = in_memory_data_context num_stores_before = len(context.stores) num_store_configs_before = len(context.config.stores) context.add_store( name="my_new_store", config={ "module_name": "great_expectations.data_context.store", "class_name": "ExpectationsStore", }, ) num_stores_after = len(context.stores) num_store_configs_after = len(context.config.stores) assert num_stores_after == num_stores_before + 1 assert num_store_configs_after == num_store_configs_before + 1 assert context.save_count == 1 @pytest.mark.unit def test_delete_store_success(in_memory_data_context: EphemeralDataContextSpy): context = in_memory_data_context num_stores_before = len(context.stores) num_store_configs_before = len(context.config.stores) context.delete_store("checkpoint_store") # We know this to be a default name num_stores_after = len(context.stores) num_store_configs_after = len(context.config.stores) assert num_stores_after == num_stores_before - 1 assert num_store_configs_after == num_store_configs_before - 1 assert context.save_count == 1 @pytest.mark.unit def test_delete_store_failure(in_memory_data_context: EphemeralDataContextSpy): context = in_memory_data_context num_stores_before = len(context.stores) num_store_configs_before = len(context.config.stores) with pytest.raises(StoreConfigurationError): context.delete_store("my_fake_store_name") num_stores_after = len(context.stores) num_store_configs_after = len(context.config.stores) assert num_stores_after == num_stores_before assert num_store_configs_after == num_store_configs_before assert context.save_count == 0 @pytest.mark.unit @pytest.mark.parametrize( "config", [ pytest.param( DataContextConfig(progress_bars=ProgressBarsConfig(globally=True)), id="DataContextConfig", ), pytest.param({"progress_bars": ProgressBarsConfig(globally=True)}, id="Mapping"), ], ) def test_update_project_config( in_memory_data_context: EphemeralDataContextSpy, config: DataContextConfig | Mapping ): context = in_memory_data_context assert context.progress_bars is None 
context.update_project_config(config) assert context.progress_bars["globally"] is True
EphemeralDataContextSpy
python
scipy__scipy
scipy/sparse/_lil.py
{ "start": 18801, "end": 20889 }
class ____(spmatrix, _lil_base): """ Row-based LIst of Lists sparse matrix. This is a structure for constructing sparse matrices incrementally. Note that inserting a single item can take linear time in the worst case; to construct the matrix efficiently, make sure the items are pre-sorted by index, per row. This can be instantiated in several ways: lil_matrix(D) where D is a 2-D ndarray lil_matrix(S) with another sparse array or matrix S (equivalent to S.tolil()) lil_matrix((M, N), [dtype]) to construct an empty matrix with shape (M, N) dtype is optional, defaulting to dtype='d'. Attributes ---------- dtype : dtype Data type of the matrix shape : 2-tuple Shape of the matrix ndim : int Number of dimensions (this is always 2) nnz size data LIL format data array of the matrix rows LIL format row index array of the matrix T Notes ----- Sparse matrices can be used in arithmetic operations: they support addition, subtraction, multiplication, division, and matrix power. Advantages of the LIL format - supports flexible slicing - changes to the matrix sparsity structure are efficient Disadvantages of the LIL format - arithmetic operations LIL + LIL are slow (consider CSR or CSC) - slow column slicing (consider CSC) - slow matrix vector products (consider CSR or CSC) Intended Usage - LIL is a convenient format for constructing sparse matrices - once a matrix has been constructed, convert to CSR or CSC format for fast arithmetic and matrix vector operations - consider using the COO format when constructing large matrices Data Structure - An array (``self.rows``) of rows, each of which is a sorted list of column indices of non-zero elements. - The corresponding nonzero values are stored in similar fashion in ``self.data``. """
lil_matrix
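The "Intended Usage" notes in the docstring above map to a short pattern: build incrementally in LIL form, then convert for arithmetic. A minimal sketch, assuming the class is exposed as scipy.sparse.lil_matrix, as its public name suggests:

import numpy as np
from scipy.sparse import lil_matrix

# Build incrementally in LIL form (cheap single-item inserts) ...
m = lil_matrix((4, 5), dtype=np.float64)
m[0, 1] = 3.0
m[2, 3] = -1.5
m[3, 0] = 2.0

# ... then convert to CSR for fast arithmetic and matrix-vector products.
csr = m.tocsr()
print(csr @ np.ones(5))  # [ 3.   0.  -1.5  2. ]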
python
skorch-dev__skorch
examples/benchmarks/history.py
{ "start": 706, "end": 3621 }
class ____(Callback): def on_batch_end(self, net, **kwargs): side_effects.append(( torch.cuda.memory_allocated() / 1e6, torch.cuda.memory_cached() / 1e6 )) def train(): X, y = make_classification(1000, 20, n_informative=10, random_state=0) X = X.astype(np.float32) y = y.astype(np.int64) module = make_classifier(input_units=20) net = NeuralNetClassifier( module, max_epochs=10, lr=0.1, callbacks=[TriggerKeyError(), PrintMemory()], device='cuda', ) return net.fit(X, y) def safe_slice(history, keys): # catch errors for key in keys: try: history[key] except (KeyError, IndexError): pass def performance_history(history): # SUCCESSFUL # level 0 for i in range(len(history)): history[i] # level 1 keys = tuple(history[0].keys()) history[0, keys] history[:, keys] for key in keys: history[0, key] history[:, key] # level 2 for i in range(len(history[0, 'batches'])): history[0, 'batches', i] history[:, 'batches', i] history[:, 'batches', :] # level 3 keys = tuple(history[0, 'batches', 0].keys()) history[0, 'batches', 0, keys] history[:, 'batches', 0, keys] history[0, 'batches', :, keys] history[:, 'batches', :, keys] for key in history[0, 'batches', 0]: history[0, 'batches', 0, key] history[:, 'batches', 0, key] history[0, 'batches', :, key] history[:, 'batches', :, key] # KEY ERRORS # level 0 safe_slice(history, [100000]) # level 1 safe_slice(history, [np.s_[0, 'foo'], np.s_[:, 'foo']]) # level 2 safe_slice(history, [ np.s_[0, 'batches', 0], np.s_[:, 'batches', 0], np.s_[0, 'batches', :], np.s_[:, 'batches', :], ]) # level 3 safe_slice(history, [ np.s_[0, 'batches', 0, 'foo'], np.s_[:, 'batches', 0, 'foo'], np.s_[0, 'batches', :, 'foo'], np.s_[:, 'batches', :, 'foo'], np.s_[0, 'batches', 0, ('foo', 'bar')], np.s_[:, 'batches', 0, ('foo', 'bar')], np.s_[0, 'batches', :, ('foo', 'bar')], np.s_[:, 'batches', :, ('foo', 'bar')], ]) if __name__ == '__main__': net = train() tic = time.time() for _ in range(1000): performance_history(net.history) toc = time.time() print("Time for performing 1000 runs: {:.5f} sec.".format(toc - tic)) assert toc - tic < 10, "accessing history is too slow" print("Allocated / cached memory") pprint(side_effects) mem_start = side_effects[0][0] mem_end = side_effects[-1][0] print("Memory epoch 1: {:.4f}, last epoch: {:.4f}".format( mem_start, mem_end)) assert np.isclose(mem_start, mem_end, rtol=1/3), "memory use should be similar"
PrintMemory
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/waiters/test_bedrock_agent.py
{ "start": 1558, "end": 3019 }
class ____(TestBedrockAgentCustomWaitersBase): WAITER_NAME = "knowledge_base_active" WAITER_ARGS = {"knowledgeBaseId": "kb_id"} SENSOR = BedrockKnowledgeBaseActiveSensor @pytest.fixture def mock_getter(self): with mock.patch.object(self.client, "get_knowledge_base") as getter: yield getter @pytest.mark.parametrize("state", SENSOR.SUCCESS_STATES) def test_knowledge_base_active_complete(self, state, mock_getter): mock_getter.return_value = {"knowledgeBase": {"status": state}} BedrockAgentHook().get_waiter(self.WAITER_NAME).wait(**self.WAITER_ARGS) @pytest.mark.parametrize("state", SENSOR.FAILURE_STATES) def test_knowledge_base_active_failed(self, state, mock_getter): mock_getter.return_value = {"knowledgeBase": {"status": state}} with pytest.raises(botocore.exceptions.WaiterError): BedrockAgentHook().get_waiter(self.WAITER_NAME).wait(**self.WAITER_ARGS) @pytest.mark.parametrize("state", SENSOR.INTERMEDIATE_STATES) def test_knowledge_base_active_wait(self, state, mock_getter): wait = {"knowledgeBase": {"status": state}} success = {"knowledgeBase": {"status": "ACTIVE"}} mock_getter.side_effect = [wait, wait, success] BedrockAgentHook().get_waiter(self.WAITER_NAME).wait( **self.WAITER_ARGS, WaiterConfig={"Delay": 0.01, "MaxAttempts": 3}, )
TestKnowledgeBaseActiveWaiter
python
ray-project__ray
python/ray/data/datasource/datasink.py
{ "start": 471, "end": 1209 }
class ____(Generic[WriteReturnType]): """Aggregated result of the Datasink write operations.""" # Total number of written rows. num_rows: int # Total size in bytes of written data. size_bytes: int # All returned values of `Datasink.write`. write_returns: List[WriteReturnType] @classmethod def combine(cls, *wrs: "WriteResult") -> "WriteResult": num_rows = sum(wr.num_rows for wr in wrs) size_bytes = sum(wr.size_bytes for wr in wrs) write_returns = list(itertools.chain(*[wr.write_returns for wr in wrs])) return WriteResult( num_rows=num_rows, size_bytes=size_bytes, write_returns=write_returns, ) @DeveloperAPI
WriteResult
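As a quick illustration of the combine() classmethod above, a minimal sketch; it assumes WriteResult is importable from the file path shown and can be constructed with the same keyword fields that combine() itself uses (presumably a dataclass, given that keyword construction):

from ray.data.datasource.datasink import WriteResult

# Two per-task write results, e.g. one per write task.
a = WriteResult(num_rows=100, size_bytes=2048, write_returns=["part-0"])
b = WriteResult(num_rows=50, size_bytes=1024, write_returns=["part-1"])

total = WriteResult.combine(a, b)
print(total.num_rows, total.size_bytes, total.write_returns)
# 150 3072 ['part-0', 'part-1']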
python
cherrypy__cherrypy
cherrypy/test/test_wsgiapps.py
{ "start": 99, "end": 4084 }
class ____(helper.CPWebCase): @staticmethod def setup_server(): def test_app(environ, start_response): status = '200 OK' response_headers = [('Content-type', 'text/plain')] start_response(status, response_headers) output = [ 'Hello, world!\n', 'This is a wsgi app running within CherryPy!\n\n', ] keys = list(environ.keys()) keys.sort() for k in keys: output.append('%s: %s\n' % (k, environ[k])) return [ntob(x, 'utf-8') for x in output] def test_empty_string_app(environ, start_response): status = '200 OK' response_headers = [('Content-type', 'text/plain')] start_response(status, response_headers) return [ b'Hello', b'', b' ', b'', b'world', ] class WSGIResponse(object): def __init__(self, appresults): self.appresults = appresults self.iter = iter(appresults) def __iter__(self): return self if sys.version_info >= (3, 0): def __next__(self): return next(self.iter) else: def next(self): return self.iter.next() def close(self): if hasattr(self.appresults, 'close'): self.appresults.close() class ReversingMiddleware(object): def __init__(self, app): self.app = app def __call__(self, environ, start_response): results = app(environ, start_response) class Reverser(WSGIResponse): if sys.version_info >= (3, 0): def __next__(this): line = list(next(this.iter)) line.reverse() return bytes(line) else: def next(this): line = list(this.iter.next()) line.reverse() return ''.join(line) return Reverser(results) class Root: @cherrypy.expose def index(self): return ntob("I'm a regular CherryPy page handler!") cherrypy.tree.mount(Root()) cherrypy.tree.graft(test_app, '/hosted/app1') cherrypy.tree.graft(test_empty_string_app, '/hosted/app3') # Set script_name explicitly to None to signal CP that it should # be pulled from the WSGI environ each time. app = cherrypy.Application(Root(), script_name=None) cherrypy.tree.graft(ReversingMiddleware(app), '/hosted/app2') wsgi_output = """Hello, world! This is a wsgi app running within CherryPy!""" def test_01_standard_app(self): self.getPage('/') self.assertBody("I'm a regular CherryPy page handler!") def test_04_pure_wsgi(self): if not cherrypy.server.using_wsgi: return self.skip('skipped (not using WSGI)... ') self.getPage('/hosted/app1') self.assertHeader('Content-Type', 'text/plain') self.assertInBody(self.wsgi_output) def test_05_wrapped_cp_app(self): if not cherrypy.server.using_wsgi: return self.skip('skipped (not using WSGI)... ') self.getPage('/hosted/app2/') body = list("I'm a regular CherryPy page handler!") body.reverse() body = ''.join(body) self.assertInBody(body) def test_06_empty_string_app(self): if not cherrypy.server.using_wsgi: return self.skip('skipped (not using WSGI)... ') self.getPage('/hosted/app3') self.assertHeader('Content-Type', 'text/plain') self.assertInBody('Hello world')
WSGIGraftTests
python
pypa__hatch
tests/conftest.py
{ "start": 1023, "end": 1129 }
class ____(NamedTuple): repo: str index_name: str user: str auth: str ca_cert: str
Devpi
python
run-llama__llama_index
llama-index-core/llama_index/core/chat_engine/types.py
{ "start": 15961, "end": 17409 }
class ____(str, Enum): """Chat Engine Modes.""" SIMPLE = "simple" """Corresponds to `SimpleChatEngine`. Chat with LLM, without making use of a knowledge base. """ CONDENSE_QUESTION = "condense_question" """Corresponds to `CondenseQuestionChatEngine`. First generate a standalone question from conversation context and last message, then query the query engine for a response. """ CONTEXT = "context" """Corresponds to `ContextChatEngine`. First retrieve text from the index using the user's message, then use the context in the system prompt to generate a response. """ CONDENSE_PLUS_CONTEXT = "condense_plus_context" """Corresponds to `CondensePlusContextChatEngine`. First condense a conversation and latest user message to a standalone question. Then build a context for the standalone question from a retriever, Then pass the context along with prompt and user message to LLM to generate a response. """ REACT = "react" """Corresponds to `ReActAgent`. Use a ReAct agent loop with query engine tools. NOTE: Deprecated and unsupported. """ OPENAI = "openai" """Corresponds to `OpenAIAgent`. Use an OpenAI function calling agent loop. NOTE: Deprecated and unsupported. """ BEST = "best" """Select the best chat engine based on the current LLM. Corresponds to `condense_plus_context` """
ChatMode
python
ray-project__ray
python/ray/train/predictor.py
{ "start": 930, "end": 9689 }
class ____(abc.ABC): """Predictors load models from checkpoints to perform inference. .. note:: The base ``Predictor`` class cannot be instantiated directly. Only one of its subclasses can be used. **How does a Predictor work?** Predictors expose a ``predict`` method that accepts an input batch of type ``DataBatchType`` and outputs predictions of the same type as the input batch. When the ``predict`` method is called the following occurs: - The input batch is converted into a pandas DataFrame. Tensor input (like a ``np.ndarray``) will be converted into a single column Pandas Dataframe. - If there is a :ref:`Preprocessor <preprocessor-ref>` saved in the provided :class:`Checkpoint <ray.train.Checkpoint>`, the preprocessor will be used to transform the DataFrame. - The transformed DataFrame will be passed to the model for inference (via the ``predictor._predict_pandas`` method). - The predictions will be outputted by ``predict`` in the same type as the original input. **How do I create a new Predictor?** To implement a new Predictor for your particular framework, you should subclass the base ``Predictor`` and implement the following two methods: 1. ``_predict_pandas``: Given a pandas.DataFrame input, return a pandas.DataFrame containing predictions. 2. ``from_checkpoint``: Logic for creating a Predictor from a :class:`Checkpoint <ray.train.Checkpoint>`. 3. Optionally ``_predict_numpy`` for better performance when working with tensor data to avoid extra copies from Pandas conversions. """ def __init__(self, preprocessor: Optional[Preprocessor] = None): """Subclasseses must call Predictor.__init__() to set a preprocessor.""" self._preprocessor: Optional[Preprocessor] = preprocessor # Whether tensor columns should be automatically cast from/to the tensor # extension type at UDF boundaries. This can be overridden by subclasses. self._cast_tensor_columns = False @classmethod @abc.abstractmethod def from_checkpoint(cls, checkpoint: Checkpoint, **kwargs) -> "Predictor": """Create a specific predictor from a checkpoint. Args: checkpoint: Checkpoint to load predictor data from. kwargs: Arguments specific to predictor implementations. Returns: Predictor: Predictor object. """ raise NotImplementedError @classmethod def from_pandas_udf( cls, pandas_udf: Callable[[pd.DataFrame], pd.DataFrame] ) -> "Predictor": """Create a Predictor from a Pandas UDF. Args: pandas_udf: A function that takes a pandas.DataFrame and other optional kwargs and returns a pandas.DataFrame. """ class PandasUDFPredictor(Predictor): @classmethod def from_checkpoint(cls, checkpoint: Checkpoint, **kwargs) -> "Predictor": return PandasUDFPredictor() def _predict_pandas(self, df, **kwargs) -> "pd.DataFrame": return pandas_udf(df, **kwargs) return PandasUDFPredictor() def get_preprocessor(self) -> Optional[Preprocessor]: """Get the preprocessor to use prior to executing predictions.""" return self._preprocessor def set_preprocessor(self, preprocessor: Optional[Preprocessor]) -> None: """Set the preprocessor to use prior to executing predictions.""" self._preprocessor = preprocessor @classmethod @DeveloperAPI def preferred_batch_format(cls) -> BatchFormat: """Batch format hint for upstream producers to try yielding best block format. The preferred batch format to use if both `_predict_pandas` and `_predict_numpy` are implemented. Defaults to Pandas. Can be overriden by predictor classes depending on the framework type, e.g. TorchPredictor prefers Numpy and XGBoostPredictor prefers Pandas as native batch format. 
""" return BatchFormat.PANDAS @classmethod def _batch_format_to_use(cls) -> BatchFormat: """Determine the batch format to use for the predictor.""" has_pandas_implemented = cls._predict_pandas != Predictor._predict_pandas has_numpy_implemented = cls._predict_numpy != Predictor._predict_numpy if has_pandas_implemented and has_numpy_implemented: return cls.preferred_batch_format() elif has_pandas_implemented: return BatchFormat.PANDAS elif has_numpy_implemented: return BatchFormat.NUMPY else: raise NotImplementedError( f"Predictor {cls.__name__} must implement at least one of " "`_predict_pandas` and `_predict_numpy`." ) def _set_cast_tensor_columns(self): """Enable automatic tensor column casting. If this is called on a predictor, the predictor will cast tensor columns to NumPy ndarrays in the input to the preprocessors and cast tensor columns back to the tensor extension type in the prediction outputs. """ self._cast_tensor_columns = True def predict(self, data: DataBatchType, **kwargs) -> DataBatchType: """Perform inference on a batch of data. Args: data: A batch of input data of type ``DataBatchType``. kwargs: Arguments specific to predictor implementations. These are passed directly to ``_predict_numpy`` or ``_predict_pandas``. Returns: DataBatchType: Prediction result. The return type will be the same as the input type. """ if not hasattr(self, "_preprocessor"): raise NotImplementedError( "Subclasses of Predictor must call Predictor.__init__(preprocessor)." ) try: batch_format = TYPE_TO_ENUM[type(data)] except KeyError: raise RuntimeError( f"Invalid input data type of {type(data)}, supported " f"types: {list(TYPE_TO_ENUM.keys())}" ) if self._preprocessor: data = self._preprocessor.transform_batch(data) batch_format_to_use = self._batch_format_to_use() # We can finish prediction as long as one predict method is implemented. # For prediction, we have to return back in the same format as the input. if batch_format == BatchFormat.PANDAS: if batch_format_to_use == BatchFormat.PANDAS: return self._predict_pandas( _convert_batch_type_to_pandas(data), **kwargs ) elif batch_format_to_use == BatchFormat.NUMPY: return _convert_batch_type_to_pandas( self._predict_numpy(_convert_batch_type_to_numpy(data), **kwargs) ) elif batch_format == BatchFormat.NUMPY: if batch_format_to_use == BatchFormat.PANDAS: return _convert_batch_type_to_numpy( self._predict_pandas(_convert_batch_type_to_pandas(data), **kwargs) ) elif batch_format_to_use == BatchFormat.NUMPY: return self._predict_numpy(_convert_batch_type_to_numpy(data), **kwargs) @DeveloperAPI def _predict_pandas(self, data: "pd.DataFrame", **kwargs) -> "pd.DataFrame": """Perform inference on a Pandas DataFrame. Args: data: A pandas DataFrame to perform predictions on. kwargs: Arguments specific to the predictor implementation. Returns: A pandas DataFrame containing the prediction result. """ raise NotImplementedError @DeveloperAPI def _predict_numpy( self, data: Union[np.ndarray, Dict[str, np.ndarray]], **kwargs ) -> Union[np.ndarray, Dict[str, np.ndarray]]: """Perform inference on a Numpy data. All Predictors working with tensor data (like deep learning predictors) should implement this method. Args: data: A Numpy ndarray or dictionary of ndarrays to perform predictions on. kwargs: Arguments specific to the predictor implementation. Returns: A Numpy ndarray or dictionary of ndarray containing the prediction result. """ raise NotImplementedError def __reduce__(self): raise PredictorNotSerializableException( "Predictor instances are not serializable. 
Instead, you may want " "to serialize a checkpoint and initialize the Predictor with " "Predictor.from_checkpoint." )
Predictor
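The from_pandas_udf() hook above is the quickest way to exercise the predict() flow without a checkpoint; a minimal sketch, assuming Predictor is importable from the module path shown and that the UDF needs only the DataFrame argument:

import pandas as pd
from ray.train.predictor import Predictor

# Wrap a plain pandas function; _predict_pandas delegates to it, and predict()
# returns the result in the same batch format as the input (a DataFrame here).
doubler = Predictor.from_pandas_udf(lambda df: df * 2)

batch = pd.DataFrame({"x": [1, 2, 3]})
print(doubler.predict(batch))
#    x
# 0  2
# 1  4
# 2  6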
python
airbytehq__airbyte
airbyte-integrations/connectors/source-google-analytics-data-api/components.py
{ "start": 476, "end": 1975 }
class ____(RecordExtractor): """ Extractor that merges the output of multiple sub-extractors into a single record. This extractor takes a list of `RecordExtractor` instances (`extractors`), each of which independently extracts records from the response. For each response, the extractor: 1. Invokes each sub-extractor to generate iterables of records. 2. Zips the results together, so that the first record from each extractor is combined, the second from each, and so on. 3. Merges each group of records into a single dictionary using `dict.update()`. The result is a sequence of dictionaries where each dictionary contains the merged keys and values from the corresponding records across all extractors. Example: keys_extractor -> yields: [{"name": "Alice", "age": 30}] extra_data_extractor -> yields: [{"country": "US"}] CombinedExtractor(extractors=[keys_extractor, extra_data_extractor]) -> yields: [{"name": "Alice", "age": 30, "country": "US"}] """ extractors: List[RecordExtractor] def extract_records(self, response: requests.Response) -> Iterable[MutableMapping[Any, Any]]: extractors_records = [extractor.extract_records(response) for extractor in self.extractors] for records in zip(*extractors_records): merged = {} for record in records: merged.update(record) # merge all fields yield merged @dataclass
CombinedExtractor
python
crytic__slither
slither/core/slither_core.py
{ "start": 1201, "end": 25841 }
class ____(Context): """ Slither static analyzer """ def __init__(self) -> None: super().__init__() self._filename: Optional[str] = None self._raw_source_code: Dict[str, str] = {} self._source_code_to_line: Optional[Dict[str, List[str]]] = None self._previous_results_filename: str = "slither.db.json" # TODO: add cli flag to set these variables self.sarif_input: str = "export.sarif" self.sarif_triage: str = "export.sarif.sarifexplorer" self._results_to_hide: List = [] self._previous_results: List = [] # From triaged result self._previous_results_ids: Set[str] = set() # Every slither object has a list of result from detector # Because of the multiple compilation support, we might analyze # Multiple time the same result, so we remove duplicates self._currently_seen_resuts: Set[str] = set() self._paths_to_filter: Set[str] = set() self._paths_to_include: Set[str] = set() self._crytic_compile: Optional[CryticCompile] = None self._generate_patches = False self._exclude_dependencies = False self._markdown_root = "" # If set to true, slither will not catch errors during parsing self._disallow_partial: bool = False self._skip_assembly: bool = False self._show_ignored_findings = False # Maps from file to detector name to the start/end ranges for that detector. # Infinity is used to signal a detector has no end range. self._ignore_ranges: Dict[str, Dict[str, List[Tuple[int, ...]]]] = defaultdict( lambda: defaultdict(lambda: [(-1, -1)]) ) self._compilation_units: List[SlitherCompilationUnit] = [] self._contracts: List[Contract] = [] self._contracts_derived: List[Contract] = [] self._offset_to_min_offset: Optional[Dict[Filename, Dict[int, Set[int]]]] = None self._offset_to_objects: Optional[Dict[Filename, Dict[int, Set[SourceMapping]]]] = None self._offset_to_references: Optional[Dict[Filename, Dict[int, Set[Source]]]] = None self._offset_to_implementations: Optional[Dict[Filename, Dict[int, Set[Source]]]] = None self._offset_to_definitions: Optional[Dict[Filename, Dict[int, Set[Source]]]] = None # Line prefix is used during the source mapping generation # By default we generate file.sol#1 # But we allow to alter this (ex: file.sol:1) for vscode integration self.line_prefix: str = "#" # Use by the echidna printer # If true, partial analysis is allowed self.no_fail = False self.skip_data_dependency = False @property def compilation_units(self) -> List[SlitherCompilationUnit]: return list(self._compilation_units) def add_compilation_unit(self, compilation_unit: SlitherCompilationUnit): self._compilation_units.append(compilation_unit) # endregion ################################################################################### ################################################################################### # region Contracts ################################################################################### ################################################################################### @property def contracts(self) -> List[Contract]: if not self._contracts: all_contracts = [ compilation_unit.contracts for compilation_unit in self._compilation_units ] self._contracts = [item for sublist in all_contracts for item in sublist] return self._contracts @property def contracts_derived(self) -> List[Contract]: if not self._contracts_derived: all_contracts = [ compilation_unit.contracts_derived for compilation_unit in self._compilation_units ] self._contracts_derived = [item for sublist in all_contracts for item in sublist] return self._contracts_derived def get_contract_from_name(self, contract_name: 
Union[str, Constant]) -> List[Contract]: """ Return a contract from a name Args: contract_name (str): name of the contract Returns: Contract """ contracts = [] for compilation_unit in self._compilation_units: contracts += compilation_unit.get_contract_from_name(contract_name) return contracts ################################################################################### ################################################################################### # region Source code ################################################################################### ################################################################################### @property def source_code(self) -> Dict[str, str]: """{filename: source_code (str)}: source code""" return self._raw_source_code @property def filename(self) -> Optional[str]: """str: Filename.""" return self._filename @filename.setter def filename(self, filename: str): self._filename = filename def add_source_code(self, path: str) -> None: """ :param path: :return: """ if self.crytic_compile and path in self.crytic_compile.src_content: self.source_code[path] = self.crytic_compile.src_content[path] else: with open(path, encoding="utf8", newline="") as f: self.source_code[path] = f.read() self.parse_ignore_comments(path) @property def markdown_root(self) -> str: return self._markdown_root def print_functions(self, d: str): """ Export all the functions to dot files """ for compilation_unit in self._compilation_units: for c in compilation_unit.contracts: for f in c.functions: f.cfg_to_dot(os.path.join(d, f"{c.name}.{f.name}.dot")) def _compute_offsets_from_thing(self, thing: SourceMapping): definition = get_definition(thing, self.crytic_compile) references = get_references(thing) implementations = get_all_implementations(thing, self.contracts) # Create the offset mapping for offset in range(definition.start, definition.end + 1): self._offset_to_min_offset[definition.filename][offset].add(definition.start) is_declared_function = ( isinstance(thing, FunctionContract) and thing.contract_declarer == thing.contract ) should_add_to_objects = ( isinstance(thing, (TopLevel, Contract)) or is_declared_function or (isinstance(thing, ContractLevel) and not isinstance(thing, FunctionContract)) ) if should_add_to_objects: self._offset_to_objects[definition.filename][definition.start].add(thing) self._offset_to_definitions[definition.filename][definition.start].add(definition) self._offset_to_implementations[definition.filename][definition.start].update( implementations ) self._offset_to_references[definition.filename][definition.start] |= set(references) # For references should_add_to_objects = ( isinstance(thing, TopLevel) or is_declared_function or (isinstance(thing, ContractLevel) and not isinstance(thing, FunctionContract)) ) for ref in references: for offset in range(ref.start, ref.end + 1): self._offset_to_min_offset[definition.filename][offset].add(ref.start) if should_add_to_objects: self._offset_to_objects[definition.filename][ref.start].add(thing) if is_declared_function: # Only show the nearest lexical definition for declared contract-level functions if ( thing.contract.source_mapping.start < ref.start < thing.contract.source_mapping.end ): self._offset_to_definitions[ref.filename][ref.start].add(definition) else: self._offset_to_definitions[ref.filename][ref.start].add(definition) self._offset_to_implementations[ref.filename][ref.start].update(implementations) self._offset_to_references[ref.filename][ref.start] |= set(references) def 
_compute_offsets_to_ref_impl_decl(self): # pylint: disable=too-many-branches self._offset_to_references = defaultdict(lambda: defaultdict(lambda: set())) self._offset_to_definitions = defaultdict(lambda: defaultdict(lambda: set())) self._offset_to_implementations = defaultdict(lambda: defaultdict(lambda: set())) self._offset_to_objects = defaultdict(lambda: defaultdict(lambda: set())) self._offset_to_min_offset = defaultdict(lambda: defaultdict(lambda: set())) for compilation_unit in self._compilation_units: for contract in compilation_unit.contracts: self._compute_offsets_from_thing(contract) for function in contract.functions_declared: self._compute_offsets_from_thing(function) for variable in function.local_variables: self._compute_offsets_from_thing(variable) for modifier in contract.modifiers_declared: self._compute_offsets_from_thing(modifier) for variable in modifier.local_variables: self._compute_offsets_from_thing(variable) for var in contract.state_variables: self._compute_offsets_from_thing(var) for st in contract.structures: self._compute_offsets_from_thing(st) for enum in contract.enums: self._compute_offsets_from_thing(enum) for event in contract.events: self._compute_offsets_from_thing(event) for typ in contract.type_aliases: self._compute_offsets_from_thing(typ) for enum in compilation_unit.enums_top_level: self._compute_offsets_from_thing(enum) for event in compilation_unit.events_top_level: self._compute_offsets_from_thing(event) for function in compilation_unit.functions_top_level: self._compute_offsets_from_thing(function) for st in compilation_unit.structures_top_level: self._compute_offsets_from_thing(st) for var in compilation_unit.variables_top_level: self._compute_offsets_from_thing(var) for typ in compilation_unit.type_aliases.values(): self._compute_offsets_from_thing(typ) for err in compilation_unit.custom_errors: self._compute_offsets_from_thing(err) for event in compilation_unit.events_top_level: self._compute_offsets_from_thing(event) for import_directive in compilation_unit.import_directives: self._compute_offsets_from_thing(import_directive) for pragma in compilation_unit.pragma_directives: self._compute_offsets_from_thing(pragma) T = TypeVar("T", Source, SourceMapping) def _get_offset( self, mapping: Dict[Filename, Dict[int, Set[T]]], filename_str: str, offset: int ) -> Set[T]: """Get the Source/SourceMapping referenced by the offset. For performance reasons, references are only stored once at the lowest offset. It uses the _offset_to_min_offset mapping to retrieve the correct offsets. As multiple definitions can be related to the same offset, we retrieve all of them. :param mapping: Mapping to search for (objects. references, ...) 
:param filename_str: Filename to consider :param offset: Look-up offset :raises IndexError: When the start offset is not found :return: The corresponding set of Source/SourceMapping """ filename: Filename = self.crytic_compile.filename_lookup(filename_str) start_offsets = self._offset_to_min_offset[filename][offset] if not start_offsets: msg = f"Unable to find reference for offset {offset}" raise IndexError(msg) results = set() for start_offset in start_offsets: results |= mapping[filename][start_offset] return results def offset_to_references(self, filename_str: str, offset: int) -> Set[Source]: if self._offset_to_references is None: self._compute_offsets_to_ref_impl_decl() return self._get_offset(self._offset_to_references, filename_str, offset) def offset_to_implementations(self, filename_str: str, offset: int) -> Set[Source]: if self._offset_to_implementations is None: self._compute_offsets_to_ref_impl_decl() return self._get_offset(self._offset_to_implementations, filename_str, offset) def offset_to_definitions(self, filename_str: str, offset: int) -> Set[Source]: if self._offset_to_definitions is None: self._compute_offsets_to_ref_impl_decl() return self._get_offset(self._offset_to_definitions, filename_str, offset) def offset_to_objects(self, filename_str: str, offset: int) -> Set[SourceMapping]: if self._offset_to_objects is None: self._compute_offsets_to_ref_impl_decl() return self._get_offset(self._offset_to_objects, filename_str, offset) # endregion ################################################################################### ################################################################################### # region Filtering results ################################################################################### ################################################################################### def parse_ignore_comments(self, file: str) -> None: # The first time we check a file, find all start/end ignore comments and memoize them. line_number = 1 while True: line_text = self.crytic_compile.get_code_from_line(file, line_number) if line_text is None: break start_regex = r"^\s*//\s*slither-disable-start\s*([a-zA-Z0-9_,-]*)" end_regex = r"^\s*//\s*slither-disable-end\s*([a-zA-Z0-9_,-]*)" start_match = re.findall(start_regex, line_text.decode("utf8")) end_match = re.findall(end_regex, line_text.decode("utf8")) if start_match: ignored = start_match[0].split(",") if ignored: for check in ignored: vals = self._ignore_ranges[file][check] if len(vals) == 0 or vals[-1][1] != float("inf"): # First item in the array, or the prior item is fully populated. 
self._ignore_ranges[file][check].append((line_number, float("inf"))) else: logger.error( f"Consecutive slither-disable-starts without slither-disable-end in {file}#{line_number}" ) return if end_match: ignored = end_match[0].split(",") if ignored: for check in ignored: vals = self._ignore_ranges[file][check] if len(vals) == 0 or vals[-1][1] != float("inf"): logger.error( f"slither-disable-end without slither-disable-start in {file}#{line_number}" ) return self._ignore_ranges[file][check][-1] = (vals[-1][0], line_number) line_number += 1 def has_ignore_comment(self, r: Dict) -> bool: """ Check if the result has an ignore comment in the file or on the preceding line, in which case, it is not valid """ if not self.crytic_compile: return False mapping_elements_with_lines = ( ( posixpath.normpath(elem["source_mapping"]["filename_absolute"]), elem["source_mapping"]["lines"], ) for elem in r["elements"] if "source_mapping" in elem and "filename_absolute" in elem["source_mapping"] and "lines" in elem["source_mapping"] and len(elem["source_mapping"]["lines"]) > 0 ) for file, lines in mapping_elements_with_lines: # Check if result is within an ignored range. ignore_ranges = self._ignore_ranges[file][r["check"]] + self._ignore_ranges[file]["all"] for start, end in ignore_ranges: # The full check must be within the ignore range to be ignored. if start < lines[0] and end > lines[-1]: return True # Check for next-line matchers. ignore_line_index = min(lines) - 1 ignore_line_text = self.crytic_compile.get_code_from_line(file, ignore_line_index) if ignore_line_text: match = re.findall( r"^\s*//\s*slither-disable-next-line\s*([a-zA-Z0-9_,-]*)", ignore_line_text.decode("utf8"), ) if match: ignored = match[0].split(",") if ignored and ("all" in ignored or any(r["check"] == c for c in ignored)): return True return False def valid_result(self, r: Dict) -> bool: """ Check if the result is valid A result is invalid if: - All its source paths belong to the source path filtered - Or a similar result was reported and saved during a previous run - The --exclude-dependencies flag is set and results are only related to dependencies - There is an ignore comment on the preceding line or in the file """ # Remove duplicate due to the multiple compilation support if r["id"] in self._currently_seen_resuts: return False self._currently_seen_resuts.add(r["id"]) source_mapping_elements = [ elem["source_mapping"].get("filename_absolute", "unknown") for elem in r["elements"] if "source_mapping" in elem ] # Use POSIX-style paths so that filter_paths|include_paths works across different # OSes. Convert to a list so elements don't get consumed and are lost # while evaluating the first pattern source_mapping_elements = list( map(lambda x: pathlib.Path(x).resolve().as_posix() if x else x, source_mapping_elements) ) (matching, paths, msg_err) = ( (True, self._paths_to_include, "--include-paths") if self._paths_to_include else (False, self._paths_to_filter, "--filter-paths") ) for path in paths: try: if any( bool(re.search(_relative_path_format(path), src_mapping)) for src_mapping in source_mapping_elements ): matching = not matching break except re.error: logger.error( f"Incorrect regular expression for {msg_err} {path}." 
"\nSlither supports the Python re format" ": https://docs.python.org/3/library/re.html" ) if r["elements"] and matching: return False if self._show_ignored_findings: return True if self.has_ignore_comment(r): return False if r["id"] in self._previous_results_ids: return False if r["elements"] and self._exclude_dependencies: if all(element["source_mapping"]["is_dependency"] for element in r["elements"]): return False # Conserve previous result filtering. This is conserved for compatibility, but is meant to be removed if r["description"] in [pr["description"] for pr in self._previous_results]: return False return True def load_previous_results(self) -> None: self.load_previous_results_from_sarif() filename = self._previous_results_filename try: if os.path.isfile(filename): with open(filename, encoding="utf8") as f: self._previous_results = json.load(f) if self._previous_results: for r in self._previous_results: if "id" in r: self._previous_results_ids.add(r["id"]) except json.decoder.JSONDecodeError: logger.error(red(f"Impossible to decode {filename}. Consider removing the file")) def load_previous_results_from_sarif(self) -> None: sarif = pathlib.Path(self.sarif_input) triage = pathlib.Path(self.sarif_triage) if not sarif.exists(): return if not triage.exists(): return triaged = read_triage_info(sarif, triage) for id_triaged in triaged: self._previous_results_ids.add(id_triaged) def write_results_to_hide(self) -> None: if not self._results_to_hide: return filename = self._previous_results_filename with open(filename, "w", encoding="utf8") as f: results = self._results_to_hide + self._previous_results json.dump(results, f) def save_results_to_hide(self, results: List[Dict]) -> None: self._results_to_hide += results self.write_results_to_hide() def add_path_to_filter(self, path: str): """ Add path to filter Path are used through direct comparison (no regex) """ self._paths_to_filter.add(path) def add_path_to_include(self, path: str): """ Add path to include Path are used through direct comparison (no regex) """ self._paths_to_include.add(path) # endregion ################################################################################### ################################################################################### # region Crytic compile ################################################################################### ################################################################################### @property def crytic_compile(self) -> CryticCompile: return self._crytic_compile # type: ignore # endregion ################################################################################### ################################################################################### # region Format ################################################################################### ################################################################################### @property def generate_patches(self) -> bool: return self._generate_patches @generate_patches.setter def generate_patches(self, p: bool): self._generate_patches = p # endregion ################################################################################### ################################################################################### # region Internals ################################################################################### ################################################################################### @property def disallow_partial(self) -> bool: """ Return true if partial analyses are disallowed For example, 
codebase with duplicate names will lead to partial analyses :return: """ return self._disallow_partial @property def skip_assembly(self) -> bool: return self._skip_assembly @property def show_ignore_findings(self) -> bool: return self._show_ignored_findings # endregion
SlitherCore
python
openai__openai-python
src/openai/lib/azure.py
{ "start": 1628, "end": 3104 }
class ____(BaseClient[_HttpxClientT, _DefaultStreamT]): _azure_endpoint: httpx.URL | None _azure_deployment: str | None @override def _build_request( self, options: FinalRequestOptions, *, retries_taken: int = 0, ) -> httpx.Request: if options.url in _deployments_endpoints and is_mapping(options.json_data): model = options.json_data.get("model") if model is not None and "/deployments" not in str(self.base_url.path): options.url = f"/deployments/{model}{options.url}" return super()._build_request(options, retries_taken=retries_taken) @override def _prepare_url(self, url: str) -> httpx.URL: """Adjust the URL if the client was configured with an Azure endpoint + deployment and the API feature being called is **not** a deployments-based endpoint (i.e. requires /deployments/deployment-name in the URL path). """ if self._azure_deployment and self._azure_endpoint and url not in _deployments_endpoints: merge_url = httpx.URL(url) if merge_url.is_relative_url: merge_raw_path = ( self._azure_endpoint.raw_path.rstrip(b"/") + b"/openai/" + merge_url.raw_path.lstrip(b"/") ) return self._azure_endpoint.copy_with(raw_path=merge_raw_path) return merge_url return super()._prepare_url(url)
BaseAzureClient
python
huggingface__transformers
src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py
{ "start": 46050, "end": 50790 }
class ____(RobertaPreLayerNormPreTrainedModel): def __init__(self, config): super().__init__(config) self.roberta_prelayernorm = RobertaPreLayerNormModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value >= 2. All the value in this tensor should be always < type_vocab_size. [What are token type IDs?](../glossary#token-type-ids) labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. 
""" num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None flat_inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.roberta_prelayernorm( flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, inputs_embeds=flat_inputs_embeds, return_dict=True, **kwargs, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: # move labels to correct device labels = labels.to(reshaped_logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring
RobertaPreLayerNormForMultipleChoice
python
huggingface__transformers
src/transformers/models/audioflamingo3/modeling_audioflamingo3.py
{ "start": 2921, "end": 8669 }
class ____(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, layer_idx: Optional[int] = None, config: Optional[AudioFlamingo3Config] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal if layer_idx is None and is_decoder: logger.warning_once( f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and " "will to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." ) self.layer_idx = layer_idx self.k_proj = nn.Linear(embed_dim, embed_dim, bias=False) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, cache_position: Optional[torch.Tensor] = None, # TODO: we need a refactor so that the different attention modules can get their specific kwargs # ATM, we have mixed things encoder, decoder, and encoder-decoder attn **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None # determine input shapes bsz, tgt_len = hidden_states.shape[:-1] q_input_shape = (bsz, tgt_len, -1, self.head_dim) # Scaling is susceptible to floating point arithmetics' inprecisions # which can lead to different results (this is dependent from model # to model, e.g. audioflamingo3 is one such case). We therefore keep the # original order of scaling to follow the original implementation # and enforce no scaling (1.0) in the attention call below. query_states = self.q_proj(hidden_states) * self.scaling query_states = query_states.view(*q_input_shape) query_states = query_states.transpose(1, 2).contiguous() # Check is encoder-decoder model is being used. 
Otherwise we'll get `DynamicCache` if past_key_values is not None and isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: # after the first generated id, we can subsequently re-use all key/value_states from cache past_key_values.is_updated[self.layer_idx] = True past_key_values = past_key_values.cross_attention_cache else: past_key_values = past_key_values.self_attention_cache # use key_value_states if cross attention current_states = key_value_states if key_value_states is not None else hidden_states if is_cross_attention and past_key_values and is_updated: # reuse k,v, cross_attentions key_states = past_key_values.layers[self.layer_idx].keys value_states = past_key_values.layers[self.layer_idx].values else: key_states = self.k_proj(current_states).view(bsz, -1, self.num_heads, self.head_dim) value_states = self.v_proj(current_states).view(bsz, -1, self.num_heads, self.head_dim) key_states = key_states.transpose(1, 2).contiguous() value_states = value_states.transpose(1, 2).contiguous() if past_key_values is not None: # save all key/value_states to cache to be re-used for fast auto-regressive generation cache_position = cache_position if not is_cross_attention else None key_states, value_states = past_key_values.update( key_states, value_states, self.layer_idx, {"cache_position": cache_position} ) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=1.0, output_attentions=output_attentions, **kwargs, ) attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() attn_output = self.out_proj(attn_output) return attn_output, attn_weights
AudioFlamingo3Attention
python
scrapy__scrapy
tests/mockserver/http_resources.py
{ "start": 3527, "end": 4021 }
class ____(LeafResource): def render_GET(self, request): n = getarg(request, b"n", 1, type_=float) b = getarg(request, b"b", 1, type_=int) if b: # send headers now and delay body request.write("") self.deferRequest(request, n, self._delayedRender, request, n) return NOT_DONE_YET def _delayedRender(self, request, n): request.write(to_bytes(f"Response delayed for {n:.3f} seconds\n")) request.finish()
Delay
python
pandas-dev__pandas
pandas/tests/series/test_constructors.py
{ "start": 957, "end": 82044 }
class ____: def test_from_ints_with_non_nano_dt64_dtype(self, index_or_series): values = np.arange(10) res = index_or_series(values, dtype="M8[s]") expected = index_or_series(values.astype("M8[s]")) tm.assert_equal(res, expected) res = index_or_series(list(values), dtype="M8[s]") tm.assert_equal(res, expected) def test_from_na_value_and_interval_of_datetime_dtype(self): # GH#41805 ser = Series([None], dtype="interval[datetime64[ns]]") assert ser.isna().all() assert ser.dtype == "interval[datetime64[ns], right]" def test_infer_with_date_and_datetime(self): # GH#49341 pre-2.0 we inferred datetime-and-date to datetime64, which # was inconsistent with Index behavior ts = Timestamp(2016, 1, 1) vals = [ts.to_pydatetime(), ts.date()] ser = Series(vals) expected = Series(vals, dtype=object) tm.assert_series_equal(ser, expected) idx = Index(vals) expected = Index(vals, dtype=object) tm.assert_index_equal(idx, expected) def test_unparsable_strings_with_dt64_dtype(self): # pre-2.0 these would be silently ignored and come back with object dtype vals = ["aa"] msg = "^Unknown datetime string format, unable to parse: aa$" with pytest.raises(ValueError, match=msg): Series(vals, dtype="datetime64[ns]") with pytest.raises(ValueError, match=msg): Series(np.array(vals, dtype=object), dtype="datetime64[ns]") def test_invalid_dtype_conversion_datetime_to_timedelta(self): # GH#60728 vals = Series([NaT, Timestamp(2025, 1, 1)], dtype="datetime64[ns]") msg = r"^Cannot cast DatetimeArray to dtype timedelta64\[ns\]$" with pytest.raises(TypeError, match=msg): Series(vals, dtype="timedelta64[ns]") @pytest.mark.parametrize( "constructor", [ # NOTE: some overlap with test_constructor_empty but that test does not # test for None or an empty generator. # test_constructor_pass_none tests None but only with the index also # passed. 
(lambda idx: Series(index=idx)), (lambda idx: Series(None, index=idx)), (lambda idx: Series({}, index=idx)), (lambda idx: Series((), index=idx)), (lambda idx: Series([], index=idx)), (lambda idx: Series((_ for _ in []), index=idx)), (lambda idx: Series(data=None, index=idx)), (lambda idx: Series(data={}, index=idx)), (lambda idx: Series(data=(), index=idx)), (lambda idx: Series(data=[], index=idx)), (lambda idx: Series(data=(_ for _ in []), index=idx)), ], ) @pytest.mark.parametrize("empty_index", [None, []]) def test_empty_constructor(self, constructor, empty_index): # GH 49573 (addition of empty_index parameter) expected = Series(index=empty_index) result = constructor(empty_index) assert result.dtype == object assert len(result.index) == 0 tm.assert_series_equal(result, expected, check_index_type=True) def test_invalid_dtype(self): # GH15520 msg = "not understood" invalid_list = [Timestamp, "Timestamp", list] for dtype in invalid_list: with pytest.raises(TypeError, match=msg): Series([], name="time", dtype=dtype) def test_invalid_compound_dtype(self): # GH#13296 c_dtype = np.dtype([("a", "i8"), ("b", "f4")]) cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype) with pytest.raises(ValueError, match="Use DataFrame instead"): Series(cdt_arr, index=["A", "B"]) def test_scalar_conversion(self): # Pass in scalar is disabled scalar = Series(0.5) assert not isinstance(scalar, float) def test_scalar_extension_dtype(self, ea_scalar_and_dtype): # GH 28401 ea_scalar, ea_dtype = ea_scalar_and_dtype ser = Series(ea_scalar, index=range(3)) expected = Series([ea_scalar] * 3, dtype=ea_dtype) assert ser.dtype == ea_dtype tm.assert_series_equal(ser, expected) def test_constructor(self, datetime_series, using_infer_string): empty_series = Series() assert datetime_series.index._is_all_dates # Pass in Series derived = Series(datetime_series) assert derived.index._is_all_dates tm.assert_index_equal(derived.index, datetime_series.index) # Ensure new index is not created assert id(datetime_series.index) == id(derived.index) # Mixed type Series mixed = Series(["hello", np.nan], index=[0, 1]) assert mixed.dtype == np.object_ if not using_infer_string else "str" assert np.isnan(mixed[1]) assert not empty_series.index._is_all_dates assert not Series().index._is_all_dates # exception raised is of type ValueError GH35744 with pytest.raises( ValueError, match=r"Data must be 1-dimensional, got ndarray of shape \(3, 3\) instead", ): Series(np.random.default_rng(2).standard_normal((3, 3)), index=np.arange(3)) mixed.name = "Series" rs = Series(mixed).name xp = "Series" assert rs == xp # raise on MultiIndex GH4187 m = MultiIndex.from_arrays([[1, 2], [3, 4]]) msg = "initializing a Series from a MultiIndex is not supported" with pytest.raises(NotImplementedError, match=msg): Series(m) def test_constructor_index_ndim_gt_1_raises(self): # GH#18579 df = DataFrame([[1, 2], [3, 4], [5, 6]], index=[3, 6, 9]) with pytest.raises(ValueError, match="Index data must be 1-dimensional"): Series([1, 3, 2], index=df) @pytest.mark.parametrize("input_class", [list, dict, OrderedDict]) def test_constructor_empty(self, input_class, using_infer_string): empty = Series() empty2 = Series(input_class()) # these are Index() and RangeIndex() which don't compare type equal # but are just .equals tm.assert_series_equal(empty, empty2, check_index_type=False) # With explicit dtype: empty = Series(dtype="float64") empty2 = Series(input_class(), dtype="float64") tm.assert_series_equal(empty, empty2, check_index_type=False) # GH 18515 : with 
dtype=category: empty = Series(dtype="category") empty2 = Series(input_class(), dtype="category") tm.assert_series_equal(empty, empty2, check_index_type=False) if input_class is not list: # With index: empty = Series(index=range(10)) empty2 = Series(input_class(), index=range(10)) tm.assert_series_equal(empty, empty2) # With index and dtype float64: empty = Series(np.nan, index=range(10)) empty2 = Series(input_class(), index=range(10), dtype="float64") tm.assert_series_equal(empty, empty2) # GH 19853 : with empty string, index and dtype str empty = Series("", dtype=str, index=range(3)) if using_infer_string: empty2 = Series("", index=range(3), dtype="str") else: empty2 = Series("", index=range(3)) tm.assert_series_equal(empty, empty2) @pytest.mark.parametrize("input_arg", [np.nan, float("nan")]) def test_constructor_nan(self, input_arg): empty = Series(dtype="float64", index=range(10)) empty2 = Series(input_arg, index=range(10)) tm.assert_series_equal(empty, empty2, check_index_type=False) @pytest.mark.parametrize( "dtype", ["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"], ) @pytest.mark.parametrize("index", [None, Index([])]) def test_constructor_dtype_only(self, dtype, index): # GH-20865 result = Series(dtype=dtype, index=index) assert result.dtype == dtype assert len(result) == 0 def test_constructor_no_data_index_order(self): result = Series(index=["b", "a", "c"]) assert result.index.tolist() == ["b", "a", "c"] def test_constructor_no_data_string_type(self): # GH 22477 result = Series(index=[1], dtype=str) assert np.isnan(result.iloc[0]) @pytest.mark.parametrize("item", ["entry", "ѐ", 13]) def test_constructor_string_element_string_type(self, item): # GH 22477 result = Series(item, index=[1], dtype=str) assert result.iloc[0] == str(item) def test_constructor_dtype_str_na_values(self, string_dtype): # https://github.com/pandas-dev/pandas/issues/21083 ser = Series(["x", None], dtype=string_dtype) result = ser.isna() expected = Series([False, True]) tm.assert_series_equal(result, expected) assert ser.iloc[1] is None ser = Series(["x", np.nan], dtype=string_dtype) assert np.isnan(ser.iloc[1]) def test_constructor_series(self): index1 = ["d", "b", "a", "c"] index2 = sorted(index1) s1 = Series([4, 7, -5, 3], index=index1) s2 = Series(s1, index=index2) tm.assert_series_equal(s2, s1.sort_index()) def test_constructor_iterable(self): # GH 21987 class Iter: def __iter__(self) -> Iterator: yield from range(10) expected = Series(list(range(10)), dtype="int64") result = Series(Iter(), dtype="int64") tm.assert_series_equal(result, expected) def test_constructor_sequence(self): # GH 21987 expected = Series(list(range(10)), dtype="int64") result = Series(range(10), dtype="int64") tm.assert_series_equal(result, expected) def test_constructor_single_str(self): # GH 21987 expected = Series(["abc"]) result = Series("abc") tm.assert_series_equal(result, expected) def test_constructor_list_like(self): # make sure that we are coercing different # list-likes to standard dtypes and not # platform specific expected = Series([1, 2, 3], dtype="int64") for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]: result = Series(obj, index=[0, 1, 2]) tm.assert_series_equal(result, expected) def test_constructor_boolean_index(self): # GH#18579 s1 = Series([1, 2, 3], index=[4, 5, 6]) index = s1 == 2 result = Series([1, 3, 2], index=index) expected = Series([1, 3, 2], index=[False, True, False]) tm.assert_series_equal(result, expected) @pytest.mark.parametrize("dtype", ["bool", 
"int32", "int64", "float64"]) def test_constructor_index_dtype(self, dtype): # GH 17088 s = Series(Index([0, 2, 4]), dtype=dtype) assert s.dtype == dtype @pytest.mark.parametrize( "input_vals", [ [1, 2], ["1", "2"], list(date_range("1/1/2011", periods=2, freq="h")), list(date_range("1/1/2011", periods=2, freq="h", tz="US/Eastern")), [Interval(left=0, right=5)], ], ) def test_constructor_list_str(self, input_vals, string_dtype): # GH 16605 # Ensure that data elements from a list are converted to strings # when dtype is str, 'str', or 'U' result = Series(input_vals, dtype=string_dtype) expected = Series(input_vals).astype(string_dtype) tm.assert_series_equal(result, expected) def test_constructor_list_str_na(self, string_dtype): result = Series([1.0, 2.0, np.nan], dtype=string_dtype) expected = Series(["1.0", "2.0", np.nan], dtype=object) tm.assert_series_equal(result, expected) assert np.isnan(result[2]) def test_constructor_generator(self): gen = (i for i in range(10)) result = Series(gen) exp = Series(range(10)) tm.assert_series_equal(result, exp) # same but with non-default index gen = (i for i in range(10)) result = Series(gen, index=range(10, 20)) exp.index = range(10, 20) tm.assert_series_equal(result, exp) def test_constructor_map(self): # GH8909 m = (x for x in range(10)) result = Series(m) exp = Series(range(10)) tm.assert_series_equal(result, exp) # same but with non-default index m = (x for x in range(10)) result = Series(m, index=range(10, 20)) exp.index = range(10, 20) tm.assert_series_equal(result, exp) def test_constructor_categorical(self): msg = "Constructing a Categorical with a dtype and values containing" with tm.assert_produces_warning(Pandas4Warning, match=msg): cat = Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"]) res = Series(cat) tm.assert_categorical_equal(res.values, cat) # can cast to a new dtype result = Series(Categorical([1, 2, 3]), dtype="int64") expected = Series([1, 2, 3], dtype="int64") tm.assert_series_equal(result, expected) def test_construct_from_categorical_with_dtype(self): # GH12574 ser = Series(Categorical([1, 2, 3]), dtype="category") assert isinstance(ser.dtype, CategoricalDtype) def test_construct_intlist_values_category_dtype(self): ser = Series([1, 2, 3], dtype="category") assert isinstance(ser.dtype, CategoricalDtype) def test_constructor_categorical_with_coercion(self): factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"]) # test basic creation / coercion of categoricals s = Series(factor, name="A") assert s.dtype == "category" assert len(s) == len(factor) # in a frame df = DataFrame({"A": factor}) result = df["A"] tm.assert_series_equal(result, s) result = df.iloc[:, 0] tm.assert_series_equal(result, s) assert len(df) == len(factor) df = DataFrame({"A": s}) result = df["A"] tm.assert_series_equal(result, s) assert len(df) == len(factor) # multiples df = DataFrame({"A": s, "B": s, "C": 1}) result1 = df["A"] result2 = df["B"] tm.assert_series_equal(result1, s) tm.assert_series_equal(result2, s, check_names=False) assert result2.name == "B" assert len(df) == len(factor) def test_constructor_categorical_with_coercion2(self): # GH8623 x = DataFrame( [[1, "John P. Doe"], [2, "Jane Dove"], [1, "John P. 
Doe"]], columns=["person_id", "person_name"], ) x["person_name"] = Categorical(x.person_name) # doing this breaks transform expected = x.iloc[0].person_name result = x.person_name.iloc[0] assert result == expected result = x.person_name[0] assert result == expected result = x.person_name.loc[0] assert result == expected def test_constructor_series_to_categorical(self): # see GH#16524: test conversion of Series to Categorical series = Series(["a", "b", "c"]) result = Series(series, dtype="category") expected = Series(["a", "b", "c"], dtype="category") tm.assert_series_equal(result, expected) def test_constructor_categorical_dtype(self): result = Series( ["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True) ) assert isinstance(result.dtype, CategoricalDtype) tm.assert_index_equal(result.cat.categories, Index(["a", "b", "c"])) assert result.cat.ordered result = Series(["a", "b"], dtype=CategoricalDtype(["b", "a"])) assert isinstance(result.dtype, CategoricalDtype) tm.assert_index_equal(result.cat.categories, Index(["b", "a"])) assert result.cat.ordered is False # GH 19565 - Check broadcasting of scalar with Categorical dtype result = Series( "a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True) ) expected = Series( ["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True) ) tm.assert_series_equal(result, expected) def test_constructor_categorical_string(self): # GH 26336: the string 'category' maintains existing CategoricalDtype cdt = CategoricalDtype(categories=list("dabc"), ordered=True) expected = Series(list("abcabc"), dtype=cdt) # Series(Categorical, dtype='category') keeps existing dtype cat = Categorical(list("abcabc"), dtype=cdt) result = Series(cat, dtype="category") tm.assert_series_equal(result, expected) # Series(Series[Categorical], dtype='category') keeps existing dtype result = Series(result, dtype="category") tm.assert_series_equal(result, expected) def test_categorical_sideeffects_free(self): # Passing a categorical to a Series and then changing values in either # the series or the categorical should not change the values in the # other one, IF you specify copy! 
cat = Categorical(["a", "b", "c", "a"]) s = Series(cat, copy=True) assert s.cat is not cat s = s.cat.rename_categories([1, 2, 3]) exp_s = np.array([1, 2, 3, 1], dtype=np.int64) exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_) tm.assert_numpy_array_equal(s.__array__(), exp_s) tm.assert_numpy_array_equal(cat.__array__(), exp_cat) # setting s[0] = 2 exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64) tm.assert_numpy_array_equal(s.__array__(), exp_s2) tm.assert_numpy_array_equal(cat.__array__(), exp_cat) # however, copy is False by default # so this WILL change values cat = Categorical(["a", "b", "c", "a"]) s = Series(cat, copy=False) assert s._values is cat s = s.cat.rename_categories([1, 2, 3]) assert s._values is not cat exp_s = np.array([1, 2, 3, 1], dtype=np.int64) tm.assert_numpy_array_equal(s.__array__(), exp_s) s[0] = 2 exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64) tm.assert_numpy_array_equal(s.__array__(), exp_s2) def test_unordered_compare_equal(self): left = Series(["a", "b", None], dtype=CategoricalDtype(["a", "b"])) right = Series(Categorical(["a", "b", np.nan], categories=["a", "b"])) tm.assert_series_equal(left, right) def test_constructor_maskedarray(self): data = ma.masked_all((3,), dtype=float) result = Series(data) expected = Series([np.nan, np.nan, np.nan]) tm.assert_series_equal(result, expected) data[0] = 0.0 data[2] = 2.0 index = ["a", "b", "c"] result = Series(data, index=index) expected = Series([0.0, np.nan, 2.0], index=index) tm.assert_series_equal(result, expected) data[1] = 1.0 result = Series(data, index=index) expected = Series([0.0, 1.0, 2.0], index=index) tm.assert_series_equal(result, expected) data = ma.masked_all((3,), dtype=int) result = Series(data) expected = Series([np.nan, np.nan, np.nan], dtype=float) tm.assert_series_equal(result, expected) data[0] = 0 data[2] = 2 index = ["a", "b", "c"] result = Series(data, index=index) expected = Series([0, np.nan, 2], index=index, dtype=float) tm.assert_series_equal(result, expected) data[1] = 1 result = Series(data, index=index) expected = Series([0, 1, 2], index=index, dtype=int) with pytest.raises(AssertionError, match="Series classes are different"): # TODO should this be raising at all? # https://github.com/pandas-dev/pandas/issues/56131 tm.assert_series_equal(result, expected) data = ma.masked_all((3,), dtype=bool) result = Series(data) expected = Series([np.nan, np.nan, np.nan], dtype=object) tm.assert_series_equal(result, expected) data[0] = True data[2] = False index = ["a", "b", "c"] result = Series(data, index=index) expected = Series([True, np.nan, False], index=index, dtype=object) tm.assert_series_equal(result, expected) data[1] = True result = Series(data, index=index) expected = Series([True, True, False], index=index, dtype=bool) with pytest.raises(AssertionError, match="Series classes are different"): # TODO should this be raising at all? 
# https://github.com/pandas-dev/pandas/issues/56131 tm.assert_series_equal(result, expected) data = ma.masked_all((3,), dtype="M8[ns]") result = Series(data) expected = Series([iNaT, iNaT, iNaT], dtype="M8[ns]") tm.assert_series_equal(result, expected) data[0] = datetime(2001, 1, 1) data[2] = datetime(2001, 1, 3) index = ["a", "b", "c"] result = Series(data, index=index) expected = Series( [datetime(2001, 1, 1), iNaT, datetime(2001, 1, 3)], index=index, dtype="M8[ns]", ) tm.assert_series_equal(result, expected) data[1] = datetime(2001, 1, 2) result = Series(data, index=index) expected = Series( [datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 3)], index=index, dtype="M8[ns]", ) tm.assert_series_equal(result, expected) def test_constructor_maskedarray_hardened(self): # Check numpy masked arrays with hard masks -- from GH24574 data = ma.masked_all((3,), dtype=float).harden_mask() result = Series(data) expected = Series([np.nan, np.nan, np.nan]) tm.assert_series_equal(result, expected) def test_series_ctor_plus_datetimeindex(self): rng = date_range("20090415", "20090519", freq="B") data = dict.fromkeys(rng, 1) result = Series(data, index=rng) assert result.index.is_(rng) def test_constructor_default_index(self): s = Series([0, 1, 2]) tm.assert_index_equal(s.index, Index(range(3)), exact=True) @pytest.mark.parametrize( "input", [ [1, 2, 3], (1, 2, 3), list(range(3)), Categorical(["a", "b", "a"]), (i for i in range(3)), (x for x in range(3)), ], ) def test_constructor_index_mismatch(self, input): # GH 19342 # test that construction of a Series with an index of different length # raises an error msg = r"Length of values \(3\) does not match length of index \(4\)" with pytest.raises(ValueError, match=msg): Series(input, index=np.arange(4)) def test_constructor_numpy_scalar(self): # GH 19342 # construction with a numpy scalar # should not raise result = Series(np.array(100), index=np.arange(4), dtype="int64") expected = Series(100, index=np.arange(4), dtype="int64") tm.assert_series_equal(result, expected) def test_constructor_broadcast_list(self): # GH 19342 # construction with single-element container and index # should raise msg = r"Length of values \(1\) does not match length of index \(3\)" with pytest.raises(ValueError, match=msg): Series(["foo"], index=["a", "b", "c"]) def test_constructor_corner(self): df = DataFrame(range(5), index=date_range("2020-01-01", periods=5)) objs = [df, df] s = Series(objs, index=[0, 1]) assert isinstance(s, Series) def test_constructor_sanitize(self): s = Series(np.array([1.0, 1.0, 8.0]), dtype="i8") assert s.dtype == np.dtype("i8") msg = r"Cannot convert non-finite values \(NA or inf\) to integer" with pytest.raises(IntCastingNaNError, match=msg): Series(np.array([1.0, 1.0, np.nan]), copy=True, dtype="i8") def test_constructor_copy(self): # GH15125 # test dtype parameter has no side effects on copy=True for data in [[1.0], np.array([1.0])]: x = Series(data) y = Series(x, copy=True, dtype=float) # copy=True maintains original data in Series tm.assert_series_equal(x, y) # changes to origin of copy does not affect the copy x[0] = 2.0 assert not x.equals(y) assert x[0] == 2.0 assert y[0] == 1.0 @pytest.mark.parametrize( "index", [ date_range("20170101", periods=3, tz="US/Eastern"), date_range("20170101", periods=3), timedelta_range("1 day", periods=3), period_range("2012Q1", periods=3, freq="Q"), Index(list("abc")), Index([1, 2, 3]), RangeIndex(0, 3), ], ids=lambda x: type(x).__name__, ) def test_constructor_limit_copies(self, index): # GH 17449 # 
limit copies of input s = Series(index) # we make 1 copy; this is just a smoke test here assert s._mgr.blocks[0].values is not index def test_constructor_shallow_copy(self): # constructing a Series from Series with copy=False should still # give a "shallow" copy (share data, not attributes) # https://github.com/pandas-dev/pandas/issues/49523 s = Series([1, 2, 3]) s_orig = s.copy() s2 = Series(s) assert s2._mgr is not s._mgr # Overwriting index of s2 doesn't change s s2.index = ["a", "b", "c"] tm.assert_series_equal(s, s_orig) def test_constructor_pass_none(self): s = Series(None, index=range(5)) assert s.dtype == np.float64 s = Series(None, index=range(5), dtype=object) assert s.dtype == np.object_ # GH 7431 # inference on the index s = Series(index=np.array([None])) expected = Series(index=Index([None])) tm.assert_series_equal(s, expected) def test_constructor_pass_nan_nat(self): # GH 13467 exp = Series([np.nan, np.nan], dtype=np.float64) assert exp.dtype == np.float64 tm.assert_series_equal(Series([np.nan, np.nan]), exp) tm.assert_series_equal(Series(np.array([np.nan, np.nan])), exp) exp = Series([NaT, NaT]) assert exp.dtype == "datetime64[s]" tm.assert_series_equal(Series([NaT, NaT]), exp) tm.assert_series_equal(Series(np.array([NaT, NaT])), exp) tm.assert_series_equal(Series([NaT, np.nan]), exp) tm.assert_series_equal(Series(np.array([NaT, np.nan])), exp) tm.assert_series_equal(Series([np.nan, NaT]), exp) tm.assert_series_equal(Series(np.array([np.nan, NaT])), exp) def test_constructor_cast(self): msg = "could not convert string to float" with pytest.raises(ValueError, match=msg): Series(["a", "b", "c"], dtype=float) def test_constructor_signed_int_overflow_raises(self): # GH#41734 disallow silent overflow, enforced in 2.0 if np_version_gt2: msg = "The elements provided in the data cannot all be casted to the dtype" err = OverflowError else: msg = "Values are too large to be losslessly converted" err = ValueError with pytest.raises(err, match=msg): Series([1, 200, 923442], dtype="int8") with pytest.raises(err, match=msg): Series([1, 200, 923442], dtype="uint8") @pytest.mark.parametrize( "values", [ np.array([1], dtype=np.uint16), np.array([1], dtype=np.uint32), np.array([1], dtype=np.uint64), [np.uint16(1)], [np.uint32(1)], [np.uint64(1)], ], ) def test_constructor_numpy_uints(self, values): # GH#47294 value = values[0] result = Series(values) assert result[0].dtype == value.dtype assert result[0] == value def test_constructor_unsigned_dtype_overflow(self, any_unsigned_int_numpy_dtype): # see gh-15832 if np_version_gt2: msg = ( f"The elements provided in the data cannot " f"all be casted to the dtype {any_unsigned_int_numpy_dtype}" ) else: msg = "Trying to coerce negative values to unsigned integers" with pytest.raises(OverflowError, match=msg): Series([-1], dtype=any_unsigned_int_numpy_dtype) def test_constructor_floating_data_int_dtype(self, frame_or_series): # GH#40110 arr = np.random.default_rng(2).standard_normal(2) # Long-standing behavior (for Series, new in 2.0 for DataFrame) # has been to ignore the dtype on these; # not clear if this is what we want long-term # expected = frame_or_series(arr) # GH#49599 as of 2.0 we raise instead of silently retaining float dtype msg = "Trying to coerce float values to integer" with pytest.raises(ValueError, match=msg): frame_or_series(arr, dtype="i8") with pytest.raises(ValueError, match=msg): frame_or_series(list(arr), dtype="i8") # pre-2.0, when we had NaNs, we silently ignored the integer dtype arr[0] = np.nan # expected = 
frame_or_series(arr) msg = r"Cannot convert non-finite values \(NA or inf\) to integer" with pytest.raises(IntCastingNaNError, match=msg): frame_or_series(arr, dtype="i8") exc = IntCastingNaNError if frame_or_series is Series: # TODO: try to align these exc = ValueError msg = "cannot convert float NaN to integer" with pytest.raises(exc, match=msg): # same behavior if we pass list instead of the ndarray frame_or_series(list(arr), dtype="i8") # float array that can be losslessly cast to integers arr = np.array([1.0, 2.0], dtype="float64") expected = frame_or_series(arr.astype("i8")) obj = frame_or_series(arr, dtype="i8") tm.assert_equal(obj, expected) obj = frame_or_series(list(arr), dtype="i8") tm.assert_equal(obj, expected) def test_constructor_coerce_float_fail(self, any_int_numpy_dtype): # see gh-15832 # Updated: make sure we treat this list the same as we would treat # the equivalent ndarray # GH#49599 pre-2.0 we silently retained float dtype, in 2.0 we raise vals = [1, 2, 3.5] msg = "Trying to coerce float values to integer" with pytest.raises(ValueError, match=msg): Series(vals, dtype=any_int_numpy_dtype) with pytest.raises(ValueError, match=msg): Series(np.array(vals), dtype=any_int_numpy_dtype) def test_constructor_coerce_float_valid(self, float_numpy_dtype): s = Series([1, 2, 3.5], dtype=float_numpy_dtype) expected = Series([1, 2, 3.5]).astype(float_numpy_dtype) tm.assert_series_equal(s, expected) def test_constructor_invalid_coerce_ints_with_float_nan(self, any_int_numpy_dtype): # GH 22585 # Updated: make sure we treat this list the same as we would treat the # equivalent ndarray vals = [1, 2, np.nan] # pre-2.0 this would return with a float dtype, in 2.0 we raise msg = "cannot convert float NaN to integer" with pytest.raises(ValueError, match=msg): Series(vals, dtype=any_int_numpy_dtype) msg = r"Cannot convert non-finite values \(NA or inf\) to integer" with pytest.raises(IntCastingNaNError, match=msg): Series(np.array(vals), dtype=any_int_numpy_dtype) def test_constructor_dtype_no_cast(self): # see gh-1572 s = Series([1, 2, 3]) s2 = Series(s, dtype=np.int64) s2[1] = 5 assert s[1] == 2 def test_constructor_datelike_coercion(self): # GH 9477 # incorrectly inferring on dateimelike looking when object dtype is # specified s = Series([Timestamp("20130101"), "NOV"], dtype=object) assert s.iloc[0] == Timestamp("20130101") assert s.iloc[1] == "NOV" assert s.dtype == object def test_constructor_datelike_coercion2(self): # the dtype was being reset on the slicing and re-inferred to datetime # even thought the blocks are mixed belly = "216 3T19".split() wing1 = "2T15 4H19".split() wing2 = "416 4T20".split() mat = pd.to_datetime("2016-01-22 2019-09-07".split()) df = DataFrame({"wing1": wing1, "wing2": wing2, "mat": mat}, index=belly) result = df.loc["3T19"] assert result.dtype == object result = df.loc["216"] assert result.dtype == object def test_constructor_mixed_int_and_timestamp(self, frame_or_series): # specifically Timestamp with nanos, not datetimes objs = [Timestamp(9), 10, NaT._value] result = frame_or_series(objs, dtype="M8[ns]") expected = frame_or_series([Timestamp(9), Timestamp(10), NaT]) tm.assert_equal(result, expected) def test_constructor_datetimes_with_nulls(self): # gh-15869 for arr in [ np.array([None, None, None, None, datetime.now(), None]), np.array([None, None, datetime.now(), None]), ]: result = Series(arr) assert result.dtype == "M8[us]" def test_constructor_dtype_datetime64(self): s = Series(iNaT, dtype="M8[ns]", index=range(5)) assert isna(s).all() # in theory 
this should be all nulls, but since # we are not specifying a dtype is ambiguous s = Series(iNaT, index=range(5)) assert not isna(s).all() s = Series(np.nan, dtype="M8[ns]", index=range(5)) assert isna(s).all() s = Series([datetime(2001, 1, 2, 0, 0), iNaT], dtype="M8[ns]") assert isna(s[1]) assert s.dtype == "M8[ns]" s = Series([datetime(2001, 1, 2, 0, 0), np.nan], dtype="M8[ns]") assert isna(s[1]) assert s.dtype == "M8[ns]" def test_constructor_dtype_datetime64_10(self): # GH3416 pydates = [datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3)] dates = [np.datetime64(x) for x in pydates] ser = Series(dates) assert ser.dtype == "M8[us]" ser.iloc[0] = np.nan assert ser.dtype == "M8[us]" # GH3414 related expected = Series(pydates, dtype="datetime64[ms]") result = Series(Series(dates).astype(np.int64) / 1000, dtype="M8[ms]") tm.assert_series_equal(result, expected) result = Series(dates, dtype="datetime64[ms]") tm.assert_series_equal(result, expected) expected = Series( [NaT, datetime(2013, 1, 2), datetime(2013, 1, 3)], dtype="datetime64[ns]" ) result = Series([np.nan] + dates[1:], dtype="datetime64[ns]") tm.assert_series_equal(result, expected) def test_constructor_dtype_datetime64_11(self): pydates = [datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3)] dates = [np.datetime64(x) for x in pydates] dts = Series(dates, dtype="datetime64[ns]") # valid astype dts.astype("int64") # invalid casting msg = r"Converting from datetime64\[ns\] to int32 is not supported" with pytest.raises(TypeError, match=msg): dts.astype("int32") # ints are ok # we test with np.int64 to get similar results on # windows / 32-bit platforms result = Series(dts, dtype=np.int64) expected = Series(dts.astype(np.int64)) tm.assert_series_equal(result, expected) def test_constructor_dtype_datetime64_9(self): # invalid dates can be help as object result = Series([datetime(2, 1, 1)]) assert result[0] == datetime(2, 1, 1, 0, 0) result = Series([datetime(3000, 1, 1)]) assert result[0] == datetime(3000, 1, 1, 0, 0) def test_constructor_dtype_datetime64_8(self): # don't mix types result = Series([Timestamp("20130101"), 1], index=["a", "b"]) assert result["a"] == Timestamp("20130101") assert result["b"] == 1 def test_constructor_dtype_datetime64_7(self): # GH6529 # coerce datetime64 non-ns properly dates = date_range("01-Jan-2015", "01-Dec-2015", freq="ME") values2 = dates.view(np.ndarray).astype("datetime64[ns]") expected = Series(values2, index=dates) for unit in ["s", "D", "ms", "us", "ns"]: dtype = np.dtype(f"M8[{unit}]") values1 = dates.view(np.ndarray).astype(dtype) result = Series(values1, dates) if unit == "D": # for unit="D" we cast to nearest-supported reso, i.e. 
"s" dtype = np.dtype("M8[s]") assert result.dtype == dtype tm.assert_series_equal(result, expected.astype(dtype)) # GH 13876 # coerce to non-ns to object properly expected = Series(values2, index=dates, dtype=object) for dtype in ["s", "D", "ms", "us", "ns"]: values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]") result = Series(values1, index=dates, dtype=object) tm.assert_series_equal(result, expected) # leave datetime.date alone dates2 = np.array([d.date() for d in dates.to_pydatetime()], dtype=object) series1 = Series(dates2, dates) tm.assert_numpy_array_equal(series1.values, dates2) assert series1.dtype == object def test_constructor_dtype_datetime64_6(self): # as of 2.0, these no longer infer datetime64 based on the strings, # matching the Index behavior ser = Series([None, NaT, "2013-08-05 15:30:00.000001"]) assert ser.dtype == object ser = Series([np.nan, NaT, "2013-08-05 15:30:00.000001"]) assert ser.dtype == object ser = Series([NaT, None, "2013-08-05 15:30:00.000001"]) assert ser.dtype == object ser = Series([NaT, np.nan, "2013-08-05 15:30:00.000001"]) assert ser.dtype == object def test_constructor_dtype_datetime64_5(self): # tz-aware (UTC and other tz's) # GH 8411 dr = date_range("20130101", periods=3) assert Series(dr).iloc[0].tz is None dr = date_range("20130101", periods=3, tz="UTC") assert str(Series(dr).iloc[0].tz) == "UTC" dr = date_range("20130101", periods=3, tz="US/Eastern") assert str(Series(dr).iloc[0].tz) == "US/Eastern" def test_constructor_dtype_datetime64_4(self): # non-convertible ser = Series([1479596223000, -1479590, NaT]) assert ser.dtype == "object" assert ser[2] is NaT assert "NaT" in str(ser) def test_constructor_dtype_datetime64_3(self): # if we passed a NaT it remains ser = Series([datetime(2010, 1, 1), datetime(2, 1, 1), NaT]) assert ser.dtype == "M8[us]" assert ser[2] is NaT assert "NaT" in str(ser) def test_constructor_dtype_datetime64_2(self): # if we passed a nan it remains ser = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan]) assert ser.dtype == "M8[us]" assert ser[2] is NaT assert "NaT" in str(ser) def test_constructor_with_datetime_tz(self): # 8260 # support datetime64 with tz dr = date_range("20130101", periods=3, tz="US/Eastern", unit="ns") s = Series(dr) assert s.dtype.name == "datetime64[ns, US/Eastern]" assert s.dtype == "datetime64[ns, US/Eastern]" assert isinstance(s.dtype, DatetimeTZDtype) assert "datetime64[ns, US/Eastern]" in str(s) # export result = s.values assert isinstance(result, np.ndarray) assert result.dtype == "datetime64[ns]" exp = DatetimeIndex(result) exp = exp.tz_localize("UTC").tz_convert(tz=s.dt.tz) tm.assert_index_equal(dr, exp) # indexing result = s.iloc[0] assert result == Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern") result = s[0] assert result == Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern") result = s[Series([True, True, False], index=s.index)] tm.assert_series_equal(result, s[0:2]) result = s.iloc[0:1] tm.assert_series_equal(result, Series(dr[0:1])) # concat result = pd.concat([s.iloc[0:1], s.iloc[1:]]) tm.assert_series_equal(result, s) # short str assert "datetime64[ns, US/Eastern]" in str(s) # formatting with NaT result = s.shift() assert "datetime64[ns, US/Eastern]" in str(result) assert "NaT" in str(result) result = DatetimeIndex(s, freq="infer") tm.assert_index_equal(result, dr) def test_constructor_with_datetime_tz5(self): # long str ser = Series(date_range("20130101", periods=1000, tz="US/Eastern", unit="ns")) assert "datetime64[ns, US/Eastern]" in str(ser) def 
test_constructor_with_datetime_tz4(self): # inference ser = Series( [ Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific").as_unit("s"), Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific").as_unit("s"), ] ) assert ser.dtype == "datetime64[s, US/Pacific]" assert lib.infer_dtype(ser, skipna=True) == "datetime64" def test_constructor_with_datetime_tz3(self): ser = Series( [ Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"), Timestamp("2013-01-02 14:00:00-0800", tz="US/Eastern"), ] ) assert ser.dtype == "object" assert lib.infer_dtype(ser, skipna=True) == "datetime" def test_constructor_with_datetime_tz2(self): # with all NaT ser = Series(NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]") dti = DatetimeIndex(["NaT", "NaT"], tz="US/Eastern").as_unit("ns") expected = Series(dti) tm.assert_series_equal(ser, expected) def test_constructor_no_partial_datetime_casting(self): # GH#40111 vals = [ "nan", Timestamp("1990-01-01"), "2015-03-14T16:15:14.123-08:00", "2019-03-04T21:56:32.620-07:00", None, ] ser = Series(vals) assert all(ser[i] is vals[i] for i in range(len(vals))) @pytest.mark.parametrize("arr_dtype", [np.int64, np.float64]) @pytest.mark.parametrize("kind", ["M", "m"]) @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"]) def test_construction_to_datetimelike_unit(self, arr_dtype, kind, unit): # tests all units # gh-19223 # TODO: GH#19223 was about .astype, doesn't belong here dtype = f"{kind}8[{unit}]" arr = np.array([1, 2, 3], dtype=arr_dtype) ser = Series(arr) result = ser.astype(dtype) expected = Series(arr.astype(dtype)) if unit in ["ns", "us", "ms", "s"]: assert result.dtype == dtype assert expected.dtype == dtype else: # Otherwise we cast to nearest-supported unit, i.e. seconds assert result.dtype == f"{kind}8[s]" assert expected.dtype == f"{kind}8[s]" tm.assert_series_equal(result, expected) @pytest.mark.parametrize("arg", ["2013-01-01 00:00:00", NaT, np.nan, None]) def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg): # GH 17415: With naive string result = Series([arg], dtype="datetime64[ns, CET]") expected = Series([Timestamp(arg)], dtype="M8[ns]").dt.tz_localize("CET") tm.assert_series_equal(result, expected) def test_constructor_datetime64_bigendian(self): # GH#30976 ms = np.datetime64(1, "ms") arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]") result = Series(arr) expected = Series([Timestamp(ms)]).astype("M8[ms]") assert expected.dtype == "M8[ms]" tm.assert_series_equal(result, expected) @pytest.mark.parametrize("interval_constructor", [IntervalIndex, IntervalArray]) def test_construction_interval(self, interval_constructor): # construction from interval & array of intervals intervals = interval_constructor.from_breaks(np.arange(3), closed="right") result = Series(intervals) assert result.dtype == "interval[int64, right]" tm.assert_index_equal(Index(result.values), Index(intervals)) @pytest.mark.parametrize( "data_constructor", [list, np.array], ids=["list", "ndarray[object]"] ) def test_constructor_infer_interval(self, data_constructor): # GH 23563: consistent closed results in interval dtype data = [Interval(0, 1), Interval(0, 2), None] result = Series(data_constructor(data)) expected = Series(IntervalArray(data)) assert result.dtype == "interval[float64, right]" tm.assert_series_equal(result, expected) @pytest.mark.parametrize( "data_constructor", [list, np.array], ids=["list", "ndarray[object]"] ) def test_constructor_interval_mixed_closed(self, data_constructor): # GH 23563: mixed closed results in object dtype (not 
interval dtype) data = [Interval(0, 1, closed="both"), Interval(0, 2, closed="neither")] result = Series(data_constructor(data)) assert result.dtype == object assert result.tolist() == data def test_construction_consistency(self): # make sure that we are not re-localizing upon construction # GH 14928 ser = Series(date_range("20130101", periods=3, tz="US/Eastern")) result = Series(ser, dtype=ser.dtype) tm.assert_series_equal(result, ser) result = Series(ser.dt.tz_convert("UTC"), dtype=ser.dtype) tm.assert_series_equal(result, ser) # Pre-2.0 dt64 values were treated as utc, which was inconsistent # with DatetimeIndex, which treats them as wall times, see GH#33401 result = Series(ser.values, dtype=ser.dtype) expected = Series(ser.values).dt.tz_localize(ser.dtype.tz) tm.assert_series_equal(result, expected) with tm.assert_produces_warning(None): # one suggested alternative to the deprecated (changed in 2.0) usage middle = Series(ser.values).dt.tz_localize("UTC") result = middle.dt.tz_convert(ser.dtype.tz) tm.assert_series_equal(result, ser) with tm.assert_produces_warning(None): # the other suggested alternative to the deprecated usage result = Series(ser.values.view("int64"), dtype=ser.dtype) tm.assert_series_equal(result, ser) @pytest.mark.parametrize( "data_constructor", [list, np.array], ids=["list", "ndarray[object]"] ) def test_constructor_infer_period(self, data_constructor): data = [Period("2000", "D"), Period("2001", "D"), None] result = Series(data_constructor(data)) expected = Series(period_array(data)) tm.assert_series_equal(result, expected) assert result.dtype == "Period[D]" @pytest.mark.xfail(reason="PeriodDtype Series not supported yet") def test_construct_from_ints_including_iNaT_scalar_period_dtype(self): series = Series([0, 1000, 2000, pd._libs.iNaT], dtype="period[D]") val = series[3] assert isna(val) series[2] = val assert isna(series[2]) def test_constructor_period_incompatible_frequency(self): data = [Period("2000", "D"), Period("2001", "Y")] result = Series(data) assert result.dtype == object assert result.tolist() == data def test_constructor_periodindex(self): # GH7932 # converting a PeriodIndex when put in a Series pi = period_range("20130101", periods=5, freq="D") s = Series(pi) assert s.dtype == "Period[D]" expected = Series(pi.astype(object)) assert expected.dtype == object def test_constructor_dict(self): d = {"a": 0.0, "b": 1.0, "c": 2.0} result = Series(d) expected = Series(d, index=sorted(d.keys())) tm.assert_series_equal(result, expected) result = Series(d, index=["b", "c", "d", "a"]) expected = Series([1, 2, np.nan, 0], index=["b", "c", "d", "a"]) tm.assert_series_equal(result, expected) pidx = period_range("2020-01-01", periods=10, freq="D") d = {pidx[0]: 0, pidx[1]: 1} result = Series(d, index=pidx) expected = Series(np.nan, pidx, dtype=np.float64) expected.iloc[0] = 0 expected.iloc[1] = 1 tm.assert_series_equal(result, expected) def test_constructor_dict_list_value_explicit_dtype(self): # GH 18625 d = {"a": [[2], [3], [4]]} result = Series(d, index=["a"], dtype="object") expected = Series(d, index=["a"]) tm.assert_series_equal(result, expected) def test_constructor_dict_order(self): # GH19018 # initialization ordering: by insertion order d = {"b": 1, "a": 0, "c": 2} result = Series(d) expected = Series([1, 0, 2], index=list("bac")) tm.assert_series_equal(result, expected) def test_constructor_dict_extension(self, ea_scalar_and_dtype): ea_scalar, ea_dtype = ea_scalar_and_dtype d = {"a": ea_scalar} result = Series(d, index=["a"]) expected = 
Series(ea_scalar, index=["a"], dtype=ea_dtype) assert result.dtype == ea_dtype tm.assert_series_equal(result, expected) @pytest.mark.parametrize("value", [2, np.nan, None, float("nan")]) def test_constructor_dict_nan_key(self, value): # GH 18480 d = {1: "a", value: "b", float("nan"): "c", 4: "d"} result = Series(d).sort_values() expected = Series(["a", "b", "c", "d"], index=[1, value, np.nan, 4]) tm.assert_series_equal(result, expected) # MultiIndex: d = {(1, 1): "a", (2, np.nan): "b", (3, value): "c"} result = Series(d).sort_values() expected = Series( ["a", "b", "c"], index=Index([(1, 1), (2, np.nan), (3, value)]) ) tm.assert_series_equal(result, expected) def test_constructor_dict_datetime64_index(self): # GH 9456 dates_as_str = ["1984-02-19", "1988-11-06", "1989-12-03", "1990-03-15"] values = [42544017.198965244, 1234565, 40512335.181958228, -1] def create_data(constructor): return dict(zip((constructor(x) for x in dates_as_str), values)) data_datetime64 = create_data(np.datetime64) data_datetime = create_data(lambda x: datetime.strptime(x, "%Y-%m-%d")) data_Timestamp = create_data(Timestamp) expected = Series(values, (Timestamp(x) for x in dates_as_str)) result_datetime64 = Series(data_datetime64) result_datetime = Series(data_datetime) result_Timestamp = Series(data_Timestamp) tm.assert_series_equal( result_datetime64, expected.set_axis(expected.index.as_unit("s")) ) tm.assert_series_equal(result_datetime, expected) tm.assert_series_equal(result_Timestamp, expected) def test_constructor_dict_tuple_indexer(self): # GH 12948 data = {(1, 1, None): -1.0} result = Series(data) expected = Series( -1.0, index=MultiIndex(levels=[[1], [1], [np.nan]], codes=[[0], [0], [-1]]) ) tm.assert_series_equal(result, expected) def test_constructor_mapping(self, non_dict_mapping_subclass): # GH 29788 ndm = non_dict_mapping_subclass({3: "three"}) result = Series(ndm) expected = Series(["three"], index=[3]) tm.assert_series_equal(result, expected) def test_constructor_list_of_tuples(self): data = [(1, 1), (2, 2), (2, 3)] s = Series(data) assert list(s) == data def test_constructor_tuple_of_tuples(self): data = ((1, 1), (2, 2), (2, 3)) s = Series(data) assert tuple(s) == data @pytest.mark.parametrize( "data, expected_values, expected_index", [ ({(1, 2): 3, (None, 5): 6}, [3, 6], [(1, 2), (None, 5)]), ({(1,): 3, (4, 5): 6}, [3, 6], [(1, None), (4, 5)]), ], ) def test_constructor_dict_of_tuples(self, data, expected_values, expected_index): # GH 60695 result = Series(data).sort_values() expected = Series(expected_values, index=MultiIndex.from_tuples(expected_index)) tm.assert_series_equal(result, expected) # https://github.com/pandas-dev/pandas/issues/22698 @pytest.mark.filterwarnings("ignore:elementwise comparison:FutureWarning") def test_fromDict(self, using_infer_string): data = {"a": 0, "b": 1, "c": 2, "d": 3} series = Series(data) tm.assert_is_sorted(series.index) data = {"a": 0, "b": "1", "c": "2", "d": datetime.now()} series = Series(data) assert series.dtype == np.object_ data = {"a": 0, "b": "1", "c": "2", "d": "3"} series = Series(data) assert series.dtype == np.object_ if not using_infer_string else "str" data = {"a": "0", "b": "1"} series = Series(data, dtype=float) assert series.dtype == np.float64 def test_fromValue(self, datetime_series, using_infer_string): nans = Series(np.nan, index=datetime_series.index, dtype=np.float64) assert nans.dtype == np.float64 assert len(nans) == len(datetime_series) strings = Series("foo", index=datetime_series.index) assert strings.dtype == np.object_ if not 
using_infer_string else "str" assert len(strings) == len(datetime_series) d = datetime.now() dates = Series(d, index=datetime_series.index) assert dates.dtype == "M8[us]" assert len(dates) == len(datetime_series) # GH12336 # Test construction of categorical series from value categorical = Series(0, index=datetime_series.index, dtype="category") expected = Series(0, index=datetime_series.index).astype("category") assert categorical.dtype == "category" assert len(categorical) == len(datetime_series) tm.assert_series_equal(categorical, expected) def test_constructor_dtype_timedelta64(self): # basic td = Series([timedelta(days=i) for i in range(3)]) assert td.dtype == "timedelta64[ns]" td = Series([timedelta(days=1)]) assert td.dtype == "timedelta64[ns]" td = Series([timedelta(days=1), timedelta(days=2), np.timedelta64(1, "s")]) assert td.dtype == "timedelta64[ns]" # mixed with NaT td = Series([timedelta(days=1), NaT], dtype="m8[ns]") assert td.dtype == "timedelta64[ns]" td = Series([timedelta(days=1), np.nan], dtype="m8[ns]") assert td.dtype == "timedelta64[ns]" td = Series([np.timedelta64(300000000), NaT], dtype="m8[ns]") assert td.dtype == "timedelta64[ns]" # improved inference # GH5689 td = Series([np.timedelta64(300000000), NaT]) assert td.dtype == "timedelta64[ns]" # because iNaT is int, not coerced to timedelta td = Series([np.timedelta64(300000000), iNaT]) assert td.dtype == "object" td = Series([np.timedelta64(300000000), np.nan]) assert td.dtype == "timedelta64[ns]" td = Series([NaT, np.timedelta64(300000000)]) assert td.dtype == "timedelta64[ns]" td = Series([np.timedelta64(1, "s")]) assert td.dtype == "timedelta64[ns]" # valid astype td.astype("int64") # invalid casting msg = r"Converting from timedelta64\[ns\] to int32 is not supported" with pytest.raises(TypeError, match=msg): td.astype("int32") # this is an invalid casting msg = "|".join( [ "Could not convert object to NumPy timedelta", "Could not convert 'foo' to NumPy timedelta", ] ) with pytest.raises(ValueError, match=msg): Series([timedelta(days=1), "foo"], dtype="m8[ns]") # leave as object here td = Series([timedelta(days=i) for i in range(3)] + ["foo"]) assert td.dtype == "object" # as of 2.0, these no longer infer timedelta64 based on the strings, # matching Index behavior ser = Series([None, NaT, "1 Day"]) assert ser.dtype == object ser = Series([np.nan, NaT, "1 Day"]) assert ser.dtype == object ser = Series([NaT, None, "1 Day"]) assert ser.dtype == object ser = Series([NaT, np.nan, "1 Day"]) assert ser.dtype == object # GH 16406 def test_constructor_mixed_tz(self): s = Series([Timestamp("20130101"), Timestamp("20130101", tz="US/Eastern")]) expected = Series( [Timestamp("20130101"), Timestamp("20130101", tz="US/Eastern")], dtype="object", ) tm.assert_series_equal(s, expected) def test_NaT_scalar(self): series = Series([0, 1000, 2000, iNaT], dtype="M8[ns]") val = series[3] assert isna(val) series[2] = val assert isna(series[2]) def test_NaT_cast(self): # GH10747 result = Series([np.nan]).astype("M8[ns]") expected = Series([NaT], dtype="M8[ns]") tm.assert_series_equal(result, expected) def test_constructor_name_hashable(self): for n in [777, 777.0, "name", datetime(2001, 11, 11), (1,), "\u05d0"]: for data in [[1, 2, 3], np.ones(3), {"a": 0, "b": 1}]: s = Series(data, name=n) assert s.name == n def test_constructor_name_unhashable(self): msg = r"Series\.name must be a hashable type" for n in [["name_list"], np.ones(2), {1: 2}]: for data in [["name_list"], np.ones(2), {1: 2}]: with pytest.raises(TypeError, match=msg): 
Series(data, name=n) def test_auto_conversion(self): series = Series(list(date_range("1/1/2000", periods=10, unit="ns"))) assert series.dtype == "M8[ns]" def test_convert_non_ns(self): # convert from a numpy array of non-ns timedelta64 arr = np.array([1, 2, 3], dtype="timedelta64[s]") ser = Series(arr) assert ser.dtype == arr.dtype tdi = timedelta_range("00:00:01", periods=3, freq="s").as_unit("s") expected = Series(tdi) assert expected.dtype == arr.dtype tm.assert_series_equal(ser, expected) # convert from a numpy array of non-ns datetime64 arr = np.array( ["2013-01-01", "2013-01-02", "2013-01-03"], dtype="datetime64[D]" ) ser = Series(arr) expected = Series(date_range("20130101", periods=3, freq="D"), dtype="M8[s]") assert expected.dtype == "M8[s]" tm.assert_series_equal(ser, expected) arr = np.array( ["2013-01-01 00:00:01", "2013-01-01 00:00:02", "2013-01-01 00:00:03"], dtype="datetime64[s]", ) ser = Series(arr) expected = Series( date_range("20130101 00:00:01", periods=3, freq="s"), dtype="M8[s]" ) assert expected.dtype == "M8[s]" tm.assert_series_equal(ser, expected) @pytest.mark.parametrize( "index", [ date_range("1/1/2000", periods=10), timedelta_range("1 day", periods=10), period_range("2000-Q1", periods=10, freq="Q"), ], ids=lambda x: type(x).__name__, ) def test_constructor_cant_cast_datetimelike(self, index): # floats are not ok # strip Index to convert PeriodIndex -> Period # We don't care whether the error message says # PeriodIndex or PeriodArray msg = f"Cannot cast {type(index).__name__.rstrip('Index')}.*? to " with pytest.raises(TypeError, match=msg): Series(index, dtype=float) # ints are ok # we test with np.int64 to get similar results on # windows / 32-bit platforms result = Series(index, dtype=np.int64) expected = Series(index.astype(np.int64)) tm.assert_series_equal(result, expected) @pytest.mark.parametrize( "index", [ date_range("1/1/2000", periods=10), timedelta_range("1 day", periods=10), period_range("2000-Q1", periods=10, freq="Q"), ], ids=lambda x: type(x).__name__, ) def test_constructor_cast_object(self, index): s = Series(index, dtype=object) exp = Series(index).astype(object) tm.assert_series_equal(s, exp) s = Series(Index(index, dtype=object), dtype=object) exp = Series(index).astype(object) tm.assert_series_equal(s, exp) s = Series(index.astype(object), dtype=object) exp = Series(index).astype(object) tm.assert_series_equal(s, exp) @pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64]) def test_constructor_generic_timestamp_no_frequency(self, dtype, request): # see gh-15524, gh-15987 msg = "dtype has no unit. Please pass in" if np.dtype(dtype).name not in ["timedelta64", "datetime64"]: mark = pytest.mark.xfail(reason="GH#33890 Is assigned ns unit") request.applymarker(mark) with pytest.raises(ValueError, match=msg): Series([], dtype=dtype) @pytest.mark.parametrize("unit", ["ps", "as", "fs", "Y", "M", "W", "D", "h", "m"]) @pytest.mark.parametrize("kind", ["m", "M"]) def test_constructor_generic_timestamp_bad_frequency(self, kind, unit): # see gh-15524, gh-15987 # as of 2.0 we raise on any non-supported unit rather than silently # cast to nanos; previously we only raised for frequencies higher # than ns dtype = f"{kind}8[{unit}]" msg = "dtype=.* is not supported. 
Supported resolutions are" with pytest.raises(TypeError, match=msg): Series([], dtype=dtype) with pytest.raises(TypeError, match=msg): # pre-2.0 the DataFrame cast raised but the Series case did not DataFrame([[0]], dtype=dtype) @pytest.mark.parametrize("dtype", [None, "uint8", "category"]) def test_constructor_range_dtype(self, dtype): # GH 16804 expected = Series([0, 1, 2, 3, 4], dtype=dtype or "int64") result = Series(range(5), dtype=dtype) tm.assert_series_equal(result, expected) def test_constructor_range_overflows(self): # GH#30173 range objects that overflow int64 rng = range(2**63, 2**63 + 4) ser = Series(rng) expected = Series(list(rng)) tm.assert_series_equal(ser, expected) assert list(ser) == list(rng) assert ser.dtype == np.uint64 rng2 = range(2**63 + 4, 2**63, -1) ser2 = Series(rng2) expected2 = Series(list(rng2)) tm.assert_series_equal(ser2, expected2) assert list(ser2) == list(rng2) assert ser2.dtype == np.uint64 rng3 = range(-(2**63), -(2**63) - 4, -1) ser3 = Series(rng3) expected3 = Series(list(rng3)) tm.assert_series_equal(ser3, expected3) assert list(ser3) == list(rng3) assert ser3.dtype == object rng4 = range(2**73, 2**73 + 4) ser4 = Series(rng4) expected4 = Series(list(rng4)) tm.assert_series_equal(ser4, expected4) assert list(ser4) == list(rng4) assert ser4.dtype == object def test_constructor_tz_mixed_data(self): # GH 13051 dt_list = [ Timestamp("2016-05-01 02:03:37"), Timestamp("2016-04-30 19:03:37-0700", tz="US/Pacific"), ] result = Series(dt_list) expected = Series(dt_list, dtype=object) tm.assert_series_equal(result, expected) @pytest.mark.parametrize("pydt", [True, False]) def test_constructor_data_aware_dtype_naive(self, tz_aware_fixture, pydt): # GH#25843, GH#41555, GH#33401 tz = tz_aware_fixture ts = Timestamp("2019", tz=tz) if pydt: ts = ts.to_pydatetime() msg = ( "Cannot convert timezone-aware data to timezone-naive dtype. " r"Use pd.Series\(values\).dt.tz_localize\(None\) instead." 
) with pytest.raises(ValueError, match=msg): Series([ts], dtype="datetime64[ns]") with pytest.raises(ValueError, match=msg): Series(np.array([ts], dtype=object), dtype="datetime64[ns]") with pytest.raises(ValueError, match=msg): Series({0: ts}, dtype="datetime64[ns]") msg = "Cannot unbox tzaware Timestamp to tznaive dtype" with pytest.raises(TypeError, match=msg): Series(ts, index=[0], dtype="datetime64[ns]") def test_constructor_datetime64(self): rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s") dates = np.asarray(rng) series = Series(dates) assert np.issubdtype(series.dtype, np.dtype("M8[ns]")) def test_constructor_datetimelike_scalar_to_string_dtype( self, nullable_string_dtype ): # https://github.com/pandas-dev/pandas/pull/33846 result = Series("M", index=[1, 2, 3], dtype=nullable_string_dtype) expected = Series(["M", "M", "M"], index=[1, 2, 3], dtype=nullable_string_dtype) tm.assert_series_equal(result, expected) @pytest.mark.parametrize("box", [lambda x: x, np.datetime64]) def test_constructor_sparse_datetime64(self, box): # https://github.com/pandas-dev/pandas/issues/35762 values = [box("2012-01-01"), box("2013-01-01")] dtype = pd.SparseDtype("datetime64[ns]") result = Series(values, dtype=dtype) arr = pd.arrays.SparseArray(values, dtype=dtype) expected = Series(arr) tm.assert_series_equal(result, expected) def test_construction_from_ordered_collection(self): # https://github.com/pandas-dev/pandas/issues/36044 result = Series({"a": 1, "b": 2}.keys()) expected = Series(["a", "b"]) tm.assert_series_equal(result, expected) result = Series({"a": 1, "b": 2}.values()) expected = Series([1, 2]) tm.assert_series_equal(result, expected) def test_construction_from_large_int_scalar_no_overflow(self): # https://github.com/pandas-dev/pandas/issues/36291 n = 1_000_000_000_000_000_000_000 result = Series(n, index=[0]) expected = Series(n) tm.assert_series_equal(result, expected) def test_constructor_list_of_periods_infers_period_dtype(self): series = Series(list(period_range("2000-01-01", periods=10, freq="D"))) assert series.dtype == "Period[D]" series = Series( [Period("2011-01-01", freq="D"), Period("2011-02-01", freq="D")] ) assert series.dtype == "Period[D]" def test_constructor_subclass_dict(self, dict_subclass): data = dict_subclass((x, 10.0 * x) for x in range(10)) series = Series(data) expected = Series(dict(data.items())) tm.assert_series_equal(series, expected) def test_constructor_ordereddict(self): # GH3283 data = OrderedDict( (f"col{i}", np.random.default_rng(2).random()) for i in range(12) ) series = Series(data) expected = Series(list(data.values()), list(data.keys())) tm.assert_series_equal(series, expected) # Test with subclass class A(OrderedDict): pass series = Series(A(data)) tm.assert_series_equal(series, expected) @pytest.mark.parametrize( "data, expected_index_multi", [ ({("a", "a"): 0.0, ("b", "a"): 1.0, ("b", "c"): 2.0}, True), ({("a",): 0.0, ("a", "b"): 1.0}, True), ({"z": 111.0, ("a", "a"): 0.0, ("b", "a"): 1.0, ("b", "c"): 2.0}, False), ], ) def test_constructor_dict_multiindex(self, data, expected_index_multi): # GH#60695 result = Series(data) if expected_index_multi: expected = Series( list(data.values()), index=MultiIndex.from_tuples(list(data.keys())), ) tm.assert_series_equal(result, expected) else: expected = Series( list(data.values()), index=Index(list(data.keys())), ) tm.assert_series_equal(result, expected) def test_constructor_dict_multiindex_reindex_flat(self): # construction involves reindexing with a MultiIndex corner case data = 
{("i", "i"): 0, ("i", "j"): 1, ("j", "i"): 2, "j": np.nan} expected = Series(data) result = Series(expected[:-1].to_dict(), index=expected.index) tm.assert_series_equal(result, expected) def test_constructor_dict_timedelta_index(self): # GH #12169 : Resample category data with timedelta index # construct Series from dict as data and TimedeltaIndex as index # will result NaN in result Series data expected = Series( data=["A", "B", "C"], index=pd.to_timedelta([0, 10, 20], unit="s") ) result = Series( data={ pd.to_timedelta(0, unit="s"): "A", pd.to_timedelta(10, unit="s"): "B", pd.to_timedelta(20, unit="s"): "C", }, index=pd.to_timedelta([0, 10, 20], unit="s"), ) tm.assert_series_equal(result, expected) def test_constructor_infer_index_tz(self): values = [188.5, 328.25] tzinfo = tzoffset(None, 7200) index = [ datetime(2012, 5, 11, 11, tzinfo=tzinfo), datetime(2012, 5, 11, 12, tzinfo=tzinfo), ] series = Series(data=values, index=index) assert series.index.tz == tzinfo # it works! GH#2443 repr(series.index[0]) def test_constructor_with_pandas_dtype(self): # going through 2D->1D path vals = [(1,), (2,), (3,)] ser = Series(vals) dtype = ser.array.dtype # NumpyEADtype ser2 = Series(vals, dtype=dtype) tm.assert_series_equal(ser, ser2) def test_constructor_int_dtype_missing_values(self): # GH#43017 result = Series(index=[0], dtype="int64") expected = Series(np.nan, index=[0], dtype="float64") tm.assert_series_equal(result, expected) def test_constructor_bool_dtype_missing_values(self): # GH#43018 result = Series(index=[0], dtype="bool") expected = Series(True, index=[0], dtype="bool") tm.assert_series_equal(result, expected) def test_constructor_int64_dtype(self, any_int_dtype): # GH#44923 result = Series(["0", "1", "2"], dtype=any_int_dtype) expected = Series([0, 1, 2], dtype=any_int_dtype) tm.assert_series_equal(result, expected) def test_constructor_raise_on_lossy_conversion_of_strings(self): # GH#44923 if not np_version_gt2: raises = pytest.raises( ValueError, match="string values cannot be losslessly cast to int8" ) else: raises = pytest.raises( OverflowError, match="The elements provided in the data" ) with raises: Series(["128"], dtype="int8") def test_constructor_dtype_timedelta_alternative_construct(self): # GH#35465 result = Series([1000000, 200000, 3000000], dtype="timedelta64[ns]") expected = Series(pd.to_timedelta([1000000, 200000, 3000000], unit="ns")) tm.assert_series_equal(result, expected) @pytest.mark.xfail( reason="Not clear what the correct expected behavior should be with " "integers now that we support non-nano. ATM (2022-10-08) we treat ints " "as nanoseconds, then cast to the requested dtype. xref #48312" ) def test_constructor_dtype_timedelta_ns_s(self): # GH#35465 result = Series([1000000, 200000, 3000000], dtype="timedelta64[ns]") expected = Series([1000000, 200000, 3000000], dtype="timedelta64[s]") tm.assert_series_equal(result, expected) @pytest.mark.xfail( reason="Not clear what the correct expected behavior should be with " "integers now that we support non-nano. ATM (2022-10-08) we treat ints " "as nanoseconds, then cast to the requested dtype. 
xref #48312" ) def test_constructor_dtype_timedelta_ns_s_astype_int64(self): # GH#35465 result = Series([1000000, 200000, 3000000], dtype="timedelta64[ns]").astype( "int64" ) expected = Series([1000000, 200000, 3000000], dtype="timedelta64[s]").astype( "int64" ) tm.assert_series_equal(result, expected) @pytest.mark.filterwarnings( "ignore:elementwise comparison failed:DeprecationWarning" ) @pytest.mark.parametrize("func", [Series, DataFrame, Index, pd.array]) def test_constructor_mismatched_null_nullable_dtype( self, func, any_numeric_ea_dtype ): # GH#44514 msg = "|".join( [ "cannot safely cast non-equivalent object", r"int\(\) argument must be a string, a bytes-like object " "or a (real )?number", r"Cannot cast array data from dtype\('O'\) to dtype\('float64'\) " "according to the rule 'safe'", "object cannot be converted to a FloatingDtype", "'values' contains non-numeric NA", ] ) for null in tm.NP_NAT_OBJECTS + [NaT]: with pytest.raises(TypeError, match=msg): func([null, 1.0, 3.0], dtype=any_numeric_ea_dtype) def test_series_constructor_ea_int_from_bool(self): # GH#42137 result = Series([True, False, True, pd.NA], dtype="Int64") expected = Series([1, 0, 1, pd.NA], dtype="Int64") tm.assert_series_equal(result, expected) result = Series([True, False, True], dtype="Int64") expected = Series([1, 0, 1], dtype="Int64") tm.assert_series_equal(result, expected) def test_series_constructor_ea_int_from_string_bool(self): # GH#42137 with pytest.raises(ValueError, match="invalid literal"): Series(["True", "False", "True", pd.NA], dtype="Int64") @pytest.mark.parametrize("val", [1, 1.0]) def test_series_constructor_overflow_uint_ea(self, val): # GH#38798 max_val = np.iinfo(np.uint64).max - 1 result = Series([max_val, val], dtype="UInt64") expected = Series(np.array([max_val, 1], dtype="uint64"), dtype="UInt64") tm.assert_series_equal(result, expected) @pytest.mark.parametrize("val", [1, 1.0]) def test_series_constructor_overflow_uint_ea_with_na(self, val): # GH#38798 max_val = np.iinfo(np.uint64).max - 1 result = Series([max_val, val, pd.NA], dtype="UInt64") expected = Series( IntegerArray( np.array([max_val, 1, 0], dtype="uint64"), np.array([0, 0, 1], dtype=np.bool_), ) ) tm.assert_series_equal(result, expected) def test_series_constructor_overflow_uint_with_nan(self): # GH#38798 max_val = np.iinfo(np.uint64).max - 1 result = Series([max_val, pd.NA], dtype="UInt64") expected = Series( IntegerArray( np.array([max_val, 1], dtype="uint64"), np.array([0, 1], dtype=np.bool_), ) ) tm.assert_series_equal(result, expected) def test_series_constructor_ea_all_na(self): # GH#38798 result = Series([pd.NA, pd.NA], dtype="UInt64") expected = Series( IntegerArray( np.array([1, 1], dtype="uint64"), np.array([1, 1], dtype=np.bool_), ) ) tm.assert_series_equal(result, expected) def test_series_from_index_dtype_equal_does_not_copy(self): # GH#52008 idx = Index([1, 2, 3]) expected = idx.copy(deep=True) ser = Series(idx, dtype="int64") ser.iloc[0] = 100 tm.assert_index_equal(idx, expected) def test_series_string_inference(self): # GH#54430 with pd.option_context("future.infer_string", True): ser = Series(["a", "b"]) dtype = pd.StringDtype("pyarrow" if HAS_PYARROW else "python", na_value=np.nan) expected = Series(["a", "b"], dtype=dtype) tm.assert_series_equal(ser, expected) expected = Series(["a", 1], dtype="object") with pd.option_context("future.infer_string", True): ser = Series(["a", 1]) tm.assert_series_equal(ser, expected) @pytest.mark.parametrize("na_value", [None, np.nan, pd.NA]) def 
test_series_string_with_na_inference(self, na_value): # GH#54430 with pd.option_context("future.infer_string", True): ser = Series(["a", na_value]) dtype = pd.StringDtype("pyarrow" if HAS_PYARROW else "python", na_value=np.nan) expected = Series(["a", None], dtype=dtype) tm.assert_series_equal(ser, expected) def test_series_string_inference_scalar(self): # GH#54430 with pd.option_context("future.infer_string", True): ser = Series("a", index=[1]) dtype = pd.StringDtype("pyarrow" if HAS_PYARROW else "python", na_value=np.nan) expected = Series("a", index=[1], dtype=dtype) tm.assert_series_equal(ser, expected) def test_series_string_inference_array_string_dtype(self): # GH#54496 with pd.option_context("future.infer_string", True): ser = Series(np.array(["a", "b"])) dtype = pd.StringDtype("pyarrow" if HAS_PYARROW else "python", na_value=np.nan) expected = Series(["a", "b"], dtype=dtype) tm.assert_series_equal(ser, expected) def test_series_string_inference_storage_definition(self): # https://github.com/pandas-dev/pandas/issues/54793 # but after PDEP-14 (string dtype), it was decided to keep dtype="string" # returning the NA string dtype, so expected is changed from # "string[pyarrow_numpy]" to "string[python]" expected = Series( ["a", "b"], dtype="string[pyarrow]" if HAS_PYARROW else "string[python]" ) with pd.option_context("future.infer_string", True): result = Series(["a", "b"], dtype="string") tm.assert_series_equal(result, expected) expected = Series(["a", "b"], dtype=pd.StringDtype(na_value=np.nan)) with pd.option_context("future.infer_string", True): result = Series(["a", "b"], dtype="str") tm.assert_series_equal(result, expected) def test_series_constructor_infer_string_scalar(self): # GH#55537 with pd.option_context("future.infer_string", True): ser = Series("a", index=[1, 2], dtype="string[python]") expected = Series(["a", "a"], index=[1, 2], dtype="string[python]") tm.assert_series_equal(ser, expected) assert ser.dtype.storage == "python" def test_series_string_inference_na_first(self): # GH#55655 with pd.option_context("future.infer_string", True): result = Series([pd.NA, "b"]) dtype = pd.StringDtype("pyarrow" if HAS_PYARROW else "python", na_value=np.nan) expected = Series([None, "b"], dtype=dtype) tm.assert_series_equal(result, expected) @pytest.mark.parametrize("klass", [Series, Index]) def test_inference_on_pandas_objects(self, klass): # GH#56012 obj = klass([Timestamp("2019-12-31")], dtype=object) # This doesn't do inference result = Series(obj) assert result.dtype == np.object_
TestSeriesConstructors
python
huggingface__transformers
tests/models/bert_japanese/test_tokenization_bert_japanese.py
{ "start": 1197, "end": 14323 }
class ____(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "cl-tohoku/bert-base-japanese" tokenizer_class = BertJapaneseTokenizer test_rust_tokenizer = False space_between_special_tokens = True @classmethod def setUpClass(cls): super().setUpClass() # Create a separate temp directory for the vocab file to avoid conflicts # with files saved by the base class setUpClass (e.g., tokenizer_config.json, added_tokens.json) cls.vocab_tmpdirname = tempfile.mkdtemp() vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは", "世界", "##世界", "、", "##、", "。", "##。", "アップルストア", "外国", "##人", "参政", "##権", "此れ", "は", "猫", "です", ] cls.vocab_file = os.path.join(cls.vocab_tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(cls.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) @classmethod def get_tokenizer(cls, pretrained_name=None, **kwargs): """Override to use vocab_tmpdirname instead of tmpdirname to avoid conflicts with saved tokenizer files.""" pretrained_name = pretrained_name or cls.vocab_tmpdirname return cls.tokenizer_class.from_pretrained(pretrained_name, **kwargs) @classmethod def tearDownClass(cls): super().tearDownClass() if hasattr(cls, "vocab_tmpdirname"): shutil.rmtree(cls.vocab_tmpdirname, ignore_errors=True) def get_input_output_texts(self, tokenizer): input_text = "こんにちは、世界。 \nこんばんは、世界。" output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。" return input_text, output_text def get_clean_sequence(self, tokenizer): input_text, output_text = self.get_input_output_texts(tokenizer) ids = tokenizer.encode(output_text, add_special_tokens=False) text = tokenizer.decode(ids, clean_up_tokenization_spaces=False) return text, ids def test_pretokenized_inputs(self): pass # TODO add if relevant def test_maximum_encoding_length_pair_input(self): pass # TODO add if relevant def test_maximum_encoding_length_single_input(self): pass # TODO add if relevant def test_full_tokenizer(self): tokenizer = self.tokenizer_class(self.vocab_file) tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。") self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14]) def test_mecab_full_tokenizer_with_mecab_kwargs(self): tokenizer = self.tokenizer_class( self.vocab_file, word_tokenizer_type="mecab", mecab_kwargs={"mecab_dic": "ipadic"} ) text = "アップルストア" tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, ["アップルストア"]) def test_mecab_tokenizer_ipadic(self): tokenizer = MecabTokenizer(mecab_dic="ipadic") self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"], ) def test_mecab_tokenizer_unidic_lite(self): try: tokenizer = MecabTokenizer(mecab_dic="unidic_lite") except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"], ) def test_mecab_tokenizer_unidic(self): try: import unidic self.assertTrue( os.path.isdir(unidic.DICDIR), "The content of unidic was not downloaded. Run `python -m unidic download` before running this test case. 
Note that this requires 2.1GB on disk.", ) tokenizer = MecabTokenizer(mecab_dic="unidic") except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"], ) def test_mecab_tokenizer_lower(self): tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic") self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"], ) def test_mecab_tokenizer_with_option(self): try: tokenizer = MecabTokenizer( do_lower_case=True, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic" ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"], ) def test_mecab_tokenizer_no_normalize(self): tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic") self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"], ) @require_sudachi_projection def test_sudachi_tokenizer_core(self): tokenizer = SudachiTokenizer(sudachi_dict_type="core") # fmt: off self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "], ) # fmt: on @require_sudachi_projection def test_sudachi_tokenizer_split_mode_A(self): tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A") self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"]) @require_sudachi_projection def test_sudachi_tokenizer_split_mode_B(self): tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B") self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"]) @require_sudachi_projection def test_sudachi_tokenizer_split_mode_C(self): tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C") self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"]) @require_sudachi_projection def test_sudachi_full_tokenizer_with_sudachi_kwargs_split_mode_B(self): tokenizer = self.tokenizer_class( self.vocab_file, word_tokenizer_type="sudachi", sudachi_kwargs={"sudachi_split_mode": "B"} ) self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "##人", "参政", "##権"]) @require_sudachi_projection def test_sudachi_tokenizer_projection(self): tokenizer = SudachiTokenizer( sudachi_dict_type="core", sudachi_split_mode="A", sudachi_projection="normalized_nouns" ) self.assertListEqual(tokenizer.tokenize("これはねこです。"), ["此れ", "は", "猫", "です", "。"]) @require_sudachi_projection def test_sudachi_full_tokenizer_with_sudachi_kwargs_sudachi_projection(self): tokenizer = self.tokenizer_class( self.vocab_file, word_tokenizer_type="sudachi", sudachi_kwargs={"sudachi_projection": "normalized_nouns"} ) self.assertListEqual(tokenizer.tokenize("これはねこです。"), ["此れ", "は", "猫", "です", "。"]) @require_sudachi_projection def test_sudachi_tokenizer_lower(self): tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core") self.assertListEqual(tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),[" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "]) # fmt: skip @require_sudachi_projection def test_sudachi_tokenizer_no_normalize(self): 
tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core") self.assertListEqual(tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),[" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "]) # fmt: skip @require_sudachi_projection def test_sudachi_tokenizer_trim_whitespace(self): tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core") self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"], ) @require_jumanpp def test_jumanpp_tokenizer(self): tokenizer = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"]) # fmt: skip @require_jumanpp def test_jumanpp_tokenizer_lower(self): tokenizer = JumanppTokenizer(do_lower_case=True) self.assertListEqual(tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],) # fmt: skip @require_jumanpp def test_jumanpp_tokenizer_no_normalize(self): tokenizer = JumanppTokenizer(normalize_text=False) self.assertListEqual(tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],) # fmt: skip @require_jumanpp def test_jumanpp_tokenizer_trim_whitespace(self): tokenizer = JumanppTokenizer(trim_whitespace=True) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"], ) @require_jumanpp def test_jumanpp_full_tokenizer_with_jumanpp_kwargs_trim_whitespace(self): tokenizer = self.tokenizer_class( self.vocab_file, word_tokenizer_type="jumanpp", jumanpp_kwargs={"trim_whitespace": True} ) text = "こんにちは、世界。\nこんばんは、世界。" tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14]) @require_jumanpp def test_jumanpp_tokenizer_ext(self): tokenizer = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"), ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"], ) def test_wordpiece_tokenizer(self): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"] # fmt: skip vocab = {} for i, token in enumerate(vocab_tokens): vocab[token] = i tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]") self.assertListEqual(tokenizer.tokenize(""), []) self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"]) self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"]) self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"]) # fmt: skip def test_sentencepiece_tokenizer(self): tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp") subword_tokenizer = tokenizer.subword_tokenizer tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。") self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"]) # fmt: skip tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは") self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", 
"▁こんにちは"]) def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese") text = tokenizer.encode("ありがとう。", add_special_tokens=False) text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_2 + [3] @custom_tokenizers
BertJapaneseTokenizationTest
python
walkccc__LeetCode
solutions/72. Edit Distance/72.py
{ "start": 0, "end": 604 }
class ____:
  def minDistance(self, word1: str, word2: str) -> int:
    m = len(word1)
    n = len(word2)
    # dp[i][j] := the minimum number of operations to convert word1[0..i) to
    # word2[0..j)
    dp = [[0] * (n + 1) for _ in range(m + 1)]

    for i in range(1, m + 1):
      dp[i][0] = i

    for j in range(1, n + 1):
      dp[0][j] = j

    for i in range(1, m + 1):
      for j in range(1, n + 1):
        if word1[i - 1] == word2[j - 1]:
          dp[i][j] = dp[i - 1][j - 1]
        else:
          dp[i][j] = min(dp[i - 1][j - 1], dp[i - 1][j], dp[i][j - 1]) + 1

    return dp[m][n]
Solution
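As a quick, standalone check of the same dp recurrence used in the sample above (the helper name and test words are illustrative, not part of the original file):

def _edit_distance(a: str, b: str) -> int:
    # Same recurrence: dp[i][j] = minimum ops to turn a[:i] into b[:j].
    dp = [[0] * (len(b) + 1) for _ in range(len(a) + 1)]
    for i in range(1, len(a) + 1):
        dp[i][0] = i
    for j in range(1, len(b) + 1):
        dp[0][j] = j
    for i in range(1, len(a) + 1):
        for j in range(1, len(b) + 1):
            if a[i - 1] == b[j - 1]:
                dp[i][j] = dp[i - 1][j - 1]
            else:
                dp[i][j] = 1 + min(dp[i - 1][j - 1], dp[i - 1][j], dp[i][j - 1])
    return dp[len(a)][len(b)]

assert _edit_distance("horse", "ros") == 3        # replace h->r, delete r, delete e
assert _edit_distance("intention", "execution") == 5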
python
apache__airflow
airflow-core/tests/unit/utils/test_sqlalchemy.py
{ "start": 1782, "end": 2803 }
class ____:
    def test_returns_dialect_name_when_present(self, mocker):
        mock_session = mocker.Mock()
        mock_bind = mocker.Mock()
        mock_bind.dialect.name = "postgresql"
        mock_session.get_bind.return_value = mock_bind

        assert get_dialect_name(mock_session) == "postgresql"

    def test_raises_when_no_bind(self, mocker):
        mock_session = mocker.Mock()
        mock_session.get_bind.return_value = None

        with pytest.raises(ValueError, match="No bind/engine is associated"):
            get_dialect_name(mock_session)

    def test_returns_none_when_dialect_has_no_name(self, mocker):
        mock_session = mocker.Mock()
        mock_bind = mocker.Mock()
        # simulate dialect object without `name` attribute
        mock_bind.dialect = mock.Mock()
        delattr(mock_bind.dialect, "name") if hasattr(mock_bind.dialect, "name") else None
        mock_session.get_bind.return_value = mock_bind

        assert get_dialect_name(mock_session) is None
TestGetDialectName
python
huggingface__transformers
src/transformers/models/led/modeling_led.py
{ "start": 47124, "end": 47912 }
class ____(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(
        self,
        input_dim: int,
        inner_dim: int,
        num_classes: int,
        pooler_dropout: float,
    ):
        super().__init__()
        self.dense = nn.Linear(input_dim, inner_dim)
        self.dropout = nn.Dropout(p=pooler_dropout)
        self.out_proj = nn.Linear(inner_dim, num_classes)

    def forward(self, hidden_states: torch.Tensor):
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.dense(hidden_states)
        hidden_states = torch.tanh(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.out_proj(hidden_states)
        return hidden_states


@auto_docstring
LEDClassificationHead
python
airbytehq__airbyte
airbyte-integrations/connectors/source-shopify/source_shopify/streams/streams.py
{ "start": 7764, "end": 8726 }
class ____(IncrementalShopifyGraphQlBulkStream):
    bulk_query: Transaction = Transaction
    cursor_field = "created_at"

    @property
    def name(self) -> str:
        # override default name. This stream is essentially the same as `Transactions` stream, but it's using GraphQL API, which does not include the user_id field
        return "transactions"

    def get_json_schema(self) -> Mapping[str, Any]:
        """
        This stream has the same schema as `Transactions` stream, except of:
        - fields: [ `device_id, source_name, user_id, location_id` ]

        Specifically:
        - `user_id` field requires `Shopify Plus` / be authorised via `Financialy Embedded App`.
        - additional `read_users` scope is required https://shopify.dev/docs/api/usage/access-scopes#authenticated-access-scopes
        """
        return ResourceSchemaLoader(package_name_from_class(Transactions)).get_schema("transactions")
TransactionsGraphql
python
pytorch__pytorch
test/torch_np/numpy_tests/lib/test_function_base.py
{ "start": 21118, "end": 21574 }
class ____(TestCase):
    def test_basic(self):
        a = np.array([3, 4, 5, 10, -3, -5, 6.0])
        assert_equal(a.ptp(axis=0), 15.0)

        b = np.array([[3, 6.0, 9.0], [4, 10.0, 5.0], [8, 3.0, 2.0]])
        assert_equal(b.ptp(axis=0), [5.0, 7.0, 7.0])
        assert_equal(b.ptp(axis=-1), [6.0, 6.0, 6.0])

        assert_equal(b.ptp(axis=0, keepdims=True), [[5.0, 7.0, 7.0]])
        assert_equal(b.ptp(axis=(0, 1), keepdims=True), [[8.0]])
TestPtp
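The same peak-to-peak behaviour can be checked directly with plain NumPy; this small illustration mirrors the test above and is not part of the original file:

import numpy as np

a = np.array([3, 4, 5, 10, -3, -5, 6.0])
# ptp ("peak to peak") is max - min along the given axis: 10 - (-5) = 15.
assert np.ptp(a) == 15.0

b = np.array([[3, 6.0, 9.0], [4, 10.0, 5.0], [8, 3.0, 2.0]])
assert np.ptp(b, axis=0).tolist() == [5.0, 7.0, 7.0]   # per-column range
assert np.ptp(b, axis=1).tolist() == [6.0, 6.0, 6.0]   # per-row range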
python
doocs__leetcode
solution/3500-3599/3565.Sequential Grid Path Cover/Solution.py
{ "start": 0, "end": 1188 }
class ____:
    def findPath(self, grid: List[List[int]], k: int) -> List[List[int]]:
        def f(i: int, j: int) -> int:
            return i * n + j

        def dfs(i: int, j: int, v: int):
            nonlocal st
            path.append([i, j])
            if len(path) == m * n:
                return True
            st |= 1 << f(i, j)
            if grid[i][j] == v:
                v += 1
            for a, b in pairwise(dirs):
                x, y = i + a, j + b
                if (
                    0 <= x < m
                    and 0 <= y < n
                    and (st & 1 << f(x, y)) == 0
                    and grid[x][y] in (0, v)
                ):
                    if dfs(x, y, v):
                        return True
            path.pop()
            st ^= 1 << f(i, j)
            return False

        m, n = len(grid), len(grid[0])
        st = 0
        path = []
        dirs = (-1, 0, 1, 0, -1)
        for i in range(m):
            for j in range(n):
                if grid[i][j] in (0, 1):
                    if dfs(i, j, 1):
                        return path
                    path.clear()
                    st = 0
        return []
Solution
python
numpy__numpy
benchmarks/benchmarks/bench_core.py
{ "start": 5680, "end": 6242 }
class ____(Benchmark):
    def setup(self):
        self.d = np.ones(10000, dtype=np.uint8)
        self.d2 = np.ones((200, 1000), dtype=np.uint8)

    def time_unpackbits(self):
        np.unpackbits(self.d)

    def time_unpackbits_little(self):
        np.unpackbits(self.d, bitorder="little")

    def time_unpackbits_axis0(self):
        np.unpackbits(self.d2, axis=0)

    def time_unpackbits_axis1(self):
        np.unpackbits(self.d2, axis=1)

    def time_unpackbits_axis1_little(self):
        np.unpackbits(self.d2, bitorder="little", axis=1)
UnpackBits
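For reference, a tiny illustration of what the benchmarked call does (not part of the original benchmark file):

import numpy as np

x = np.array([7], dtype=np.uint8)
# Default ("big") bit order expands 7 to its 8 bits 00000111.
assert np.unpackbits(x).tolist() == [0, 0, 0, 0, 0, 1, 1, 1]
# bitorder="little" reverses the bit order within each byte.
assert np.unpackbits(x, bitorder="little").tolist() == [1, 1, 1, 0, 0, 0, 0, 0]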
python
pytorch__pytorch
torch/_inductor/codecache.py
{ "start": 172865, "end": 172968 }
class ____:
    def result(self) -> Callable[..., Any]:
        raise NotImplementedError
CodeCacheFuture
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/abstractClass10.py
{ "start": 1116, "end": 1225 }
class ____(A): ...


# This should generate an error.
C.method1()

# This should generate an error.
C.method3()
C
python
ethereum__web3.py
web3/exceptions.py
{ "start": 4609, "end": 4739 }
class ____(Web3Exception):
    """
    Raised when an ABI is present, but doesn't contain any functions.
    """
NoABIFunctionsFound
python
doocs__leetcode
lcof/面试题18. 删除链表的节点/Solution.py
{ "start": 134, "end": 430 }
class ____:
    def deleteNode(self, head: ListNode, val: int) -> ListNode:
        dummy = cur = ListNode(0, head)
        while cur.next:
            if cur.next.val == val:
                cur.next = cur.next.next
                break
            cur = cur.next
        return dummy.next
Solution
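A self-contained sketch of the dummy-node deletion shown above; the ListNode stand-in and the helper functions are assumptions for illustration only, not taken from the original file:

class ListNode:
    # Minimal stand-in for the list node the sample assumes.
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def build(vals):
    dummy = cur = ListNode()
    for v in vals:
        cur.next = ListNode(v)
        cur = cur.next
    return dummy.next

def to_list(head):
    out = []
    while head:
        out.append(head.val)
        head = head.next
    return out

head = build([4, 5, 1, 9])
# Same loop as the sample: walk with a dummy in front, splice out the first node with val == 5.
dummy = cur = ListNode(0, head)
while cur.next:
    if cur.next.val == 5:
        cur.next = cur.next.next
        break
    cur = cur.next
assert to_list(dummy.next) == [4, 1, 9]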
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/protocol3.py
{ "start": 2656, "end": 2812 }
class ____(NamedTuple):
    x: str


# This should generate an error because the protocol
# indicates that 'a' must be writable.
a: Proto7 = Class7("")
Class7
python
huggingface__transformers
src/transformers/models/mask2former/modeling_mask2former.py
{ "start": 19914, "end": 25584 }
class ____(nn.Module): """This class computes an assignment between the labels and the predictions of the network. For efficiency reasons, the labels don't include the no_object. Because of this, in general, there are more predictions than labels. In this case, we do a 1-to-1 matching of the best predictions, while the others are un-matched (and thus treated as non-objects). """ def __init__( self, cost_class: float = 1.0, cost_mask: float = 1.0, cost_dice: float = 1.0, num_points: int = 12544 ): """Creates the matcher Params: cost_class (`float`, *optional*, defaults to 1.0): Relative weight of the classification error in the matching cost. cost_mask (`float`, *optional*, defaults to 1.0): This is the relative weight of the focal loss of the binary mask in the matching cost. cost_dice (`float`, *optional*, defaults to 1.0): This is the relative weight of the dice loss of the binary mask in the matching cost. num_points (`int`, *optional*, defaults to 12544): No. of points to sample on which the mask loss will be calculated. The same set of K points are uniformly sampled for all prediction and ground truth masks to construct the cost matrix for bipartite matching. """ super().__init__() if cost_class == 0 and cost_mask == 0 and cost_dice == 0: raise ValueError("All costs can't be 0") self.num_points = num_points self.cost_class = cost_class self.cost_mask = cost_mask self.cost_dice = cost_dice @torch.no_grad() def forward( self, masks_queries_logits: torch.Tensor, class_queries_logits: torch.Tensor, mask_labels: torch.Tensor, class_labels: torch.Tensor, ) -> list[tuple[Tensor]]: """ Params: masks_queries_logits (`torch.Tensor`): A tensor of dim `batch_size, num_queries, num_labels` with the classification logits. class_queries_logits (`torch.Tensor`): A tensor of dim `batch_size, num_queries, height, width` with the predicted masks. class_labels (`torch.Tensor`): A tensor of dim `num_target_boxes` (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels. mask_labels (`torch.Tensor`): A tensor of dim `num_target_boxes, height, width` containing the target masks. Returns: matched_indices (`list[tuple[Tensor]]`): A list of size batch_size, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected labels (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes). """ indices: list[tuple[np.array]] = [] # iterate through batch size batch_size = masks_queries_logits.shape[0] for i in range(batch_size): pred_probs = class_queries_logits[i].softmax(-1) pred_mask = masks_queries_logits[i] # Compute the classification cost. Contrary to the loss, we don't use the NLL, but approximate it in 1 - proba[target class]. The 1 is a constant that doesn't change the matching, it can be omitted. 
cost_class = -pred_probs[:, class_labels[i]] target_mask = mask_labels[i].to(pred_mask) target_mask = target_mask[:, None] pred_mask = pred_mask[:, None] # Sample ground truth and predicted masks point_coordinates = torch.rand(1, self.num_points, 2, device=pred_mask.device) target_coordinates = point_coordinates.repeat(target_mask.shape[0], 1, 1) target_mask = sample_point(target_mask, target_coordinates, align_corners=False).squeeze(1) pred_coordinates = point_coordinates.repeat(pred_mask.shape[0], 1, 1) pred_mask = sample_point(pred_mask, pred_coordinates, align_corners=False).squeeze(1) # compute the cross entropy loss between each mask pairs -> shape (num_queries, num_labels) cost_mask = pair_wise_sigmoid_cross_entropy_loss(pred_mask, target_mask) # Compute the dice loss between each mask pairs -> shape (num_queries, num_labels) cost_dice = pair_wise_dice_loss(pred_mask, target_mask) # final cost matrix cost_matrix = self.cost_mask * cost_mask + self.cost_class * cost_class + self.cost_dice * cost_dice # eliminate infinite values in cost_matrix to avoid the error ``ValueError: cost matrix is infeasible`` cost_matrix = torch.minimum(cost_matrix, torch.tensor(1e10)) cost_matrix = torch.maximum(cost_matrix, torch.tensor(-1e10)) cost_matrix = torch.nan_to_num(cost_matrix, 0) # do the assignment using the hungarian algorithm in scipy assigned_indices: tuple[np.array] = linear_sum_assignment(cost_matrix.cpu()) indices.append(assigned_indices) # It could be stacked in one tensor matched_indices = [ (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices ] return matched_indices # Adapted from https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/modeling/criterion.py
Mask2FormerHungarianMatcher
python
doocs__leetcode
solution/2500-2599/2520.Count the Digits That Divide a Number/Solution.py
{ "start": 0, "end": 189 }
class ____:
    def countDigits(self, num: int) -> int:
        ans, x = 0, num
        while x:
            x, val = divmod(x, 10)
            ans += num % val == 0
        return ans
Solution
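A quick trace of the divmod loop above on one input (illustrative only; like the sample, it assumes the digits of num are nonzero):

num = 1248
count, x = 0, num
while x:
    x, digit = divmod(x, 10)   # peel off the last digit
    count += num % digit == 0  # booleans add as 0/1
assert count == 4              # 1, 2, 4 and 8 all divide 1248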
python
kubernetes-client__python
kubernetes/client/exceptions.py
{ "start": 2623, "end": 3794 }
class ____(OpenApiException):

    def __init__(self, status=None, reason=None, http_resp=None):
        if http_resp:
            self.status = http_resp.status
            self.reason = http_resp.reason
            self.body = http_resp.data
            self.headers = http_resp.getheaders()
        else:
            self.status = status
            self.reason = reason
            self.body = None
            self.headers = None

    def __str__(self):
        """Custom error messages for exception"""
        error_message = "({0})\n"\
                        "Reason: {1}\n".format(self.status, self.reason)
        if self.headers:
            error_message += "HTTP response headers: {0}\n".format(
                self.headers)

        if self.body:
            error_message += "HTTP response body: {0}\n".format(self.body)

        return error_message


def render_path(path_to_item):
    """Returns a string representation of a path"""
    result = ""
    for pth in path_to_item:
        if isinstance(pth, six.integer_types):
            result += "[{0}]".format(pth)
        else:
            result += "['{0}']".format(pth)
    return result
ApiException
python
justquick__django-activity-stream
actstream/tests/base.py
{ "start": 739, "end": 983 }
class ____(int):
    def __new__(cls, n):
        obj = super(LTE, cls).__new__(cls, n)
        obj.n = n
        return obj

    def __eq__(self, other):
        return other <= self.n

    def __repr__(self):
        return "<= %s" % self.n
LTE
python
doocs__leetcode
solution/1300-1399/1324.Print Words Vertically/Solution.py
{ "start": 0, "end": 348 }
class ____:
    def printVertically(self, s: str) -> List[str]:
        words = s.split()
        n = max(len(w) for w in words)
        ans = []
        for j in range(n):
            t = [w[j] if j < len(w) else ' ' for w in words]
            while t[-1] == ' ':
                t.pop()
            ans.append(''.join(t))
        return ans
Solution
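The column-building loop above can be exercised standalone; this is an illustrative check, not part of the original solution file:

s = "HOW ARE YOU"
words = s.split()
n = max(len(w) for w in words)
cols = []
for j in range(n):
    t = [w[j] if j < len(w) else ' ' for w in words]
    while t[-1] == ' ':      # strip only trailing padding, keep inner spaces
        t.pop()
    cols.append(''.join(t))
assert cols == ["HAY", "ORO", "WEU"]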
python
apache__airflow
providers/mysql/tests/unit/mysql/hooks/test_mysql_connector_python.py
{ "start": 981, "end": 3585 }
class ____: def setup_method(self): self.connection = Connection( conn_id="test_conn_id", conn_type="mysql", login="login", password="password", host="host", schema="schema", extra='{"client": "mysql-connector-python"}', ) self.db_hook = MySqlHook() self.db_hook.get_connection = mock.Mock() self.db_hook.get_connection.return_value = self.connection @mock.patch("mysql.connector.connect") def test_get_conn(self, mock_connect): self.db_hook.get_conn() assert mock_connect.call_count == 1 args, kwargs = mock_connect.call_args assert args == () assert kwargs["user"] == "login" assert kwargs["password"] == "password" assert kwargs["host"] == "host" assert kwargs["database"] == "schema" @mock.patch("mysql.connector.connect") def test_get_conn_port(self, mock_connect): self.connection.port = 3307 self.db_hook.get_conn() assert mock_connect.call_count == 1 args, kwargs = mock_connect.call_args assert args == () assert kwargs["port"] == 3307 @mock.patch("mysql.connector.connect") def test_get_conn_allow_local_infile(self, mock_connect): extra_dict = self.connection.extra_dejson self.connection.extra = json.dumps(extra_dict) self.db_hook.local_infile = True self.db_hook.get_conn() assert mock_connect.call_count == 1 args, kwargs = mock_connect.call_args assert args == () assert kwargs["allow_local_infile"] == 1 @mock.patch("mysql.connector.connect") def test_get_ssl_mode(self, mock_connect): extra_dict = self.connection.extra_dejson extra_dict.update(ssl_disabled=True) self.connection.extra = json.dumps(extra_dict) self.db_hook.get_conn() assert mock_connect.call_count == 1 args, kwargs = mock_connect.call_args assert args == () assert kwargs["ssl_disabled"] == 1 @mock.patch("mysql.connector.connect") def test_get_conn_init_command(self, mock_connect): extra_dict = self.connection.extra_dejson self.connection.extra = json.dumps(extra_dict) self.db_hook.init_command = "SET time_zone = '+00:00';" self.db_hook.get_conn() assert mock_connect.call_count == 1 args, kwargs = mock_connect.call_args assert args == () assert kwargs["init_command"] == "SET time_zone = '+00:00';"
TestMySqlHookConnMySqlConnectorPython
python
tensorflow__tensorflow
tensorflow/python/checkpoint/checkpoint_test.py
{ "start": 56429, "end": 59472 }
class ____(test.TestCase): @test_util.run_in_graph_and_eager_modes def test_keys_and_metadata(self): class MultiTensor(base.Trackable): def __init__(self, v1, v2): self.v1 = v1 self.v2 = v2 def _serialize_to_tensors(self): return {"v1": self.v1, "v2": self.v2} def _restore_from_tensors(self, restored_tensors): return control_flow_ops.group( self.v1.assign(restored_tensors["v1"]), self.v2.assign(restored_tensors["v2"])) root = MultiTensor(variables_lib.Variable(1), variables_lib.Variable(2)) child = MultiTensor(variables_lib.Variable(3), variables_lib.Variable(4)) ckpt = trackable_utils.Checkpoint(root=root, child=child) self.evaluate([root.v1.initializer, root.v2.initializer, child.v1.initializer, child.v2.initializer]) save_path = ckpt.save(os.path.join(self.get_temp_dir(), "ckpt")) # Check the checkpoint contents and metadata. reader = checkpoint_utils.load_checkpoint(save_path) object_proto = trackable_utils.object_metadata(save_path) root_attributes = object_proto.nodes[0].attributes self.assertLen(root_attributes, 2) self.assertDictEqual( {"v1": "/.ATTRIBUTES/v1", "v2": "/.ATTRIBUTES/v2"}, {attr.name: attr.checkpoint_key for attr in root_attributes}) self.assertEqual(1, reader.get_tensor("/.ATTRIBUTES/v1")) self.assertEqual(2, reader.get_tensor("/.ATTRIBUTES/v2")) child_attributes = object_proto.nodes[1].attributes self.assertLen(child_attributes, 2) self.assertDictEqual( {"v1": "child/.ATTRIBUTES/v1", "v2": "child/.ATTRIBUTES/v2"}, {attr.name: attr.checkpoint_key for attr in child_attributes}) self.assertEqual(3, reader.get_tensor("child/.ATTRIBUTES/v1")) self.assertEqual(4, reader.get_tensor("child/.ATTRIBUTES/v2")) # Try restoring the checkpoint. self.evaluate([root.v1.assign(0), root.v2.assign(0), child.v1.assign(0), child.v2.assign(0)]) ckpt.restore(save_path).assert_consumed().run_restore_ops() self.assertAllEqual([1, 2, 3, 4], self.evaluate([root.v1, root.v2, child.v1, child.v2])) @test_util.run_in_graph_and_eager_modes def test_reference_variable(self): # Test that refvariable is compatible with tf1 saver / tf2 checkpoint. with self.cached_session() as sess: root = autotrackable.AutoTrackable() root.v = variable_v1.VariableV1(5, use_resource=False) sess.run(root.v.initializer) ckpt = trackable_utils.Checkpoint(root) ckpt_path = os.path.join(self.get_temp_dir(), "ckpt") ckpt.write(ckpt_path) sess.run(root.v.assign(10)) saver = saver_lib.Saver(var_list=[root.v]) save_path = saver.save(sess, os.path.join(self.get_temp_dir(), "saver")) ckpt.read(ckpt_path).assert_consumed().run_restore_ops() self.assertEqual(5, sess.run(root.v)) saver.restore(sess, save_path) self.assertEqual(10, sess.run(root.v))
SerializeToTensorTest
python
optuna__optuna
optuna/storages/_in_memory.py
{ "start": 681, "end": 15186 }
class ____(BaseStorage): """Storage class that stores data in memory of the Python process. Example: Create an :class:`~optuna.storages.InMemoryStorage` instance. .. testcode:: import optuna def objective(trial): x = trial.suggest_float("x", -100, 100) return x**2 storage = optuna.storages.InMemoryStorage() study = optuna.create_study(storage=storage) study.optimize(objective, n_trials=10) """ def __init__(self) -> None: self._trial_id_to_study_id_and_number: dict[int, tuple[int, int]] = {} self._study_name_to_id: dict[str, int] = {} self._studies: dict[int, _StudyInfo] = {} self._max_study_id = -1 self._max_trial_id = -1 self._lock = threading.RLock() self._prev_waiting_trial_number: dict[int, int] = {} def __getstate__(self) -> dict[Any, Any]: state = self.__dict__.copy() del state["_lock"] return state def __setstate__(self, state: dict[Any, Any]) -> None: self.__dict__.update(state) self._lock = threading.RLock() def create_new_study( self, directions: Sequence[StudyDirection], study_name: str | None = None ) -> int: with self._lock: study_id = self._max_study_id + 1 self._max_study_id += 1 if study_name is not None: if study_name in self._study_name_to_id: raise DuplicatedStudyError else: study_uuid = str(uuid.uuid4()) study_name = DEFAULT_STUDY_NAME_PREFIX + study_uuid self._studies[study_id] = _StudyInfo(study_name, list(directions)) self._study_name_to_id[study_name] = study_id self._prev_waiting_trial_number[study_id] = 0 _logger.info("A new study created in memory with name: {}".format(study_name)) return study_id def delete_study(self, study_id: int) -> None: with self._lock: self._check_study_id(study_id) for trial in self._studies[study_id].trials: del self._trial_id_to_study_id_and_number[trial._trial_id] study_name = self._studies[study_id].name del self._study_name_to_id[study_name] del self._studies[study_id] del self._prev_waiting_trial_number[study_id] def set_study_user_attr(self, study_id: int, key: str, value: Any) -> None: with self._lock: self._check_study_id(study_id) self._studies[study_id].user_attrs[key] = value def set_study_system_attr(self, study_id: int, key: str, value: JSONSerializable) -> None: with self._lock: self._check_study_id(study_id) self._studies[study_id].system_attrs[key] = value def get_study_id_from_name(self, study_name: str) -> int: with self._lock: if study_name not in self._study_name_to_id: raise KeyError("No such study {}.".format(study_name)) return self._study_name_to_id[study_name] def get_study_name_from_id(self, study_id: int) -> str: with self._lock: self._check_study_id(study_id) return self._studies[study_id].name def get_study_directions(self, study_id: int) -> list[StudyDirection]: with self._lock: self._check_study_id(study_id) return self._studies[study_id].directions def get_study_user_attrs(self, study_id: int) -> dict[str, Any]: with self._lock: self._check_study_id(study_id) return self._studies[study_id].user_attrs def get_study_system_attrs(self, study_id: int) -> dict[str, Any]: with self._lock: self._check_study_id(study_id) return self._studies[study_id].system_attrs def get_all_studies(self) -> list[FrozenStudy]: with self._lock: return [self._build_frozen_study(study_id) for study_id in self._studies] def _build_frozen_study(self, study_id: int) -> FrozenStudy: study = self._studies[study_id] return FrozenStudy( study_name=study.name, direction=None, directions=study.directions, user_attrs=copy.deepcopy(study.user_attrs), system_attrs=copy.deepcopy(study.system_attrs), study_id=study_id, ) def 
create_new_trial(self, study_id: int, template_trial: FrozenTrial | None = None) -> int: with self._lock: self._check_study_id(study_id) if template_trial is None: trial = self._create_running_trial() else: trial = copy.deepcopy(template_trial) trial_id = self._max_trial_id + 1 self._max_trial_id += 1 trial.number = len(self._studies[study_id].trials) trial._trial_id = trial_id self._trial_id_to_study_id_and_number[trial_id] = (study_id, trial.number) self._studies[study_id].trials.append(trial) self._update_cache(trial_id, study_id) return trial_id @staticmethod def _create_running_trial() -> FrozenTrial: return FrozenTrial( trial_id=-1, # dummy value. number=-1, # dummy value. state=TrialState.RUNNING, params={}, distributions={}, user_attrs={}, system_attrs={}, value=None, intermediate_values={}, datetime_start=datetime.now(), datetime_complete=None, ) def set_trial_param( self, trial_id: int, param_name: str, param_value_internal: float, distribution: distributions.BaseDistribution, ) -> None: with self._lock: trial = self._get_trial(trial_id) self.check_trial_is_updatable(trial_id, trial.state) study_id = self._trial_id_to_study_id_and_number[trial_id][0] # Check param distribution compatibility with previous trial(s). if param_name in self._studies[study_id].param_distribution: distributions.check_distribution_compatibility( self._studies[study_id].param_distribution[param_name], distribution ) # Set param distribution. self._studies[study_id].param_distribution[param_name] = distribution # Set param. trial = copy.copy(trial) trial.params = copy.copy(trial.params) trial.params[param_name] = distribution.to_external_repr(param_value_internal) trial.distributions = copy.copy(trial.distributions) trial.distributions[param_name] = distribution self._set_trial(trial_id, trial) def get_trial_id_from_study_id_trial_number(self, study_id: int, trial_number: int) -> int: with self._lock: study = self._studies.get(study_id) if study is None: raise KeyError("No study with study_id {} exists.".format(study_id)) trials = study.trials if len(trials) <= trial_number: raise KeyError( "No trial with trial number {} exists in study with study_id {}.".format( trial_number, study_id ) ) trial = trials[trial_number] assert trial.number == trial_number return trial._trial_id def get_trial_number_from_id(self, trial_id: int) -> int: with self._lock: self._check_trial_id(trial_id) return self._trial_id_to_study_id_and_number[trial_id][1] def get_best_trial(self, study_id: int) -> FrozenTrial: with self._lock: self._check_study_id(study_id) best_trial_id = self._studies[study_id].best_trial_id if best_trial_id is None: raise ValueError("No trials are completed yet.") elif len(self._studies[study_id].directions) > 1: raise RuntimeError( "Best trial can be obtained only for single-objective optimization." 
) return self.get_trial(best_trial_id) def get_trial_param(self, trial_id: int, param_name: str) -> float: with self._lock: trial = self._get_trial(trial_id) distribution = trial.distributions[param_name] return distribution.to_internal_repr(trial.params[param_name]) def set_trial_state_values( self, trial_id: int, state: TrialState, values: Sequence[float] | None = None ) -> bool: with self._lock: trial = copy.copy(self._get_trial(trial_id)) self.check_trial_is_updatable(trial_id, trial.state) if state == TrialState.RUNNING and trial.state != TrialState.WAITING: return False trial.state = state if values is not None: trial.values = values if state == TrialState.RUNNING: trial.datetime_start = datetime.now() if state.is_finished(): trial.datetime_complete = datetime.now() self._set_trial(trial_id, trial) study_id = self._trial_id_to_study_id_and_number[trial_id][0] self._update_cache(trial_id, study_id) else: self._set_trial(trial_id, trial) return True def _update_cache(self, trial_id: int, study_id: int) -> None: trial = self._get_trial(trial_id) if trial.state != TrialState.COMPLETE: return best_trial_id = self._studies[study_id].best_trial_id if best_trial_id is None: self._studies[study_id].best_trial_id = trial_id return _directions = self.get_study_directions(study_id) if len(_directions) > 1: return direction = _directions[0] best_trial = self._get_trial(best_trial_id) assert best_trial is not None if best_trial.value is None: self._studies[study_id].best_trial_id = trial_id return # Complete trials do not have `None` values. assert trial.value is not None best_value = best_trial.value new_value = trial.value if direction == StudyDirection.MAXIMIZE: if best_value < new_value: self._studies[study_id].best_trial_id = trial_id else: if best_value > new_value: self._studies[study_id].best_trial_id = trial_id def set_trial_intermediate_value( self, trial_id: int, step: int, intermediate_value: float ) -> None: with self._lock: trial = self._get_trial(trial_id) self.check_trial_is_updatable(trial_id, trial.state) trial = copy.copy(trial) trial.intermediate_values = copy.copy(trial.intermediate_values) trial.intermediate_values[step] = intermediate_value self._set_trial(trial_id, trial) def set_trial_user_attr(self, trial_id: int, key: str, value: Any) -> None: with self._lock: self._check_trial_id(trial_id) trial = self._get_trial(trial_id) self.check_trial_is_updatable(trial_id, trial.state) trial = copy.copy(trial) trial.user_attrs = copy.copy(trial.user_attrs) trial.user_attrs[key] = value self._set_trial(trial_id, trial) def set_trial_system_attr(self, trial_id: int, key: str, value: JSONSerializable) -> None: with self._lock: trial = self._get_trial(trial_id) self.check_trial_is_updatable(trial_id, trial.state) trial = copy.copy(trial) trial.system_attrs = copy.copy(trial.system_attrs) trial.system_attrs[key] = value self._set_trial(trial_id, trial) def get_trial(self, trial_id: int) -> FrozenTrial: with self._lock: return self._get_trial(trial_id) def _get_trial(self, trial_id: int) -> FrozenTrial: self._check_trial_id(trial_id) study_id, trial_number = self._trial_id_to_study_id_and_number[trial_id] return self._studies[study_id].trials[trial_number] def _set_trial(self, trial_id: int, trial: FrozenTrial) -> None: study_id, trial_number = self._trial_id_to_study_id_and_number[trial_id] self._studies[study_id].trials[trial_number] = trial def get_all_trials( self, study_id: int, deepcopy: bool = True, states: Container[TrialState] | None = None, ) -> list[FrozenTrial]: with 
self._lock: self._check_study_id(study_id) # Optimized retrieval of trials in the WAITING state to improve performance # for the call, `get_all_trials(states=(TrialState.WAITING,))`. if states == (TrialState.WAITING,): trials: list[FrozenTrial] = [] for trial in self._studies[study_id].trials[ self._prev_waiting_trial_number[study_id] : ]: if trial.state == TrialState.WAITING: if not trials: self._prev_waiting_trial_number[study_id] = trial.number trials.append(trial) if not trials: self._prev_waiting_trial_number[study_id] = len(self._studies[study_id].trials) else: trials = self._studies[study_id].trials if states is not None: trials = [t for t in trials if t.state in states] if deepcopy: trials = copy.deepcopy(trials) else: # This copy is required for the replacing trick in `set_trial_xxx`. trials = copy.copy(trials) return trials def _check_study_id(self, study_id: int) -> None: if study_id not in self._studies: raise KeyError("No study with study_id {} exists.".format(study_id)) def _check_trial_id(self, trial_id: int) -> None: if trial_id not in self._trial_id_to_study_id_and_number: raise KeyError("No trial with trial_id {} exists.".format(trial_id))
InMemoryStorage
python
google__jax
tests/lobpcg_test.py
{ "start": 12402, "end": 13968 }
class ____(LobpcgTest):
  def setUp(self):
    # TODO(phawkins): investigate this failure
    if jtu.test_device_matches(["gpu"]):
      raise unittest.SkipTest("Test is failing on CUDA gpus")
    super().setUp()

  def testLobpcgValidatesArguments(self):
    A, _ = _concrete_generators(np.float32)['id'](100, 10)
    X = self.rng().standard_normal(size=(100, 10)).astype(np.float32)

    with self.assertRaisesRegex(ValueError, 'search dim > 0'):
      linalg.lobpcg_standard(A, X[:,:0])

    with self.assertRaisesRegex(ValueError, 'A, X must have same dtypes'):
      linalg.lobpcg_standard(
          lambda x: jnp.array(A).dot(x).astype(jnp.float16), X)

    with self.assertRaisesRegex(ValueError, r'A must be \(100, 100\)'):
      linalg.lobpcg_standard(A[:60, :], X)

    with self.assertRaisesRegex(ValueError, r'search dim \* 5 < matrix dim'):
      linalg.lobpcg_standard(A[:50, :50], X[:50])

  @parameterized.named_parameters(_make_concrete_cases(f64=False))
  @jtu.skip_on_devices("gpu")
  def testLobpcgConsistencyF32(self, matrix_name, n, k, m, tol):
    self.checkLobpcgConsistency(matrix_name, n, k, m, tol, jnp.float32)

  @parameterized.named_parameters(_make_concrete_cases(f64=False))
  def testLobpcgMonotonicityF32(self, matrix_name, n, k, m, tol):
    self.checkLobpcgMonotonicity(matrix_name, n, k, m, tol, jnp.float32)

  @parameterized.named_parameters(_make_callable_cases(f64=False))
  def testCallableMatricesF32(self, matrix_name):
    self.checkApproxEigs(matrix_name, jnp.float32)


@jtu.with_config(jax_enable_x64=True)
F32LobpcgTest
python
lazyprogrammer__machine_learning_examples
ann_class2/dropout_tensorflow.py
{ "start": 961, "end": 5004 }
class ____(object): def __init__(self, hidden_layer_sizes, p_keep): self.hidden_layer_sizes = hidden_layer_sizes self.dropout_rates = p_keep def fit(self, X, Y, Xvalid, Yvalid, lr=1e-4, mu=0.9, decay=0.9, epochs=15, batch_sz=100, print_every=50): X = X.astype(np.float32) Y = Y.astype(np.int64) Xvalid = Xvalid.astype(np.float32) Yvalid = Yvalid.astype(np.int64) # initialize hidden layers N, D = X.shape K = len(set(Y)) self.hidden_layers = [] M1 = D for M2 in self.hidden_layer_sizes: h = HiddenLayer(M1, M2) self.hidden_layers.append(h) M1 = M2 W = np.random.randn(M1, K) * np.sqrt(2.0 / M1) b = np.zeros(K) self.W = tf.Variable(W.astype(np.float32)) self.b = tf.Variable(b.astype(np.float32)) # collect params for later use self.params = [self.W, self.b] for h in self.hidden_layers: self.params += h.params # set up theano functions and variables inputs = tf.placeholder(tf.float32, shape=(None, D), name='inputs') labels = tf.placeholder(tf.int64, shape=(None,), name='labels') logits = self.forward(inputs) cost = tf.reduce_mean( tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits, labels=labels ) ) train_op = tf.train.RMSPropOptimizer(lr, decay=decay, momentum=mu).minimize(cost) # train_op = tf.train.MomentumOptimizer(lr, momentum=mu).minimize(cost) # train_op = tf.train.AdamOptimizer(lr).minimize(cost) prediction = self.predict(inputs) # validation cost will be calculated separately since nothing will be dropped test_logits = self.forward_test(inputs) test_cost = tf.reduce_mean( tf.nn.sparse_softmax_cross_entropy_with_logits( logits=test_logits, labels=labels ) ) n_batches = N // batch_sz costs = [] init = tf.global_variables_initializer() with tf.Session() as session: session.run(init) for i in range(epochs): print("epoch:", i, "n_batches:", n_batches) X, Y = shuffle(X, Y) for j in range(n_batches): Xbatch = X[j*batch_sz:(j*batch_sz+batch_sz)] Ybatch = Y[j*batch_sz:(j*batch_sz+batch_sz)] session.run(train_op, feed_dict={inputs: Xbatch, labels: Ybatch}) if j % print_every == 0: c = session.run(test_cost, feed_dict={inputs: Xvalid, labels: Yvalid}) p = session.run(prediction, feed_dict={inputs: Xvalid}) costs.append(c) e = error_rate(Yvalid, p) print("i:", i, "j:", j, "nb:", n_batches, "cost:", c, "error rate:", e) plt.plot(costs) plt.show() def forward(self, X): # tf.nn.dropout scales inputs by 1/p_keep # therefore, during test time, we don't have to scale anything Z = X Z = tf.nn.dropout(Z, self.dropout_rates[0]) for h, p in zip(self.hidden_layers, self.dropout_rates[1:]): Z = h.forward(Z) Z = tf.nn.dropout(Z, p) return tf.matmul(Z, self.W) + self.b def forward_test(self, X): Z = X for h in self.hidden_layers: Z = h.forward(Z) return tf.matmul(Z, self.W) + self.b def predict(self, X): pY = self.forward_test(X) return tf.argmax(pY, 1) def error_rate(p, t): return np.mean(p != t) def relu(a): return a * (a > 0) def main(): # step 1: get the data and define all the usual variables Xtrain, Xtest, Ytrain, Ytest = get_normalized_data() ann = ANN([500, 300], [0.8, 0.5, 0.5]) ann.fit(Xtrain, Ytrain, Xtest, Ytest) if __name__ == '__main__': main()
ANN
python
ray-project__ray
python/ray/_private/thirdparty/pathspec/util.py
{ "start": 12252, "end": 13313 }
class ____(Exception):
    """
    The :exc:`AlreadyRegisteredError` exception is raised when a pattern factory
    is registered under a name already in use.
    """

    def __init__(self, name, pattern_factory):
        """
        Initializes the :exc:`AlreadyRegisteredError` instance.

        *name* (:class:`str`) is the name of the registered pattern.

        *pattern_factory* (:class:`~collections.abc.Callable`) is the
        registered pattern factory.
        """
        super(AlreadyRegisteredError, self).__init__(name, pattern_factory)

    @property
    def message(self):
        """
        *message* (:class:`str`) is the error message.
        """
        return "{name!r} is already registered for pattern factory:{pattern_factory!r}.".format(
            name=self.name,
            pattern_factory=self.pattern_factory,
        )

    @property
    def name(self):
        """
        *name* (:class:`str`) is the name of the registered pattern.
        """
        return self.args[0]

    @property
    def pattern_factory(self):
        """
        *pattern_factory* (:class:`~collections.abc.Callable`) is the
        registered pattern factory.
        """
        return self.args[1]
AlreadyRegisteredError
python
scipy__scipy
scipy/stats/_continuous_distns.py
{ "start": 16392, "end": 17295 }
class ____(rv_continuous):
    r"""An arcsine continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `arcsine` is:

    .. math::

        f(x) = \frac{1}{\pi \sqrt{x (1-x)}}

    for :math:`0 < x < 1`.

    %(after_notes)s

    %(example)s

    """
    def _shape_info(self):
        return []

    def _pdf(self, x):
        # arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))
        with np.errstate(divide='ignore'):
            return 1.0/np.pi/np.sqrt(x*(1-x))

    def _cdf(self, x):
        return 2.0/np.pi*np.arcsin(np.sqrt(x))

    def _ppf(self, q):
        return np.sin(np.pi/2.0*q)**2.0

    def _stats(self):
        mu = 0.5
        mu2 = 1.0/8
        g1 = 0
        g2 = -3.0/2.0
        return mu, mu2, g1, g2

    def _entropy(self):
        return -0.24156447527049044468


arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine')
arcsine_gen
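The distribution defined above is exposed publicly as scipy.stats.arcsine, so its formulas can be checked end to end; a small illustrative check (not part of the original module):

import numpy as np
from scipy.stats import arcsine

# Density at the midpoint: 1 / (pi * sqrt(0.25)) = 2 / pi.
assert np.isclose(arcsine.pdf(0.5), 2 / np.pi)
# CDF closed form 2/pi * arcsin(sqrt(x)) gives exactly 0.5 at x = 0.5.
assert np.isclose(arcsine.cdf(0.5), 0.5)
assert np.isclose(arcsine.mean(), 0.5)
assert np.isclose(arcsine.var(), 1 / 8)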
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typedDict13.py
{ "start": 553, "end": 677 }
class ____(ParentC):
    # This should generate an error because "x" is Required in the parent.
    x: NotRequired[int]
ChildC
python
keon__algorithms
algorithms/heap/binary_heap.py
{ "start": 1018, "end": 1486 }
class ____(metaclass=ABCMeta):
    """Abstract Class for Binary Heap."""

    def __init__(self):
        """Pass."""

    @abstractmethod
    def perc_up(self, i):
        """Pass."""

    @abstractmethod
    def insert(self, val):
        """Pass."""

    @abstractmethod
    def perc_down(self, i):
        """Pass."""

    @abstractmethod
    def min_child(self, i):
        """Pass."""

    @abstractmethod
    def remove_min(self):
        """Pass."""
AbstractHeap
python
kamyu104__LeetCode-Solutions
Python/shift-2d-grid.py
{ "start": 33, "end": 834 }
class ____(object):
    def shiftGrid(self, grid, k):
        """
        :type grid: List[List[int]]
        :type k: int
        :rtype: List[List[int]]
        """
        def rotate(grids, k):
            def reverse(grid, start, end):
                while start < end:
                    start_r, start_c = divmod(start, len(grid[0]))
                    end_r, end_c = divmod(end-1, len(grid[0]))
                    grid[start_r][start_c], grid[end_r][end_c] = grid[end_r][end_c], grid[start_r][start_c]
                    start += 1
                    end -= 1

            k %= len(grid)*len(grid[0])
            reverse(grid, 0, len(grid)*len(grid[0]))
            reverse(grid, 0, k)
            reverse(grid, k, len(grid)*len(grid[0]))

        rotate(grid, k)
        return grid
Solution
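The triple-reverse above is equivalent to rotating the row-major flattening of the grid right by k; this sketch verifies the expected output with plain slicing rather than the in-place reversals, purely as an illustrative cross-check (not part of the original solution):

grid = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
k = 1
m, n = len(grid), len(grid[0])
flat = [v for row in grid for v in row]
k %= m * n
if k:
    flat = flat[-k:] + flat[:-k]   # rotate right by k
shifted = [flat[i * n:(i + 1) * n] for i in range(m)]
assert shifted == [[9, 1, 2], [3, 4, 5], [6, 7, 8]]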
python
apache__airflow
helm-tests/tests/helm_tests/airflow_core/test_api_server.py
{ "start": 1224, "end": 24338 }
class ____: """Tests api-server deployment.""" @pytest.mark.parametrize( ("revision_history_limit", "global_revision_history_limit"), [(8, 10), (10, 8), (8, None), (None, 10), (None, None)], ) def test_revision_history_limit(self, revision_history_limit, global_revision_history_limit): values = {"apiServer": {}} if revision_history_limit: values["apiServer"]["revisionHistoryLimit"] = revision_history_limit if global_revision_history_limit: values["revisionHistoryLimit"] = global_revision_history_limit docs = render_chart( values=values, show_only=["templates/api-server/api-server-deployment.yaml"], ) expected_result = revision_history_limit if revision_history_limit else global_revision_history_limit assert jmespath.search("spec.revisionHistoryLimit", docs[0]) == expected_result def test_should_add_scheme_to_liveness_and_readiness_and_startup_probes(self): docs = render_chart( values={ "apiServer": { "livenessProbe": {"scheme": "HTTPS"}, "readinessProbe": {"scheme": "HTTPS"}, "startupProbe": {"scheme": "HTTPS"}, } }, show_only=["templates/api-server/api-server-deployment.yaml"], ) assert "HTTPS" in jmespath.search( "spec.template.spec.containers[0].livenessProbe.httpGet.scheme", docs[0] ) assert "HTTPS" in jmespath.search( "spec.template.spec.containers[0].readinessProbe.httpGet.scheme", docs[0] ) assert "HTTPS" in jmespath.search( "spec.template.spec.containers[0].startupProbe.httpGet.scheme", docs[0] ) def test_should_add_extra_containers(self): docs = render_chart( values={ "executor": "CeleryExecutor", "apiServer": { "extraContainers": [ {"name": "{{.Chart.Name}}", "image": "test-registry/test-repo:test-tag"} ], }, }, show_only=["templates/api-server/api-server-deployment.yaml"], ) assert jmespath.search("spec.template.spec.containers[-1]", docs[0]) == { "name": "airflow", "image": "test-registry/test-repo:test-tag", } def test_should_add_extraEnvs(self): docs = render_chart( values={ "apiServer": { "env": [ {"name": "TEST_ENV_1", "value": "test_env_1"}, { "name": "TEST_ENV_2", "valueFrom": {"configMapKeyRef": {"name": "test-config", "key": "test-key"}}, }, { "name": "TEST_ENV_3", "valueFrom": {"secretKeyRef": {"name": "test-secret", "key": "test-key"}}, }, ], }, }, show_only=["templates/api-server/api-server-deployment.yaml"], ) env_result = jmespath.search("spec.template.spec.containers[0].env", docs[0]) assert {"name": "TEST_ENV_1", "value": "test_env_1"} in env_result assert { "name": "TEST_ENV_2", "valueFrom": {"configMapKeyRef": {"name": "test-config", "key": "test-key"}}, } in env_result assert { "name": "TEST_ENV_3", "valueFrom": {"secretKeyRef": {"name": "test-secret", "key": "test-key"}}, } in env_result def test_should_add_extra_volume_and_extra_volume_mount(self): docs = render_chart( values={ "apiServer": { "extraVolumes": [{"name": "test-volume-{{ .Chart.Name }}", "emptyDir": {}}], "extraVolumeMounts": [ {"name": "test-volume-{{ .Chart.Name }}", "mountPath": "/opt/test"} ], }, }, show_only=["templates/api-server/api-server-deployment.yaml"], ) assert jmespath.search("spec.template.spec.volumes[-1].name", docs[0]) == "test-volume-airflow" assert ( jmespath.search("spec.template.spec.containers[0].volumeMounts[-1].name", docs[0]) == "test-volume-airflow" ) assert ( jmespath.search("spec.template.spec.initContainers[0].volumeMounts[-1].name", docs[0]) == "test-volume-airflow" ) def test_should_add_global_volume_and_global_volume_mount(self): docs = render_chart( values={ "volumes": [{"name": "test-volume", "emptyDir": {}}], "volumeMounts": [{"name": "test-volume", 
"mountPath": "/opt/test"}], }, show_only=["templates/api-server/api-server-deployment.yaml"], ) assert jmespath.search("spec.template.spec.volumes[-1].name", docs[0]) == "test-volume" assert ( jmespath.search("spec.template.spec.containers[0].volumeMounts[-1].name", docs[0]) == "test-volume" ) def test_should_add_extraEnvs_to_wait_for_migration_container(self): docs = render_chart( values={ "apiServer": { "waitForMigrations": { "env": [{"name": "TEST_ENV_1", "value": "test_env_1"}], }, }, }, show_only=["templates/api-server/api-server-deployment.yaml"], ) assert {"name": "TEST_ENV_1", "value": "test_env_1"} in jmespath.search( "spec.template.spec.initContainers[0].env", docs[0] ) def test_wait_for_migration_airflow_version(self): expected_arg = ["airflow", "db", "check-migrations", "--migration-wait-timeout=60"] docs = render_chart( show_only=["templates/api-server/api-server-deployment.yaml"], ) # Don't test the full string, just the length of the expect matches actual = jmespath.search("spec.template.spec.initContainers[0].args", docs[0]) assert expected_arg == actual[: len(expected_arg)] def test_disable_wait_for_migration(self): docs = render_chart( values={ "apiServer": { "waitForMigrations": {"enabled": False}, }, }, show_only=["templates/api-server/api-server-deployment.yaml"], ) actual = jmespath.search( "spec.template.spec.initContainers[?name=='wait-for-airflow-migrations']", docs[0] ) assert actual is None def test_should_add_extra_init_containers(self): docs = render_chart( values={ "apiServer": { "extraInitContainers": [ {"name": "test-init-container", "image": "test-registry/test-repo:test-tag"} ], }, }, show_only=["templates/api-server/api-server-deployment.yaml"], ) assert jmespath.search("spec.template.spec.initContainers[-1]", docs[0]) == { "name": "test-init-container", "image": "test-registry/test-repo:test-tag", } def test_should_add_component_specific_labels(self): docs = render_chart( values={ "apiServer": { "labels": {"test_label": "test_label_value"}, }, }, show_only=["templates/api-server/api-server-deployment.yaml"], ) assert "test_label" in jmespath.search("spec.template.metadata.labels", docs[0]) assert jmespath.search("spec.template.metadata.labels", docs[0])["test_label"] == "test_label_value" def test_should_create_valid_affinity_tolerations_and_node_selector(self): docs = render_chart( values={ "apiServer": { "affinity": { "nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { "nodeSelectorTerms": [ { "matchExpressions": [ {"key": "foo", "operator": "In", "values": ["true"]}, ] } ] } } }, "tolerations": [ {"key": "dynamic-pods", "operator": "Equal", "value": "true", "effect": "NoSchedule"} ], "nodeSelector": {"diskType": "ssd"}, } }, show_only=["templates/api-server/api-server-deployment.yaml"], ) assert jmespath.search("kind", docs[0]) == "Deployment" assert ( jmespath.search( "spec.template.spec.affinity.nodeAffinity." "requiredDuringSchedulingIgnoredDuringExecution." "nodeSelectorTerms[0]." "matchExpressions[0]." "key", docs[0], ) == "foo" ) assert ( jmespath.search( "spec.template.spec.nodeSelector.diskType", docs[0], ) == "ssd" ) assert ( jmespath.search( "spec.template.spec.tolerations[0].key", docs[0], ) == "dynamic-pods" ) def test_should_create_default_affinity(self): docs = render_chart( show_only=["templates/api-server/api-server-deployment.yaml"], ) assert jmespath.search( "spec.template.spec.affinity.podAntiAffinity." "preferredDuringSchedulingIgnoredDuringExecution[0]." 
"podAffinityTerm.labelSelector.matchLabels", docs[0], ) == {"component": "api-server"} def test_affinity_tolerations_topology_spread_constraints_and_node_selector_precedence(self): """When given both global and api-server affinity etc, api-server affinity etc is used.""" expected_affinity = { "nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { "nodeSelectorTerms": [ { "matchExpressions": [ {"key": "foo", "operator": "In", "values": ["true"]}, ] } ] } } } expected_topology_spread_constraints = { "maxSkew": 1, "topologyKey": "foo", "whenUnsatisfiable": "ScheduleAnyway", "labelSelector": {"matchLabels": {"tier": "airflow"}}, } docs = render_chart( values={ "apiServer": { "affinity": expected_affinity, "tolerations": [ {"key": "dynamic-pods", "operator": "Equal", "value": "true", "effect": "NoSchedule"} ], "topologySpreadConstraints": [expected_topology_spread_constraints], "nodeSelector": {"type": "ssd"}, }, "affinity": { "nodeAffinity": { "preferredDuringSchedulingIgnoredDuringExecution": [ { "weight": 1, "preference": { "matchExpressions": [ {"key": "not-me", "operator": "In", "values": ["true"]}, ] }, } ] } }, "tolerations": [ {"key": "not-me", "operator": "Equal", "value": "true", "effect": "NoSchedule"} ], "topologySpreadConstraints": [ { "maxSkew": 1, "topologyKey": "not-me", "whenUnsatisfiable": "ScheduleAnyway", "labelSelector": {"matchLabels": {"tier": "airflow"}}, } ], "nodeSelector": {"type": "not-me"}, }, show_only=["templates/api-server/api-server-deployment.yaml"], ) assert expected_affinity == jmespath.search("spec.template.spec.affinity", docs[0]) assert ( jmespath.search( "spec.template.spec.nodeSelector.type", docs[0], ) == "ssd" ) tolerations = jmespath.search("spec.template.spec.tolerations", docs[0]) assert len(tolerations) == 1 assert tolerations[0]["key"] == "dynamic-pods" assert expected_topology_spread_constraints == jmespath.search( "spec.template.spec.topologySpreadConstraints[0]", docs[0] ) def test_scheduler_name(self): docs = render_chart( values={"schedulerName": "airflow-scheduler"}, show_only=["templates/api-server/api-server-deployment.yaml"], ) assert ( jmespath.search( "spec.template.spec.schedulerName", docs[0], ) == "airflow-scheduler" ) @pytest.mark.parametrize( ("log_persistence_values", "expected_claim_name"), [ ({"enabled": False}, None), ({"enabled": True}, "release-name-logs"), ({"enabled": True, "existingClaim": "test-claim"}, "test-claim"), ], ) def test_logs_persistence_adds_volume_and_mount(self, log_persistence_values, expected_claim_name): docs = render_chart( values={"logs": {"persistence": log_persistence_values}}, show_only=["templates/api-server/api-server-deployment.yaml"], ) if expected_claim_name: assert { "name": "logs", "persistentVolumeClaim": {"claimName": expected_claim_name}, } in jmespath.search("spec.template.spec.volumes", docs[0]) assert { "name": "logs", "mountPath": "/opt/airflow/logs", } in jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0]) else: assert "logs" not in [v["name"] for v in jmespath.search("spec.template.spec.volumes", docs[0])] assert "logs" not in [ v["name"] for v in jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0]) ] def test_config_volumes(self): docs = render_chart( show_only=["templates/api-server/api-server-deployment.yaml"], ) # default config assert { "name": "config", "mountPath": "/opt/airflow/airflow.cfg", "readOnly": True, "subPath": "airflow.cfg", } in jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0]) def 
testapi_server_resources_are_configurable(self): docs = render_chart( values={ "apiServer": { "resources": { "limits": {"cpu": "200m", "memory": "128Mi"}, "requests": {"cpu": "300m", "memory": "169Mi"}, }, }, }, show_only=["templates/api-server/api-server-deployment.yaml"], ) assert jmespath.search("spec.template.spec.containers[0].resources.limits.memory", docs[0]) == "128Mi" assert jmespath.search("spec.template.spec.containers[0].resources.limits.cpu", docs[0]) == "200m" assert ( jmespath.search("spec.template.spec.containers[0].resources.requests.memory", docs[0]) == "169Mi" ) assert jmespath.search("spec.template.spec.containers[0].resources.requests.cpu", docs[0]) == "300m" # initContainer wait-for-airflow-migrations assert ( jmespath.search("spec.template.spec.initContainers[0].resources.limits.memory", docs[0]) == "128Mi" ) assert jmespath.search("spec.template.spec.initContainers[0].resources.limits.cpu", docs[0]) == "200m" assert ( jmespath.search("spec.template.spec.initContainers[0].resources.requests.memory", docs[0]) == "169Mi" ) assert ( jmespath.search("spec.template.spec.initContainers[0].resources.requests.cpu", docs[0]) == "300m" ) def test_api_server_security_contexts_are_configurable(self): docs = render_chart( values={ "apiServer": { "securityContexts": { "pod": { "fsGroup": 1000, "runAsGroup": 1001, "runAsNonRoot": True, "runAsUser": 2000, }, "container": { "allowPrivilegeEscalation": False, "readOnlyRootFilesystem": True, }, }, }, }, show_only=["templates/api-server/api-server-deployment.yaml"], ) assert jmespath.search("spec.template.spec.containers[0].securityContext", docs[0]) == { "allowPrivilegeEscalation": False, "readOnlyRootFilesystem": True, } assert jmespath.search("spec.template.spec.securityContext", docs[0]) == { "runAsUser": 2000, "runAsGroup": 1001, "fsGroup": 1000, "runAsNonRoot": True, } def test_api_server_security_context_legacy(self): with pytest.raises(CalledProcessError, match="Additional property securityContext is not allowed"): render_chart( values={ "apiServer": { "securityContext": { "fsGroup": 1000, "runAsGroup": 1001, "runAsNonRoot": True, "runAsUser": 2000, }, }, }, show_only=["templates/api-server/api-server-deployment.yaml"], ) def test_api_server_resources_are_not_added_by_default(self): docs = render_chart( show_only=["templates/api-server/api-server-deployment.yaml"], ) assert jmespath.search("spec.template.spec.containers[0].resources", docs[0]) == {} assert jmespath.search("spec.template.spec.initContainers[0].resources", docs[0]) == {} @pytest.mark.parametrize( ("airflow_version", "strategy", "expected_strategy"), [ pytest.param( "3.0.0", None, {"type": "RollingUpdate", "rollingUpdate": {"maxSurge": 1, "maxUnavailable": 0}}, id="default", ), pytest.param( "3.0.0", {"type": "RollingUpdate", "rollingUpdate": {"maxUnavailable": 1}}, {"type": "RollingUpdate", "rollingUpdate": {"maxSurge": 1, "maxUnavailable": 0}}, id="custom-strategy", ), ], ) def test_update_strategy(self, airflow_version, strategy, expected_strategy): docs = render_chart( values={"airflowVersion": airflow_version, "apiServer": {"strategy": expected_strategy}}, show_only=["templates/api-server/api-server-deployment.yaml"], ) assert jmespath.search("spec.strategy", docs[0]) == expected_strategy def test_default_command_and_args(self): docs = render_chart( show_only=["templates/api-server/api-server-deployment.yaml"], ) assert jmespath.search("spec.template.spec.containers[0].command", docs[0]) is None assert jmespath.search("spec.template.spec.containers[0].args", 
docs[0]) == [ "bash", "-c", "exec airflow api-server", ] @pytest.mark.parametrize("command", [None, ["custom", "command"]]) @pytest.mark.parametrize("args", [None, ["custom", "args"]]) def test_command_and_args_overrides(self, command, args): docs = render_chart( values={"apiServer": {"command": command, "args": args}}, show_only=["templates/api-server/api-server-deployment.yaml"], ) assert command == jmespath.search("spec.template.spec.containers[0].command", docs[0]) assert args == jmespath.search("spec.template.spec.containers[0].args", docs[0]) def test_command_and_args_overrides_are_templated(self): docs = render_chart( values={ "apiServer": { "command": ["{{ .Release.Name }}"], "args": ["{{ .Release.Service }}"], } }, show_only=["templates/api-server/api-server-deployment.yaml"], ) assert jmespath.search("spec.template.spec.containers[0].command", docs[0]) == ["release-name"] assert jmespath.search("spec.template.spec.containers[0].args", docs[0]) == ["Helm"] def test_should_add_component_specific_annotations(self): docs = render_chart( values={"apiServer": {"annotations": {"test_annotation": "test_annotation_value"}}}, show_only=["templates/api-server/api-server-deployment.yaml"], ) assert "annotations" in jmespath.search("metadata", docs[0]) assert jmespath.search("metadata.annotations", docs[0])["test_annotation"] == "test_annotation_value" def test_api_server_pod_hostaliases(self): docs = render_chart( values={"apiServer": {"hostAliases": [{"ip": "127.0.0.1", "hostnames": ["foo.local"]}]}}, show_only=["templates/api-server/api-server-deployment.yaml"], ) assert jmespath.search("spec.template.spec.hostAliases[0].ip", docs[0]) == "127.0.0.1" assert jmespath.search("spec.template.spec.hostAliases[0].hostnames[0]", docs[0]) == "foo.local" def test_can_be_disabled(self): """ API server can be disabled by configuration. """ docs = render_chart( values={"apiServer": {"enabled": False}}, show_only=["templates/api-server/api-server-deployment.yaml"], ) assert len(docs) == 0
TestAPIServerDeployment
python
tensorflow__tensorflow
tensorflow/python/debug/lib/session_debug_file_test.py
{ "start": 4514, "end": 5117 }
class ____( session_debug_testlib.DebugConcurrentRunCallsTest): def setUp(self): self._num_concurrent_runs = 3 self._dump_roots = [] for _ in range(self._num_concurrent_runs): self._dump_roots.append(tempfile.mkdtemp()) def tearDown(self): ops.reset_default_graph() for dump_root in self._dump_roots: if os.path.isdir(dump_root): file_io.delete_recursively(dump_root) def _get_concurrent_debug_urls(self): return [("file://%s" % dump_root) for dump_root in self._dump_roots] if __name__ == "__main__": googletest.main()
SessionDebugConcurrentTest
python
google__jax
jax/experimental/jax2tf/tests/jax2tf_limitations.py
{ "start": 882, "end": 7546 }
class ____(test_harnesses.Limitation): """Specific primitive limitations for jax2tf. See the primitive_test module docstring for details. """ def __init__( self, description: str, *, devices: str | Sequence[str] = ("cpu", "gpu", "tpu"), dtypes: Sequence[DType] = (), enabled: bool = True, # jax2tf specific modes=("eager", "graph", "compiled"), skip_tf_run=False, expect_tf_error: bool = True, skip_comparison=False, custom_assert: Callable | None = None, tol=None): """See the test_harnesses.Limitation common arguments. Args : modes: one of "eager", "graph", "compiled" for_native_serialization: A bitmask with some of {FOR_NATIVE, FOR_NON_NATIVE} to specify how the limitation applies to native and non-native lowering. skip_tf_run: if set will skip the TF execution. Use this sparingly, prefer `expect_tf_error`. Use only when the test cannot recover from the TF error. expect_tf_error: if set, then expect a TF error in the given mode when executing the result of jax2tf conversion. If not set, then the limitation must have a custom_assert or non-default tol. skip_comparison: skips the numeric comparison. tol: a tolerance to use for both atol and rtol. We will use the maximum tolerance over all the applicable limitations, irrespective of their order. custom_assert: if given, then execute as `custom_assert(tst, result_jax, result_tf, args=args, tol=tol, err_msg)` , where `tst` is the current TestCase instance, and args are the input arguments that the harness created. The `tol` is the maximum tolerance based on the applicable limitations. `err_msg` is passed to NumPy assert methods. `result_tf` is already converted to NumPy arrays. """ super().__init__( description, devices=devices, dtypes=dtypes, enabled=enabled) if isinstance(modes, str): modes = (modes,) assert all(m in ["eager", "graph", "compiled"] for m in modes), "Invalid modes: {modes}" self.modes = modes self.expect_tf_error = expect_tf_error self.skip_tf_run = skip_tf_run self.custom_assert = custom_assert self.tol = tol self.skip_comparison = skip_comparison def get_max_tolerance_limitation( self, limitations: Sequence[Jax2TfLimitation] ) -> Jax2TfLimitation | None: """Pick the tolerance limitation that establishes the maximum tolerance.""" # TODO: it would be best if the limitations with tolerance are mutually exclusive # and we don't have to compute the maximum # TODO: we made this an instance method only so that we don't have to import # this module from tf_test.util. max_tol_lim = None for l in limitations: if l.tol is not None: if max_tol_lim is None or l.tol > max_tol_lim.tol: max_tol_lim = l return max_tol_lim @classmethod def limitations_for_harness( cls, harness: test_harnesses.Harness) -> Sequence[Jax2TfLimitation]: group_method = getattr(cls, harness.group_name, None) if group_method is not None: limitations = group_method(harness) assert isinstance(limitations, (list, tuple)) return limitations else: return [] @classmethod def asinh(cls, harness: test_harnesses.Harness): return [ custom_numeric(dtypes=[np.complex128], devices=("cpu",), modes=("eager", "compiled", "graph"), tol=1e-13), ] @classmethod def cholesky(cls, harness: test_harnesses.Harness): return [ custom_numeric( dtypes=[dtypes.bfloat16], tol=5e-5, # Error for GL devices=("tpu",), modes=("eager", "graph", "compiled")), ] @classmethod def conv_general_dilated(cls, harness: test_harnesses.Harness): return [ # Even in compiled mode, for GPU we see a bit of discrepancy but # very minor. 
custom_numeric(dtypes=[np.float32], devices="cpu", modes=("eager", "graph", "compiled"), tol=1e-4), ] @classmethod def fft(cls, harness): return [ custom_numeric(tol=1e-5, modes=("eager", "graph", "compiled"), devices=("cpu",)), ] @classmethod def max(cls, harness: test_harnesses.Harness): # TODO(bchetioui): discrepancies between TF & JAX when comparing with NaN; # JAX always returns NaN, while TF returns the value NaN is compared with. def custom_assert(tst, result_jax, result_tf, err_msg, **_): mask = np.isnan(result_jax) tst.assertAllClose(result_jax[~mask], result_tf[~mask], err_msg=err_msg) return [ # TODO(b/269996580) custom_numeric( custom_assert=custom_assert, devices="cpu", description=( "TF and JAX use different values of the compiler flag " "xla_cpu_enable_fast_min_max compiler flag and therefore have " "different behavior of NaN propagation through min/max." ), modes=("eager", "graph", "compiled")) ] @classmethod def min(cls, harness: test_harnesses.Harness): # TODO(bchetioui): discrepancies between TF & JAX when comparing with NaN; # JAX always returns NaN, while TF returns the value NaN is compared with. def custom_assert(tst, result_jax, result_tf, *, err_msg, **_): mask = np.isnan(result_jax) tst.assertAllClose(result_jax[~mask], result_tf[~mask], err_msg=err_msg) return [ # TODO(b/269996580) custom_numeric( custom_assert=custom_assert, devices="cpu", description=( "TF and JAX use different values of the compiler flag " "xla_cpu_enable_fast_min_max compiler flag and therefore have " "different behavior of NaN propagation through min/max." ), modes=("eager", "graph", "compiled"), ) ] def custom_numeric( *, description="custom numeric comparison", dtypes=(), # All modes=( "eager", "graph", ), # By default we should not need tolerance for # "compiled" devices=("cpu", "gpu", "tpu"), custom_assert=None, enabled=True, tol=None) -> Jax2TfLimitation: return Jax2TfLimitation( description, expect_tf_error=False, dtypes=dtypes, devices=devices, modes=modes, custom_assert=custom_assert, enabled=enabled, tol=tol)
Jax2TfLimitation
python
huggingface__transformers
src/transformers/models/funnel/configuration_funnel.py
{ "start": 761, "end": 7640 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`FunnelModel`]. It is used to instantiate a Funnel Transformer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Funnel Transformer [funnel-transformer/small](https://huggingface.co/funnel-transformer/small) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the Funnel transformer. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`FunnelModel`]. block_sizes (`list[int]`, *optional*, defaults to `[4, 4, 4]`): The sizes of the blocks used in the model. block_repeats (`list[int]`, *optional*): If passed along, each layer of each block is repeated the number of times indicated. num_decoder_layers (`int`, *optional*, defaults to 2): The number of layers in the decoder (when not using the base model). d_model (`int`, *optional*, defaults to 768): Dimensionality of the model's hidden states. n_head (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. d_head (`int`, *optional*, defaults to 64): Dimensionality of the model's heads. d_inner (`int`, *optional*, defaults to 3072): Inner dimension in the feed-forward blocks. hidden_act (`str` or `callable`, *optional*, defaults to `"gelu_new"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout probability used between the two layers of the feed-forward blocks. initializer_range (`float`, *optional*, defaults to 0.1): The upper bound of the *uniform initializer* for initializing all weight matrices in attention layers. initializer_std (`float`, *optional*): The standard deviation of the *normal initializer* for initializing the embedding matrix and the weight of linear layers. Will default to 1 for the embedding matrix and the value given by Xavier initialization for linear layers. layer_norm_eps (`float`, *optional*, defaults to 1e-09): The epsilon used by the layer normalization layers. pooling_type (`str`, *optional*, defaults to `"mean"`): Possible values are `"mean"` or `"max"`. The way pooling is performed at the beginning of each block. attention_type (`str`, *optional*, defaults to `"relative_shift"`): Possible values are `"relative_shift"` or `"factorized"`. The former is faster on CPU/GPU while the latter is faster on TPU. separate_cls (`bool`, *optional*, defaults to `True`): Whether or not to separate the cls token when applying pooling. truncate_seq (`bool`, *optional*, defaults to `True`): When using `separate_cls`, whether or not to truncate the last token when pooling, to avoid getting a sequence length that is not a multiple of 2. 
pool_q_only (`bool`, *optional*, defaults to `True`): Whether or not to apply the pooling only to the query or to query, key and values for the attention layers. """ model_type = "funnel" attribute_map = { "hidden_size": "d_model", "num_attention_heads": "n_head", } def __init__( self, vocab_size=30522, block_sizes=[4, 4, 4], block_repeats=None, num_decoder_layers=2, d_model=768, n_head=12, d_head=64, d_inner=3072, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, initializer_range=0.1, initializer_std=None, layer_norm_eps=1e-9, pooling_type="mean", attention_type="relative_shift", separate_cls=True, truncate_seq=True, pool_q_only=True, **kwargs, ): self.vocab_size = vocab_size self.block_sizes = block_sizes self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats assert len(block_sizes) == len(self.block_repeats), ( "`block_sizes` and `block_repeats` should have the same length." ) self.num_decoder_layers = num_decoder_layers self.d_model = d_model self.n_head = n_head self.d_head = d_head self.d_inner = d_inner self.hidden_act = hidden_act self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.initializer_range = initializer_range self.initializer_std = initializer_std self.layer_norm_eps = layer_norm_eps assert pooling_type in [ "mean", "max", ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported." self.pooling_type = pooling_type assert attention_type in [ "relative_shift", "factorized", ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported." self.attention_type = attention_type self.separate_cls = separate_cls self.truncate_seq = truncate_seq self.pool_q_only = pool_q_only super().__init__(**kwargs) @property def num_hidden_layers(self): return sum(self.block_sizes) @num_hidden_layers.setter def num_hidden_layers(self, value): raise NotImplementedError( "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`." ) @property def num_blocks(self): return len(self.block_sizes) @num_blocks.setter def num_blocks(self, value): raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.") __all__ = ["FunnelConfig"]
FunnelConfig
python
qdrant__qdrant-client
qdrant_client/http/models/models.py
{ "start": 46657, "end": 46881 }
class ____(BaseModel, extra="forbid"): """ Filter points which have specific vector assigned """ has_vector: str = Field(..., description="Filter points which have specific vector assigned")
HasVectorCondition
python
readthedocs__readthedocs.org
readthedocs/projects/views/private.py
{ "start": 31090, "end": 31600 }
class ____(DomainMixin, UpdateView): success_message = _("Domain updated") def form_valid(self, form): response = super().form_valid(form) self.object.restart_validation_process() return response def post(self, request, *args, **kwargs): project = self.get_project() if self._is_enabled(project) and not project.superproject: return super().post(request, *args, **kwargs) return HttpResponse("Action not allowed", status=401)
DomainUpdate
python
sphinx-doc__sphinx
sphinx/util/requests.py
{ "start": 1690, "end": 3655 }
class ____(requests.Session): _ignored_redirects: Sequence[re.Pattern[str]] def __init__(self, *args: Any, **kwargs: Any) -> None: self._ignored_redirects = kwargs.pop('_ignored_redirects', ()) super().__init__(*args, **kwargs) def get_redirect_target(self, resp: requests.Response) -> str | None: """Overrides the default requests.Session.get_redirect_target""" # do not follow redirections that match ignored URI patterns if resp.is_redirect: destination = urljoin(resp.url, resp.headers['location']) if any(pat.match(destination) for pat in self._ignored_redirects): raise _IgnoredRedirection( destination=destination, status_code=resp.status_code ) return super().get_redirect_target(resp) def request( # type: ignore[override] self, method: str, url: str, _user_agent: str = '', _tls_info: tuple[bool, str | dict[str, str] | None] = (), # type: ignore[assignment] **kwargs: Any, ) -> requests.Response: """Sends a request with an HTTP verb and url. This sets up User-Agent header and TLS verification automatically. """ headers = kwargs.setdefault('headers', {}) headers.setdefault('User-Agent', _user_agent or _USER_AGENT) if _tls_info: tls_verify, tls_cacerts = _tls_info verify = bool(kwargs.get('verify', tls_verify)) kwargs.setdefault('verify', verify and _get_tls_cacert(url, tls_cacerts)) else: verify = kwargs.get('verify', True) if verify: return super().request(method, url, **kwargs) with warnings.catch_warnings(): # ignore InsecureRequestWarning if verify=False warnings.filterwarnings('ignore', category=InsecureRequestWarning) return super().request(method, url, **kwargs)
_Session
python
jina-ai__jina
tests/integration/reduce/test_reduce.py
{ "start": 1164, "end": 3325 }
class ____(Executor): @requests def fake_reduce(self, **kwargs): return DocumentArray([Document(id='fake_document')]) @pytest.mark.parametrize('n_docs', [3, 5]) def test_reduce_shards(n_docs, port_generator): exposed_port = port_generator() n_shards = 3 search_flow = Flow(port=exposed_port).add( uses=ShardsExecutor, shards=n_shards, polling='all', uses_with={'n_docs': n_docs}, ) with search_flow: da = DocumentArray([Document() for _ in range(5)]) resp = Client(port=exposed_port).post( '/search', inputs=da, return_responses=True ) assert len(resp[0].docs) == 5 for doc in resp[0].docs: # assert matches and chunks are combined matches = set([doc.id for doc in doc.matches]) chunks = set([doc.id for doc in doc.chunks]) assert len(matches) == n_docs * n_shards assert len(chunks) == n_docs * n_shards for shard in range(n_shards): for match in range(n_docs): assert f'm-{shard}-{match}' in matches for chunk in range(n_docs): assert f'c-{shard}-{chunk}' in chunks # assert data properties are reduced with priority to the first shards assert doc.text == 'executor0/shard-0/rep-0' assert doc.scores['cosine'].value == 0 assert doc.modality == 'text' assert doc.tags == {'c': 'd'} @pytest.mark.parametrize('n_shards', [3, 5]) @pytest.mark.parametrize('n_docs', [3, 5]) def test_uses_after_no_reduce(n_shards, n_docs, port_generator): exposed_port = port_generator() search_flow = Flow(port=exposed_port).add( uses=ShardsExecutor, shards=n_shards, uses_after=DummyExecutor, polling='all', uses_with={'n_docs': n_docs}, ) with search_flow: da = DocumentArray([Document() for _ in range(5)]) resp = Client(port=exposed_port).post( '/search', inputs=da, return_responses=True ) # assert no reduce happened assert len(resp[0].docs) == 1 assert resp[0].docs[0].id == 'fake_document'
DummyExecutor
python
Delgan__loguru
loguru/_error_interceptor.py
{ "start": 30, "end": 1107 }
class ____: def __init__(self, should_catch, handler_id): self._should_catch = should_catch self._handler_id = handler_id def should_catch(self): return self._should_catch def print(self, record=None, *, exception=None): if not sys.stderr: return if exception is None: type_, value, traceback_ = sys.exc_info() else: type_, value, traceback_ = (type(exception), exception, exception.__traceback__) try: sys.stderr.write("--- Logging error in Loguru Handler #%d ---\n" % self._handler_id) try: record_repr = str(record) except Exception: record_repr = "/!\\ Unprintable record /!\\" sys.stderr.write("Record was: %s\n" % record_repr) traceback.print_exception(type_, value, traceback_, None, sys.stderr) sys.stderr.write("--- End of logging error ---\n") except OSError: pass finally: del type_, value, traceback_
ErrorInterceptor
python
PrefectHQ__prefect
src/prefect/server/schemas/filters.py
{ "start": 67453, "end": 67953 }
class ____(PrefectFilterBaseModel): """Filter by `WorkPool.id`.""" any_: Optional[list[UUID]] = Field( default=None, description="A list of work pool ids to include" ) def _get_filter_list( self, db: "PrefectDBInterface" ) -> Iterable[sa.ColumnExpressionArgument[bool]]: filters: list[sa.ColumnExpressionArgument[bool]] = [] if self.any_ is not None: filters.append(db.WorkPool.id.in_(self.any_)) return filters
WorkPoolFilterId
python
PrefectHQ__prefect
src/prefect/server/schemas/filters.py
{ "start": 17825, "end": 18680 }
class ____(PrefectFilterBaseModel): """Filter by `FlowRun.expected_start_time`.""" before_: Optional[DateTime] = Field( default=None, description="Only include flow runs scheduled to start at or before this time", ) after_: Optional[DateTime] = Field( default=None, description="Only include flow runs scheduled to start at or after this time", ) def _get_filter_list( self, db: "PrefectDBInterface" ) -> Iterable[sa.ColumnExpressionArgument[bool]]: filters: list[sa.ColumnExpressionArgument[bool]] = [] if self.before_ is not None: filters.append(db.FlowRun.expected_start_time <= self.before_) if self.after_ is not None: filters.append(db.FlowRun.expected_start_time >= self.after_) return filters
FlowRunFilterExpectedStartTime
python
apache__airflow
providers/databricks/src/airflow/providers/databricks/utils/mixins.py
{ "start": 2070, "end": 2227 }
class ____(Protocol): """Protocol for execute_complete method.""" statement_id: str _hook: DatabricksHook log: Logger
ExecuteCompleteHasFields
python
python__mypy
mypyc/transform/log_trace.py
{ "start": 1940, "end": 5374 }
class ____(IRTransform): def __init__(self, builder: LowLevelIRBuilder, fullname: str) -> None: super().__init__(builder) self.fullname = fullname.encode("utf-8") def visit_call(self, op: Call) -> Value: # TODO: Use different op name when constructing an instance return self.log(op, "call", op.fn.fullname) def visit_primitive_op(self, op: PrimitiveOp) -> Value: value = self.log(op, "primitive_op", op.desc.name) if op.desc.name in primitives_that_inc_ref: self.log_inc_ref(value) return value def visit_call_c(self, op: CallC) -> Value: if global_name := get_load_global_name(op): return self.log(op, "globals_dict_get_item", global_name) func_name = op.function_name if func_name == "PyObject_Vectorcall" and isinstance(op.args[0], CallC): if global_name := get_load_global_name(op.args[0]): return self.log(op, "python_call_global", global_name) elif func_name == "CPyObject_GetAttr" and isinstance(op.args[1], LoadLiteral): return self.log(op, "python_get_attr", str(op.args[1].value)) elif func_name == "PyObject_VectorcallMethod" and isinstance(op.args[0], LoadLiteral): return self.log(op, "python_call_method", str(op.args[0].value)) value = self.log(op, "call_c", func_name) if func_name in primitives_that_inc_ref: self.log_inc_ref(value) return value def visit_get_attr(self, op: GetAttr) -> Value: value = self.log(op, "get_attr", f"{op.class_type.name}.{op.attr}") if not op.is_borrowed and op.type.is_refcounted: self.log_inc_ref(op) return value def visit_set_attr(self, op: SetAttr) -> Value: name = "set_attr" if not op.is_init else "set_attr_init" return self.log(op, name, f"{op.class_type.name}.{op.attr}") def visit_box(self, op: Box) -> Value: if op.src.type is none_rprimitive: # Boxing 'None' is a very quick operation, so we don't log it. return self.add(op) else: return self.log(op, "box", str(op.src.type)) def visit_unbox(self, op: Unbox) -> Value: return self.log(op, "unbox", str(op.type)) def visit_cast(self, op: Cast) -> Value | None: value = self.log(op, "cast", str(op.type)) if not op.is_borrowed: self.log_inc_ref(value) return value def visit_inc_ref(self, op: IncRef) -> Value: return self.log(op, "inc_ref", str(op.src.type)) def visit_dec_ref(self, op: DecRef) -> Value: return self.log(op, "dec_ref", str(op.src.type)) def log_inc_ref(self, value: Value) -> None: self.log_event("inc_ref", str(value.type), value.line) def log(self, op: Op, name: str, details: str) -> Value: self.log_event(name, details, op.line) return self.add(op) def log_event(self, name: str, details: str, line: int) -> None: if line >= 0: line_str = str(line) else: line_str = "" self.builder.primitive_op( log_trace_event, [ CString(self.fullname), CString(line_str.encode("ascii")), CString(name.encode("utf-8")), CString(details.encode("utf-8")), ], line, )
LogTraceEventTransform
python
astropy__astropy
astropy/extern/configobj/validate.py
{ "start": 12150, "end": 12525 }
class ____(ValidateError): """The value supplied was of the wrong type""" def __init__(self, value): """ >>> raise VdtTypeError('jedi') Traceback (most recent call last): VdtTypeError: the value "jedi" is of the wrong type. """ ValidateError.__init__(self, 'the value "%s" is of the wrong type.' % (value,))
VdtTypeError
python
huggingface__transformers
src/transformers/models/sam3_tracker_video/modular_sam3_tracker_video.py
{ "start": 18060, "end": 18148 }
class ____(Sam2VideoPositionEmbeddingSine): pass
Sam3TrackerVideoPositionEmbeddingSine
python
scipy__scipy
scipy/stats/tests/test_stats.py
{ "start": 121677, "end": 128944 }
class ____: def test_zscore(self, xp): # not in R, so tested by using: # (testcase[i] - mean(testcase, axis=0)) / sqrt(var(testcase) * 3/4) y = stats.zscore(xp.asarray([1, 2, 3, 4])) desired = [-1.3416407864999, -0.44721359549996, 0.44721359549996, 1.3416407864999] xp_assert_close(y, xp.asarray(desired)) def test_zscore_axis(self, xp): # Test use of 'axis' keyword in zscore. x = xp.asarray([[0.0, 0.0, 1.0, 1.0], [1.0, 1.0, 1.0, 2.0], [2.0, 0.0, 2.0, 0.0]]) t1 = 1.0/(2.0/3)**0.5 t2 = 3**0.5/3 t3 = 2**0.5 z0 = stats.zscore(x, axis=0) z1 = stats.zscore(x, axis=1) z0_expected = [[-t1, -t3/2, -t3/2, 0.0], [0.0, t3, -t3/2, t1], [t1, -t3/2, t3, -t1]] z1_expected = [[-1.0, -1.0, 1.0, 1.0], [-t2, -t2, -t2, 3**0.5], [1.0, -1.0, 1.0, -1.0]] xp_assert_close(z0, xp.asarray(z0_expected)) xp_assert_close(z1, xp.asarray(z1_expected)) def test_zscore_ddof(self, xp): # Test use of 'ddof' keyword in zscore. x = xp.asarray([[0.0, 0.0, 1.0, 1.0], [0.0, 1.0, 2.0, 3.0]]) z = stats.zscore(x, axis=1, ddof=1) z0_expected = xp.asarray([-0.5, -0.5, 0.5, 0.5])/(1.0/3**0.5) z1_expected = xp.asarray([-1.5, -0.5, 0.5, 1.5])/((5./3)**0.5) xp_assert_close(z[0, :], z0_expected) xp_assert_close(z[1, :], z1_expected) def test_zscore_nan_propagate(self, xp): x = xp.asarray([1, 2, np.nan, 4, 5]) z = stats.zscore(x, nan_policy='propagate') xp_assert_equal(z, xp.full(x.shape, xp.nan)) def test_zscore_nan_omit(self, xp): x = xp.asarray([1, 2, xp.nan, 4, 5]) z = stats.zscore(x, nan_policy='omit') expected = xp.asarray([-1.2649110640673518, -0.6324555320336759, xp.nan, 0.6324555320336759, 1.2649110640673518 ]) xp_assert_close(z, expected) def test_zscore_nan_omit_with_ddof(self, xp): x = xp.asarray([xp.nan, 1.0, 3.0, 5.0, 7.0, 9.0]) z = stats.zscore(x, ddof=1, nan_policy='omit') expected = xp.concat([xp.asarray([xp.nan]), stats.zscore(x[1:], ddof=1)]) xp_assert_close(z, expected) @skip_xp_backends(eager_only=True, reason="lazy arrays don't do 'raise'.") def test_zscore_nan_raise(self, xp): x = xp.asarray([1, 2, xp.nan, 4, 5]) with pytest.raises(ValueError, match="The input contains nan..."): stats.zscore(x, nan_policy='raise') def test_zscore_constant_input_1d(self, xp): x = xp.asarray([-0.087] * 3) with eager_warns(RuntimeWarning, match="Precision loss occurred...", xp=xp): z = stats.zscore(x) xp_assert_equal(z, xp.full(x.shape, xp.nan)) @pytest.mark.filterwarnings("ignore:invalid value encountered:RuntimeWarning:dask") def test_zscore_constant_input_2d(self, xp): x = xp.asarray([[10.0, 10.0, 10.0, 10.0], [10.0, 11.0, 12.0, 13.0]]) with eager_warns(RuntimeWarning, match="Precision loss occurred...", xp=xp): z0 = stats.zscore(x, axis=0) xp_assert_close(z0, xp.asarray([[xp.nan, -1.0, -1.0, -1.0], [xp.nan, 1.0, 1.0, 1.0]])) with eager_warns(RuntimeWarning, match="Precision loss occurred...", xp=xp): z1 = stats.zscore(x, axis=1) xp_assert_equal(z1, xp.stack([xp.asarray([xp.nan, xp.nan, xp.nan, xp.nan]), stats.zscore(x[1, :])])) z = stats.zscore(x, axis=None) xp_assert_equal(z, xp.reshape(stats.zscore(xp.reshape(x, (-1,))), x.shape)) y = xp.ones((3, 6)) with eager_warns(RuntimeWarning, match="Precision loss occurred...", xp=xp): z = stats.zscore(y, axis=None) xp_assert_equal(z, xp.full_like(y, xp.nan)) @pytest.mark.filterwarnings("ignore:invalid value encountered:RuntimeWarning:dask") def test_zscore_constant_input_2d_nan_policy_omit(self, xp): x = xp.asarray([[10.0, 10.0, 10.0, 10.0], [10.0, 11.0, 12.0, xp.nan], [10.0, 12.0, xp.nan, 10.0]]) s = (3/2)**0.5 s2 = 2**0.5 with eager_warns(RuntimeWarning, match="Precision loss occurred...", 
xp=xp): z0 = stats.zscore(x, nan_policy='omit', axis=0) xp_assert_close(z0, xp.asarray([[xp.nan, -s, -1.0, xp.nan], [xp.nan, 0, 1.0, xp.nan], [xp.nan, s, xp.nan, xp.nan]])) with eager_warns(RuntimeWarning, match="Precision loss occurred...", xp=xp): z1 = stats.zscore(x, nan_policy='omit', axis=1) xp_assert_close(z1, xp.asarray([[xp.nan, xp.nan, xp.nan, xp.nan], [-s, 0, s, xp.nan], [-s2/2, s2, xp.nan, -s2/2]])) @pytest.mark.filterwarnings("ignore:invalid value encountered:RuntimeWarning:dask") def test_zscore_2d_all_nan_row(self, xp): # A row is all nan, and we use axis=1. x = xp.asarray([[np.nan, np.nan, np.nan, np.nan], [10.0, 10.0, 12.0, 12.0]]) z = stats.zscore(x, nan_policy='omit', axis=1) xp_assert_close(z, xp.asarray([[np.nan, np.nan, np.nan, np.nan], [-1.0, -1.0, 1.0, 1.0]])) @pytest.mark.filterwarnings("ignore:invalid value encountered:RuntimeWarning:dask") def test_zscore_2d_all_nan(self, xp): # The entire 2d array is nan, and we use axis=None. y = xp.full((2, 3), xp.nan) z = stats.zscore(y, nan_policy='omit', axis=None) xp_assert_equal(z, y) @pytest.mark.parametrize('x', [np.array([]), np.zeros((3, 0, 5))]) def test_zscore_empty_input(self, x, xp): x = xp.asarray(x) z = stats.zscore(x) xp_assert_equal(z, x) @skip_xp_invalid_arg def test_zscore_masked_element_0_gh19039(self, xp): # zscore returned all NaNs when 0th element was masked. See gh-19039. rng = np.random.default_rng(8675309) x = rng.standard_normal(10) mask = np.zeros_like(x) y = np.ma.masked_array(x, mask) y.mask[0] = True ref = stats.zscore(x[1:]) # compute reference from non-masked elements assert not np.any(np.isnan(ref)) res = stats.zscore(y) assert_allclose(res[1:], ref) res = stats.zscore(y, axis=None) assert_allclose(res[1:], ref) y[1:] = y[1] # when non-masked elements are identical, result is nan with pytest.warns(RuntimeWarning, match="Precision loss occurred..."): res = stats.zscore(y) assert_equal(res[1:], np.nan) with pytest.warns(RuntimeWarning, match="Precision loss occurred..."): res = stats.zscore(y, axis=None) assert_equal(res[1:], np.nan) @make_xp_test_case(stats.gzscore)
TestZscore
python
kamyu104__LeetCode-Solutions
Python/3sum.py
{ "start": 1024, "end": 1919 }
class ____(object): def threeSum(self, nums): """ :type nums: List[int] :rtype: List[List[int]] """ nums, result, i = sorted(nums), [], 0 while i < len(nums) - 2: if i == 0 or nums[i] != nums[i - 1]: j, k = i + 1, len(nums) - 1 while j < k: if nums[i] + nums[j] + nums[k] < 0: j += 1 elif nums[i] + nums[j] + nums[k] > 0: k -= 1 else: result.append([nums[i], nums[j], nums[k]]) j, k = j + 1, k - 1 while j < k and nums[j] == nums[j - 1]: j += 1 while j < k and nums[k] == nums[k + 1]: k -= 1 i += 1 return result
Solution2
python
django-crispy-forms__django-crispy-forms
tests/forms.py
{ "start": 6638, "end": 6715 }
class ____(forms.CheckboxSelectMultiple): pass
CustomCheckboxSelectMultiple
python
ZoranPandovski__al-go-rithms
data_structures/trie/Python/trie.py
{ "start": 281, "end": 2605 }
class ____: # Trie data structure class def __init__(self): self.root = self.getNode() def getNode(self): # Returns new trie node (initialized to NULLs) return TrieNode() def _charToIndex(self,ch): # private helper function # Converts key current character into index # use only 'a' through 'z' and lower case return ord(ch)-ord('a') def insert(self,key): # If not present, inserts key into trie # If the key is prefix of trie node, # just marks leaf node pCrawl = self.root length = len(key) for level in range(length): index = self._charToIndex(key[level]) # if current character is not present if not pCrawl.children[index]: pCrawl.children[index] = self.getNode() pCrawl = pCrawl.children[index] # mark last node as leaf pCrawl.isEndOfWord = True def search(self, key): # Search key in the trie # Returns true if key presents # in trie, else false pCrawl = self.root length = len(key) for level in range(length): index = self._charToIndex(key[level]) if not pCrawl.children[index]: return False pCrawl = pCrawl.children[index] return pCrawl != None and pCrawl.isEndOfWord # driver function currently designed for hardcoded inputs and output tests, we can modfy the program easily for input and output scenarios. def main(): # Input keys (use only 'a' through 'z' and lower case) keys = ["the","a","there","anaswe","any", "by","their"] output = ["Not present in trie", "Present in tire"] # Trie object t = Trie() # Construct trie for key in keys: t.insert(key) # Search for different keys print("{} ---- {}".format("the",output[t.search("the")])) print("{} ---- {}".format("these",output[t.search("these")])) print("{} ---- {}".format("their",output[t.search("their")])) print("{} ---- {}".format("thaw",output[t.search("thaw")])) if __name__ == '__main__': main()
Trie
python
walkccc__LeetCode
solutions/1320. Minimum Distance to Type a Word Using Two Fingers/1320.py
{ "start": 0, "end": 791 }
class ____: def minimumDistance(self, word: str) -> int: def dist(a: int, b: int) -> int: if a == 26: # the first hovering state return 0 x1, y1 = a // 6, a % 6 x2, y2 = b // 6, b % 6 return abs(x1 - x2) + abs(y1 - y2) @functools.lru_cache(None) def dp(i: int, j: int, k: int) -> int: """ Returns the minimum distance to type the `word`, where the left finger is on the i-th letter, the right finger is on the j-th letter, and the words[0..k) have been written. """ if k == len(word): return 0 nxt = ord(word[k]) - ord('A') moveLeft = dist(i, nxt) + dp(nxt, j, k + 1) moveRight = dist(j, nxt) + dp(i, nxt, k + 1) return min(moveLeft, moveRight) return dp(26, 26, 0)
Solution
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/dialects/postgresql/base.py
{ "start": 108926, "end": 112849 }
class ____(reflection.Inspector): dialect: PGDialect def get_table_oid( self, table_name: str, schema: Optional[str] = None ) -> int: """Return the OID for the given table name. :param table_name: string name of the table. For special quoting, use :class:`.quoted_name`. :param schema: string schema name; if omitted, uses the default schema of the database connection. For special quoting, use :class:`.quoted_name`. """ with self._operation_context() as conn: return self.dialect.get_table_oid( conn, table_name, schema, info_cache=self.info_cache ) def get_domains( self, schema: Optional[str] = None ) -> List[ReflectedDomain]: """Return a list of DOMAIN objects. Each member is a dictionary containing these fields: * name - name of the domain * schema - the schema name for the domain. * visible - boolean, whether or not this domain is visible in the default search path. * type - the type defined by this domain. * nullable - Indicates if this domain can be ``NULL``. * default - The default value of the domain or ``None`` if the domain has no default. * constraints - A list of dict wit the constraint defined by this domain. Each element constaints two keys: ``name`` of the constraint and ``check`` with the constraint text. :param schema: schema name. If None, the default schema (typically 'public') is used. May also be set to ``'*'`` to indicate load domains for all schemas. .. versionadded:: 2.0 """ with self._operation_context() as conn: return self.dialect._load_domains( conn, schema, info_cache=self.info_cache ) def get_enums(self, schema: Optional[str] = None) -> List[ReflectedEnum]: """Return a list of ENUM objects. Each member is a dictionary containing these fields: * name - name of the enum * schema - the schema name for the enum. * visible - boolean, whether or not this enum is visible in the default search path. * labels - a list of string labels that apply to the enum. :param schema: schema name. If None, the default schema (typically 'public') is used. May also be set to ``'*'`` to indicate load enums for all schemas. """ with self._operation_context() as conn: return self.dialect._load_enums( conn, schema, info_cache=self.info_cache ) def get_foreign_table_names( self, schema: Optional[str] = None ) -> List[str]: """Return a list of FOREIGN TABLE names. Behavior is similar to that of :meth:`_reflection.Inspector.get_table_names`, except that the list is limited to those tables that report a ``relkind`` value of ``f``. """ with self._operation_context() as conn: return self.dialect._get_foreign_table_names( conn, schema, info_cache=self.info_cache ) def has_type( self, type_name: str, schema: Optional[str] = None, **kw: Any ) -> bool: """Return if the database has the specified type in the provided schema. :param type_name: the type to check. :param schema: schema name. If None, the default schema (typically 'public') is used. May also be set to ``'*'`` to check in all schemas. .. versionadded:: 2.0 """ with self._operation_context() as conn: return self.dialect.has_type( conn, type_name, schema, info_cache=self.info_cache )
PGInspector
python
aio-libs__aiohttp
aiohttp/multipart.py
{ "start": 19881, "end": 27499 }
class ____: """Multipart body reader.""" #: Response wrapper, used when multipart readers constructs from response. response_wrapper_cls = MultipartResponseWrapper #: Multipart reader class, used to handle multipart/* body parts. #: None points to type(self) multipart_reader_cls: type["MultipartReader"] | None = None #: Body part reader class for non multipart/* content types. part_reader_cls = BodyPartReader def __init__(self, headers: Mapping[str, str], content: StreamReader) -> None: self._mimetype = parse_mimetype(headers[CONTENT_TYPE]) assert self._mimetype.type == "multipart", "multipart/* content type expected" if "boundary" not in self._mimetype.parameters: raise ValueError( "boundary missed for Content-Type: %s" % headers[CONTENT_TYPE] ) self.headers = headers self._boundary = ("--" + self._get_boundary()).encode() self._content = content self._default_charset: str | None = None self._last_part: MultipartReader | BodyPartReader | None = None self._at_eof = False self._at_bof = True self._unread: list[bytes] = [] def __aiter__(self) -> Self: return self async def __anext__( self, ) -> Union["MultipartReader", BodyPartReader] | None: part = await self.next() if part is None: raise StopAsyncIteration return part @classmethod def from_response( cls, response: "ClientResponse", ) -> MultipartResponseWrapper: """Constructs reader instance from HTTP response. :param response: :class:`~aiohttp.client.ClientResponse` instance """ obj = cls.response_wrapper_cls( response, cls(response.headers, response.content) ) return obj def at_eof(self) -> bool: """Returns True if the final boundary was reached, false otherwise.""" return self._at_eof async def next( self, ) -> Union["MultipartReader", BodyPartReader] | None: """Emits the next multipart body part.""" # So, if we're at BOF, we need to skip till the boundary. if self._at_eof: return None await self._maybe_release_last_part() if self._at_bof: await self._read_until_first_boundary() self._at_bof = False else: await self._read_boundary() if self._at_eof: # we just read the last boundary, nothing to do there # https://github.com/python/mypy/issues/17537 return None # type: ignore[unreachable] part = await self.fetch_next_part() # https://datatracker.ietf.org/doc/html/rfc7578#section-4.6 if ( self._last_part is None and self._mimetype.subtype == "form-data" and isinstance(part, BodyPartReader) ): _, params = parse_content_disposition(part.headers.get(CONTENT_DISPOSITION)) if params.get("name") == "_charset_": # Longest encoding in https://encoding.spec.whatwg.org/encodings.json # is 19 characters, so 32 should be more than enough for any valid encoding. charset = await part.read_chunk(32) if len(charset) > 31: raise RuntimeError("Invalid default charset") self._default_charset = charset.strip().decode() part = await self.fetch_next_part() self._last_part = part return self._last_part async def release(self) -> None: """Reads all the body parts to the void till the final boundary.""" while not self._at_eof: item = await self.next() if item is None: break await item.release() async def fetch_next_part( self, ) -> Union["MultipartReader", BodyPartReader]: """Returns the next body part reader.""" headers = await self._read_headers() return self._get_part_reader(headers) def _get_part_reader( self, headers: "CIMultiDictProxy[str]", ) -> Union["MultipartReader", BodyPartReader]: """Dispatches the response by the `Content-Type` header. Returns a suitable reader instance. 
:param dict headers: Response headers """ ctype = headers.get(CONTENT_TYPE, "") mimetype = parse_mimetype(ctype) if mimetype.type == "multipart": if self.multipart_reader_cls is None: return type(self)(headers, self._content) return self.multipart_reader_cls(headers, self._content) else: return self.part_reader_cls( self._boundary, headers, self._content, subtype=self._mimetype.subtype, default_charset=self._default_charset, ) def _get_boundary(self) -> str: boundary = self._mimetype.parameters["boundary"] if len(boundary) > 70: raise ValueError("boundary %r is too long (70 chars max)" % boundary) return boundary async def _readline(self) -> bytes: if self._unread: return self._unread.pop() return await self._content.readline() async def _read_until_first_boundary(self) -> None: while True: chunk = await self._readline() if chunk == b"": raise ValueError(f"Could not find starting boundary {self._boundary!r}") chunk = chunk.rstrip() if chunk == self._boundary: return elif chunk == self._boundary + b"--": self._at_eof = True return async def _read_boundary(self) -> None: chunk = (await self._readline()).rstrip() if chunk == self._boundary: pass elif chunk == self._boundary + b"--": self._at_eof = True epilogue = await self._readline() next_line = await self._readline() # the epilogue is expected and then either the end of input or the # parent multipart boundary, if the parent boundary is found then # it should be marked as unread and handed to the parent for # processing if next_line[:2] == b"--": self._unread.append(next_line) # otherwise the request is likely missing an epilogue and both # lines should be passed to the parent for processing # (this handles the old behavior gracefully) else: self._unread.extend([next_line, epilogue]) else: raise ValueError(f"Invalid boundary {chunk!r}, expected {self._boundary!r}") async def _read_headers(self) -> "CIMultiDictProxy[str]": lines = [] while True: chunk = await self._content.readline() chunk = chunk.strip() lines.append(chunk) if not chunk: break parser = HeadersParser() headers, raw_headers = parser.parse_headers(lines) return headers async def _maybe_release_last_part(self) -> None: """Ensures that the last read body part is read completely.""" if self._last_part is not None: if not self._last_part.at_eof(): await self._last_part.release() self._unread.extend(self._last_part._unread) self._last_part = None _Part = tuple[Payload, str, str]
MultipartReader
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typeNarrowingIsinstance5.py
{ "start": 1149, "end": 1307 }
class ____: ... def func3(c1: Callable[[int], None]): if isinstance(c1, IsNotFinal): reveal_type(c1, expected_text="IsNotFinal") @final
IsNotFinal
python
pytorch__pytorch
torch/testing/_internal/opinfo/core.py
{ "start": 94420, "end": 106963 }
class ____(OpInfo): """Operator information for 'universal binary functions (binary ufuncs).' These are functions of two tensors with common properties like: - they are elementwise functions - the output shape is determined by the input shape - they typically have method and inplace variants - they typically support the out kwarg - they typically have NumPy or SciPy references See NumPy's universal function documentation (https://numpy.org/doc/stable/reference/ufuncs.html) for more details about the concept of ufuncs. """ def __init__( self, name, *, sample_inputs_func=sample_inputs_elementwise_binary, reference_inputs_func=reference_inputs_elementwise_binary, sample_kwargs=lambda device, dtype, input: ({}, {}), error_inputs_func=None, lhs_make_tensor_kwargs=None, rhs_make_tensor_kwargs=None, always_returns_bool=False, # Set to true if the op always returns bool tensors supports_rhs_python_scalar=True, # Whether the operator allows Tensor x scalar inputs supports_one_python_scalar=False, # Whether the operator allows scalar x tensor and tensor x scalar inputs supports_two_python_scalars=False, # Whether the operator allows scalar x scalar inputs **kwargs, ): self._original_binary_ufunc_args = locals().copy() # Elementwise binary operations perform the equivalent of test_numpy_refs # in test_binary_ufuncs, but with additional test granularity. So the # generic test_ops.py test is skipped because it's redundant. common_skips = ( DecorateInfo( unittest.skip("Skipping redundant test."), "TestCommon", "test_numpy_refs", ), ) kwargs["skips"] = kwargs.get("skips", ()) + common_skips super().__init__( name, sample_inputs_func=sample_inputs_func, reference_inputs_func=reference_inputs_func, error_inputs_func=make_error_inputs_elementwise_binary(error_inputs_func), **kwargs, ) self.sample_kwargs = sample_kwargs # [lr]hs_make_tensor_kwargs are part of the OpInfo to be able to dynamically generate valid samples later on. if lhs_make_tensor_kwargs is None: lhs_make_tensor_kwargs = {} self.lhs_make_tensor_kwargs = lhs_make_tensor_kwargs if rhs_make_tensor_kwargs is None: rhs_make_tensor_kwargs = {} self.rhs_make_tensor_kwargs = rhs_make_tensor_kwargs self.always_returns_bool = always_returns_bool self.supports_rhs_python_scalar = supports_rhs_python_scalar self.supports_one_python_scalar = supports_one_python_scalar self.supports_two_python_scalars = supports_two_python_scalars if self.supports_two_python_scalars: self.supports_one_python_scalar = True if self.supports_one_python_scalar: assert supports_rhs_python_scalar, ( "Can't support lhs and rhs Python scalars but not rhs scalars!" ) # The following functions and classes are for testing elementwise unary operators. 
def sample_inputs_elementwise_unary( op_info, device, dtype, requires_grad, op_kwargs=None, **kwargs ): if not op_kwargs: op_kwargs = {} _L = S if kwargs.get("small_inputs_only", False) else L low, high = op_info.domain is_floating = dtype.is_floating_point or dtype.is_complex low = low if low is None or not is_floating else low + op_info._domain_eps high = high if high is None or not is_floating else high - op_info._domain_eps if ( op_info.supports_sparse_csr or op_info.supports_sparse_csc or op_info.supports_sparse_bsr or op_info.supports_sparse_bsc ): # Tensors with dim=2 for sparse compressed testing yield SampleInput( make_tensor( (_L, _L), device=device, dtype=dtype, low=low, high=high, requires_grad=requires_grad, ), kwargs=op_kwargs, ) else: # Creates a 1D, empty, and scalar tensor for shape in ((_L,), (1, 0, 3), ()): yield SampleInput( make_tensor( shape, device=device, dtype=dtype, low=low, high=high, requires_grad=requires_grad, ), kwargs=op_kwargs, ) # Replace values satisfying condition with a safe value. This is used to block # out values the could cause singularity like tan(pi/2) def _replace_values_in_tensor(tensor, condition, safe_value): mask = condition(tensor) tensor.masked_fill_(mask, safe_value) # Helper to create a unary elementwise tensor with valid inputs def _make_unary_elementwise_tensor(shape, *, op, dtype, **kwargs): low, high = op.domain is_floating = dtype.is_floating_point or dtype.is_complex low = low if low is None or not is_floating else low + op._domain_eps high = high if high is None or not is_floating else high - op._domain_eps a = make_tensor(shape, low=low, high=high, dtype=dtype, **kwargs) if op.reference_numerics_filter is not None and dtype is not torch.bool: condition, safe_value = op.reference_numerics_filter _replace_values_in_tensor(a, condition, safe_value) return a # Restricts the values in the tensor to the domain of the # given elementwise unary operator def _filter_unary_elementwise_tensor(a, *, op): # short-circuits for boolean tensors if a.dtype is torch.bool: return a low, high = op.domain is_floating = a.dtype.is_floating_point or a.dtype.is_complex low = low if low is None or not is_floating else low + op._domain_eps high = high if high is None or not is_floating else high - op._domain_eps if a.dtype is torch.uint8 and low is not None: low = max(low, 0) if not a.dtype.is_floating_point and not a.dtype.is_complex: low = math.ceil(low) if low is not None else None high = math.floor(high) if high is not None else None if op.reference_numerics_filter is not None: condition, safe_value = op.reference_numerics_filter _replace_values_in_tensor(a, condition, safe_value) if low is not None or high is not None: if a.dtype.is_complex: a.real.clamp_(low, high) a.imag.clamp_(low, high) else: a.clamp_(min=low, max=high) return a def generate_elementwise_unary_tensors(op, *, device, dtype, requires_grad, **kwargs): # Special-cases bool if dtype is torch.bool: tensors = ( torch.empty(0, device=device, dtype=torch.bool), torch.tensor(True, device=device), torch.tensor(False, device=device), torch.tensor((True, False), device=device), make_tensor((812,), device=device, dtype=dtype), make_tensor((1029, 917), device=device, dtype=dtype), ) for a in tensors: yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) shapes = ( (1029, 917), (812,), # Empty sizes (0,), (0, 3, 3), (1, 0, 5), (6, 0, 0, 0), (3, 0, 1, 0), ) make_arg = partial( _make_unary_elementwise_tensor, op=op, device=device, dtype=dtype, requires_grad=requires_grad, ) for shape in 
shapes: a = make_arg(shape) yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) def generate_elementwise_unary_small_value_tensors( op, *, device, dtype, requires_grad=False ): for sample in generate_elementwise_binary_small_value_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad ): a = _filter_unary_elementwise_tensor(sample.input, op=op) yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) def generate_elementwise_unary_large_value_tensors( op, *, device, dtype, requires_grad=False ): for sample in generate_elementwise_binary_large_value_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad ): a = _filter_unary_elementwise_tensor(sample.input, op=op) yield SampleInput(sample.input, kwargs=op.sample_kwargs(device, dtype, a)[0]) def generate_elementwise_unary_extremal_value_tensors( op, *, device, dtype, requires_grad=False ): for sample in generate_elementwise_binary_extremal_value_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad ): yield SampleInput( sample.input, kwargs=op.sample_kwargs(device, dtype, sample.input)[0] ) def generate_elementwise_unary_noncontiguous_tensors( op, *, device, dtype, requires_grad=False ): make_arg = partial( _make_unary_elementwise_tensor, op=op, device=device, dtype=dtype, requires_grad=requires_grad, ) # Generic noncontiguity t = make_arg((1026,), noncontiguous=True) yield SampleInput(t, kwargs=op.sample_kwargs(device, dtype, t)[0]) # Transposed t = make_arg((1024, 1024)).T yield SampleInput(t, kwargs=op.sample_kwargs(device, dtype, t)[0]) # Expanded tensors shapes = ((1, 3), (1, 7), (5, 7)) for shape in shapes: t = make_arg(shape) t_non_contig = t.expand(3, -1, -1) yield SampleInput( t_non_contig, kwargs=op.sample_kwargs(device, dtype, t_non_contig)[0] ) def generate_elementwise_unary_arbitrarily_strided_tensors( op, *, device, dtype, requires_grad=False ): # shape, strides, offset strided_cases = ( ((5, 6, 2), (1, 1, 7), 2), ((5, 5, 4), (1, 1, 7), 2), ((5, 5, 2), (4, 5, 7), 3), ((5, 5, 2), (5, 5, 7), 3), ((5, 5, 2), (5, 5, 5), 3), ((9, 5, 2), (0, 1, 7), 3), ) make_arg = partial( make_tensor, device=device, dtype=dtype, requires_grad=requires_grad ) for shape, strides, offset in strided_cases: a = make_arg( 500, ).as_strided(shape, strides, offset) yield SampleInput(a, kwargs=op.sample_kwargs(device, dtype, a)[0]) # Reuses the elementwise binary generators for consistency # TODO: in the future generalize the reference generators to handle n-ary elementwise operations def _reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs): yield from op.sample_inputs_func(op, device, dtype, requires_grad, **kwargs) yield from generate_elementwise_unary_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs ) if dtype is not torch.bool: yield from generate_elementwise_unary_small_value_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs ) if dtype not in (torch.bool, torch.uint8, torch.int8) and ( op.handles_large_floats or (not dtype.is_floating_point and not dtype.is_complex) ): yield from generate_elementwise_unary_large_value_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs ) if dtype.is_floating_point or ( op.handles_complex_extremal_values and dtype.is_complex ): yield from generate_elementwise_unary_extremal_value_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs ) def reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs): 
gen = partial( _reference_inputs_elementwise_unary, op, device, dtype, requires_grad, **kwargs ) # yields "normal" samples yield from gen() # yields noncontiguous samples for sample in gen(): yield sample.noncontiguous() yield from generate_elementwise_unary_noncontiguous_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs ) yield from generate_elementwise_unary_arbitrarily_strided_tensors( op, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs ) # Metadata class for unary "universal functions (ufuncs)" that accept a single # tensor and have common properties like:
BinaryUfuncInfo
python
ray-project__ray
release/ray_release/exception.py
{ "start": 3239, "end": 3332 }
class ____(CommandTimeout):
    exit_code = ExitCode.CLUSTER_WAIT_TIMEOUT
PrepareCommandTimeout
python
django__django
django/db/backends/oracle/utils.py
{ "start": 61, "end": 1198 }
class ____:
    """
    A late-binding cursor variable that can be passed to Cursor.execute as a
    parameter, in order to receive the id of the row created by an insert
    statement.
    """

    types = {
        "AutoField": int,
        "BigAutoField": int,
        "SmallAutoField": int,
        "IntegerField": int,
        "BigIntegerField": int,
        "SmallIntegerField": int,
        "PositiveBigIntegerField": int,
        "PositiveSmallIntegerField": int,
        "PositiveIntegerField": int,
        "BooleanField": int,
        "FloatField": Database.DB_TYPE_BINARY_DOUBLE,
        "DateTimeField": Database.DB_TYPE_TIMESTAMP,
        "DateField": datetime.date,
        "DecimalField": decimal.Decimal,
    }

    def __init__(self, field):
        internal_type = getattr(field, "target_field", field).get_internal_type()
        self.db_type = self.types.get(internal_type, str)
        self.bound_param = None

    def bind_parameter(self, cursor):
        self.bound_param = cursor.cursor.var(self.db_type)
        return self.bound_param

    def get_value(self):
        return self.bound_param.getvalue()
BoundVar
python
getsentry__sentry
tests/sentry/monitors/endpoints/test_project_processing_errors_index.py
{ "start": 299, "end": 2475 }
class ____(MonitorTestCase, APITestCase):
    endpoint = "sentry-api-0-project-processing-errors-index"
    method = "delete"

    def setUp(self) -> None:
        super().setUp()
        self.login_as(user=self.user)

    def test_no_error_type(self) -> None:
        resp = self.get_error_response(self.organization.slug, self.project.slug)
        assert resp.status_code == 400
        assert resp.content == b'["Invalid error type"]'

    def test_invalid_error_type(self) -> None:
        resp = self.get_error_response(
            self.organization.slug, self.project.slug, qs_params={"errortype": "17"}
        )
        assert resp.status_code == 400
        assert resp.content == b'["Invalid error type"]'

    def test(self) -> None:
        monitor_errors = [
            build_checkin_processing_error(
                [{"type": ProcessingErrorType.CHECKIN_INVALID_GUID}],
                message_overrides={"project_id": self.project.id},
            ),
            build_checkin_processing_error(
                [{"type": ProcessingErrorType.CHECKIN_INVALID_GUID}],
                message_overrides={"project_id": self.project.id},
            ),
            build_checkin_processing_error(
                [{"type": ProcessingErrorType.CHECKIN_INVALID_GUID}],
                message_overrides={"project_id": self.project.id},
            ),
            build_checkin_processing_error(
                [{"type": ProcessingErrorType.CHECKIN_INVALID_GUID}],
                message_overrides={"project_id": self.project.id},
            ),
            build_checkin_processing_error(
                [{"type": ProcessingErrorType.CHECKIN_INVALID_DURATION, "duration": "-1"}],
                message_overrides={"project_id": self.project.id},
            ),
        ]
        for error in monitor_errors:
            store_error(error, None)

        resp = self.get_success_response(
            self.organization.slug,
            self.project.slug,
            qs_params={"errortype": "4"},
        )
        assert resp.status_code == 204
        assert get_errors_for_projects([self.project]) == [monitor_errors[4]]
ProjectProcessingErrorsIndexEndpointTest
python
ray-project__ray
python/ray/data/_internal/execution/interfaces/ref_bundle.py
{ "start": 741, "end": 14667 }
class ____: """A group of data block references and their metadata. Operators take in and produce streams of RefBundles. Most commonly a RefBundle consists of a single block object reference. In some cases, e.g., due to block splitting, or for a reduce task, there may be more than one block. Block bundles have ownership semantics, i.e., shared ownership (similar to C++ shared_ptr, multiple operators share the same block bundle), or unique ownership (similar to C++ unique_ptr, only one operator owns the block bundle). This allows operators to know whether they can destroy blocks when they don't need them. Destroying blocks eagerly is more efficient than waiting for Python GC / Ray reference counting to kick in. """ # The size_bytes must be known in the metadata, num_rows is optional. blocks: Tuple[Tuple[ObjectRef[Block], BlockMetadata], ...] # The schema of the blocks in this bundle. This is optional, and may be None # if blocks are empty. schema: Optional["Schema"] # Whether we own the blocks (can safely destroy them). owns_blocks: bool # The slices of the blocks in this bundle. After __post_init__, this is always # a list with length equal to len(blocks). Individual entries can be None to # represent a full block (equivalent to BlockSlice(0, num_rows)). # Pass None during construction to initialize all slices as None (full blocks). slices: Optional[List[Optional[BlockSlice]]] = None # This attribute is used by the split() operator to assign bundles to logical # output splits. It is otherwise None. output_split_idx: Optional[int] = None # Object metadata (size, locations, spilling status) _cached_object_meta: Optional[Dict[ObjectRef, "_ObjectMetadata"]] = None # Preferred locations for this bundle determined based on the locations # of individual objects and their corresponding size, ie location with the # largest total number of bytes present there has the highest preference. 
_cached_preferred_locations: Optional[Dict[NodeIdStr, int]] = None def __post_init__(self): if not isinstance(self.blocks, tuple): object.__setattr__(self, "blocks", tuple(self.blocks)) if self.slices is None: self.slices = [None] * len(self.blocks) else: assert len(self.blocks) == len( self.slices ), "Number of blocks and slices must match" # Validate slice ranges for (_, metadata), block_slice in zip(self.blocks, self.slices): if block_slice is not None: assert ( block_slice.start_offset >= 0 ), f"Slice start_offset must be non-negative: {block_slice.start_offset}" assert ( block_slice.end_offset >= block_slice.start_offset ), f"Slice end_offset must be >= start_offset: [{block_slice.start_offset}, {block_slice.end_offset})" if metadata.num_rows is not None: assert ( block_slice.end_offset <= metadata.num_rows ), f"Slice range [{block_slice.start_offset}, {block_slice.end_offset}) exceeds block num_rows: {metadata.num_rows}" for b in self.blocks: assert isinstance(b, tuple), b assert len(b) == 2, b assert isinstance(b[0], ray.ObjectRef), b[0] assert isinstance(b[1], BlockMetadata), b[1] if b[1].size_bytes is None: raise ValueError( "The size in bytes of the block must be known: {}".format(b) ) def __setattr__(self, key, value): if hasattr(self, key) and key in ["blocks", "owns_blocks"]: raise ValueError(f"The `{key}` field of RefBundle cannot be updated.") object.__setattr__(self, key, value) @property def block_refs(self) -> List[ObjectRef[Block]]: """List of block references in this bundle.""" return [block_ref for block_ref, _ in self.blocks] @property def metadata(self) -> List[BlockMetadata]: """List of block metadata in this bundle.""" return [metadata for _, metadata in self.blocks] def num_rows(self) -> Optional[int]: """Number of rows present in this bundle, if known. Iterates through blocks and their corresponding slices to calculate the total. Note: Block metadata always refers to the full block, not the slice. - If block_slice is None, uses the full block's metadata.num_rows - If block_slice is present, uses the slice's num_rows (partial block portion) - Returns None if any full block has unknown row count (metadata.num_rows is None) """ total = 0 for metadata, block_slice in zip(self.metadata, self.slices): if block_slice is None: if metadata.num_rows is None: return None total += metadata.num_rows else: total += block_slice.num_rows return total def size_bytes(self) -> int: """Size of the blocks of this bundle in bytes. Iterates through blocks and their corresponding slices to calculate the total size. Note: Block metadata always refers to the full block, not the slice. 
- If block_slice is None, uses the full block's metadata.size_bytes - If block_slice is present but num_rows is unknown or zero, uses full metadata.size_bytes - If block_slice represents a partial block, estimates size proportionally based on (metadata.size_bytes / metadata.num_rows) * block_slice.num_rows - Otherwise, uses the full metadata.size_bytes """ total = 0 for (_, metadata), block_slice in zip(self.blocks, self.slices): if block_slice is None: # Full block total += metadata.size_bytes elif metadata.num_rows is None or metadata.num_rows == 0: # Unknown num_rows or empty block - use full metadata size total += metadata.size_bytes elif metadata.num_rows != block_slice.num_rows: # Partial block - estimate size based on rows per_row = metadata.size_bytes / metadata.num_rows total += max(1, int(math.ceil(per_row * block_slice.num_rows))) else: total += metadata.size_bytes return total def destroy_if_owned(self) -> int: """Clears the object store memory for these blocks if owned. Returns: The number of bytes freed. """ should_free = self.owns_blocks and DataContext.get_current().eager_free for block_ref in self.block_refs: trace_deallocation( block_ref, "RefBundle.destroy_if_owned", free=should_free ) return self.size_bytes() if should_free else 0 def get_preferred_object_locations(self) -> Dict[NodeIdStr, int]: """Returns a mapping of node IDs to total bytes stored on each node. Returns: Dict mapping node ID to total bytes stored on that node """ meta = self._get_cached_metadata() if self._cached_preferred_locations is None: preferred_locs: Dict[NodeIdStr, int] = defaultdict(int) for ref, obj_meta in meta.items(): for loc in obj_meta.locs: preferred_locs[loc] += obj_meta.size self._cached_preferred_locations = preferred_locs return self._cached_preferred_locations def _get_cached_metadata(self) -> Dict[ObjectRef, "_ObjectMetadata"]: if self._cached_object_meta is None: # This call is pretty fast for owned objects (~5k/s), so we don't need to # batch it for now. meta = ray.experimental.get_local_object_locations(self.block_refs) # Extract locations object_metas: Dict[ObjectRef, _ObjectMetadata] = { ref: _ObjectMetadata( size=meta[ref]["object_size"], spilled=meta[ref]["did_spill"], locs=meta[ref]["node_ids"], ) for ref in self.block_refs } self._cached_object_meta = object_metas return self._cached_object_meta def slice(self, needed_rows: int) -> Tuple["RefBundle", "RefBundle"]: """Slice a Ref Bundle into the first bundle containing the first `needed_rows` rows and the remaining bundle containing the remaining rows. Args: needed_rows: Number of rows to take from the head of the bundle. Returns: A tuple of (sliced_bundle, remaining_bundle). The needed rows must be less than the number of rows in the bundle. """ assert needed_rows > 0, "needed_rows must be positive." assert ( self.num_rows() is not None ), "Cannot slice a RefBundle with unknown number of rows." assert ( needed_rows < self.num_rows() ), f"To slice a RefBundle, the number of requested rows must be less than the number of rows in the bundle. Requested {needed_rows} rows but bundle only has {self.num_rows()} rows." block_slices = [] for metadata, block_slice in zip(self.metadata, self.slices): if block_slice is None: # None represents a full block, convert to explicit BlockSlice assert ( metadata.num_rows is not None ), "Cannot derive block slice for a RefBundle with unknown block row counts." 
block_slices.append( BlockSlice(start_offset=0, end_offset=metadata.num_rows) ) else: block_slices.append(block_slice) consumed_blocks: List[Tuple[ObjectRef[Block], BlockMetadata]] = [] consumed_slices: List[BlockSlice] = [] remaining_blocks: List[Tuple[ObjectRef[Block], BlockMetadata]] = [] remaining_slices: List[BlockSlice] = [] rows_to_take = needed_rows for (block_ref, metadata), block_slice in zip(self.blocks, block_slices): block_rows = block_slice.num_rows if rows_to_take >= block_rows: consumed_blocks.append((block_ref, metadata)) consumed_slices.append(block_slice) rows_to_take -= block_rows else: if rows_to_take == 0: remaining_blocks.append((block_ref, metadata)) remaining_slices.append(block_slice) continue consume_slice = BlockSlice( start_offset=block_slice.start_offset, end_offset=block_slice.start_offset + rows_to_take, ) consumed_blocks.append((block_ref, metadata)) consumed_slices.append(consume_slice) leftover_rows = block_rows - rows_to_take if leftover_rows > 0: remainder_slice = BlockSlice( start_offset=consume_slice.end_offset, end_offset=block_slice.end_offset, ) remaining_blocks.append((block_ref, metadata)) remaining_slices.append(remainder_slice) rows_to_take = 0 sliced_bundle = RefBundle( blocks=tuple(consumed_blocks), schema=self.schema, owns_blocks=False, slices=consumed_slices if consumed_slices else None, ) remaining_bundle = RefBundle( blocks=tuple(remaining_blocks), schema=self.schema, owns_blocks=False, slices=remaining_slices if remaining_slices else None, ) return sliced_bundle, remaining_bundle @classmethod def merge_ref_bundles(cls, bundles: List["RefBundle"]) -> "RefBundle": assert bundles, "Cannot merge an empty list of RefBundles." merged_blocks = list(itertools.chain(*[bundle.blocks for bundle in bundles])) merged_slices = list(itertools.chain(*[bundle.slices for bundle in bundles])) return cls( blocks=tuple(merged_blocks), schema=bundles[0].schema, # Assume all bundles have the same schema owns_blocks=bundles[ 0 ].owns_blocks, # Assume all bundles have the same ownership slices=merged_slices, ) def __eq__(self, other) -> bool: return self is other def __hash__(self) -> int: return id(self) def __len__(self) -> int: return len(self.blocks) def __str__(self) -> str: lines = [ f"RefBundle({len(self.blocks)} blocks,", f" {self.num_rows()} rows,", f" schema={self.schema},", f" owns_blocks={self.owns_blocks},", " blocks=(", ] # Loop through each block and show details for i, ((block_ref, metadata), block_slice) in enumerate( zip(self.blocks, self.slices) ): row_str = ( f"{metadata.num_rows} rows" if metadata.num_rows is not None else "unknown rows" ) bytes_str = f"{metadata.size_bytes} bytes" slice_str = ( f"slice={block_slice}" if block_slice is not None else "slice=None (full block)" ) lines.append(f" {i}: {row_str}, {bytes_str}, {slice_str}") lines.append(" )") lines.append(")") return "\n".join(lines) @dataclass
RefBundle
python
aio-libs__aiohttp
tests/test_pytest_plugin.py
{ "start": 6457, "end": 7267 }
class ____(TestClient): pass @pytest.fixture def aiohttp_client_cls(): return CustomClient async def test_hello(aiohttp_client) -> None: client = await aiohttp_client(Application()) assert isinstance(client, CustomClient) """ ) testdir.makeconftest(CONFTEST) result = testdir.runpytest() result.assert_outcomes(passed=1) def test_aiohttp_client_cls_fixture_factory(testdir: pytest.Testdir) -> None: testdir.makeconftest( CONFTEST + """ def pytest_configure(config): config.addinivalue_line("markers", "rest: RESTful API tests") config.addinivalue_line("markers", "graphql: GraphQL API tests") """ ) testdir.makepyfile( """ import pytest from aiohttp.web import Application from aiohttp.test_utils import TestClient
CustomClient
python
tensorflow__tensorflow
tensorflow/python/ops/init_ops.py
{ "start": 3388, "end": 6098 }
class ____(Initializer): """Initializer that generates tensors initialized to 0. @compatibility(TF2) `tf.compat.v1.zeros_initializer` is compatible with eager execution and `tf.function`. To migrate to TF2, please use `tf.zeros_initializer` instead. The `dtype` argument in `tf.compat.v1.zeros_initializer.__init__()` does not exist in `tf.zeros_initializer.__init__()`. However, you can specify the `dtype` in `__call__()` in both cases. #### Structural Mapping to TF2 Before: ```python initializer = tf.compat.v1.zeros_initializer(dtype=tf.float32) variable = tf.Variable(initializer(shape=[3, 3])) ``` After: ```python initializer = tf.zeros_initializer() variable = tf.Variable(initializer(shape=[3, 3], dtype=tf.float32)) ``` #### How to Map Arguments | TF1 Arg Name | TF2 Arg Name | Note | | :------------------- | :--------------- | :------------------------- | | `dtype` | `dtype` | In `__call__()` method | | `partition_info` | - | (`__call__` arg in TF1) Not supported | #### Before & After Usage Example Before: >>> initializer = tf.compat.v1.zeros_initializer(dtype=tf.float32) >>> tf.Variable(initializer(shape=[3])).numpy() array([0., 0., 0.], dtype=float32) >>> tf.Variable(initializer(shape=[3, 3])).numpy() array([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=float32) >>> initializer = tf.compat.v1.zeros_initializer() >>> tf.Variable(initializer(shape=[3], dtype=tf.float32)).numpy() array([0., 0., 0.], dtype=float32) >>> tf.Variable(initializer(shape=[3, 3], dtype=tf.float32)).numpy() array([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=float32) After: >>> initializer = tf.zeros_initializer() >>> tf.Variable(initializer(shape=[3], dtype=tf.float32)).numpy() array([0., 0., 0.], dtype=float32) >>> tf.Variable(initializer(shape=[3, 3], dtype=tf.float32)).numpy() array([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=float32) @end_compatibility """ @deprecated_args(None, "Call initializer instance with the dtype argument instead " "of passing it to the constructor", "dtype") def __init__(self, dtype=dtypes.float32): self.dtype = dtypes.as_dtype(dtype) def __call__(self, shape, dtype=None, partition_info=None): if dtype is None: dtype = self.dtype return array_ops.zeros(shape, dtype) def get_config(self): return {"dtype": self.dtype.name} @tf_export(v1=["initializers.ones", "ones_initializer"]) @deprecation.deprecated_endpoints("initializers.ones", "ones_initializer")
Zeros
python
sqlalchemy__sqlalchemy
test/base/test_utils.py
{ "start": 84748, "end": 87829 }
class ____(fixtures.TestBase): def test_all_positional(self): class Foo: def __init__(self, a, b, c): self.a = a self.b = b self.c = c eq_(util.generic_repr(Foo(1, 2, 3)), "Foo(1, 2, 3)") def test_positional_plus_kw(self): class Foo: def __init__(self, a, b, c=5, d=4): self.a = a self.b = b self.c = c self.d = d eq_(util.generic_repr(Foo(1, 2, 3, 6)), "Foo(1, 2, c=3, d=6)") def test_kw_defaults(self): class Foo: def __init__(self, a=1, b=2, c=3, d=4): self.a = a self.b = b self.c = c self.d = d eq_(util.generic_repr(Foo(1, 5, 3, 7)), "Foo(b=5, d=7)") def test_multi_kw(self): class Foo: def __init__(self, a, b, c=3, d=4): self.a = a self.b = b self.c = c self.d = d class Bar(Foo): def __init__(self, e, f, g=5, **kw): self.e = e self.f = f self.g = g super().__init__(**kw) eq_( util.generic_repr( Bar("e", "f", g=7, a=6, b=5, d=9), to_inspect=[Bar, Foo] ), "Bar('e', 'f', g=7, a=6, b=5, d=9)", ) eq_( util.generic_repr(Bar("e", "f", a=6, b=5), to_inspect=[Bar, Foo]), "Bar('e', 'f', a=6, b=5)", ) def test_multi_kw_repeated(self): class Foo: def __init__(self, a=1, b=2): self.a = a self.b = b class Bar(Foo): def __init__(self, b=3, c=4, **kw): self.c = c super().__init__(b=b, **kw) eq_( util.generic_repr(Bar(a="a", b="b", c="c"), to_inspect=[Bar, Foo]), "Bar(b='b', c='c', a='a')", ) def test_discard_vargs(self): class Foo: def __init__(self, a, b, *args): self.a = a self.b = b self.c, self.d = args[0:2] eq_(util.generic_repr(Foo(1, 2, 3, 4)), "Foo(1, 2)") def test_discard_vargs_kwargs(self): class Foo: def __init__(self, a, b, *args, **kw): self.a = a self.b = b self.c, self.d = args[0:2] eq_(util.generic_repr(Foo(1, 2, 3, 4, x=7, y=4)), "Foo(1, 2)") def test_significant_vargs(self): class Foo: def __init__(self, a, b, *args): self.a = a self.b = b self.args = args eq_(util.generic_repr(Foo(1, 2, 3, 4)), "Foo(1, 2, 3, 4)") def test_no_args(self): class Foo: def __init__(self): pass eq_(util.generic_repr(Foo()), "Foo()") def test_no_init(self): class Foo: pass eq_(util.generic_repr(Foo()), "Foo()")
GenericReprTest
python
astropy__astropy
astropy/cosmology/_src/flrw/lambdacdm.py
{ "start": 672, "end": 22294 }
class ____(FLRW): """FLRW cosmology with a cosmological constant and curvature. This has no additional attributes beyond those of FLRW. Parameters ---------- H0 : float or scalar quantity-like ['frequency'] Hubble constant at z = 0. If a float, must be in [km/sec/Mpc]. Om0 : float Omega matter: density of non-relativistic matter in units of the critical density at z=0. Ode0 : float Omega dark energy: density of the cosmological constant in units of the critical density at z=0. Tcmb0 : float or scalar quantity-like ['temperature'], optional Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K]. Setting this to zero will turn off both photons and neutrinos (even massive ones). Neff : float, optional Effective number of Neutrino species. Default 3.04. m_nu : quantity-like ['energy', 'mass'] or array-like, optional Mass of each neutrino species in [eV] (mass-energy equivalency enabled). If this is a scalar Quantity, then all neutrino species are assumed to have that mass. Otherwise, the mass of each species. The actual number of neutrino species (and hence the number of elements of m_nu if it is not scalar) must be the floor of Neff. Typically this means you should provide three neutrino masses unless you are considering something like a sterile neutrino. Ob0 : float, optional Omega baryons: density of baryonic matter in units of the critical density at z=0. name : str or None (optional, keyword-only) Name for this cosmological object. meta : mapping or None (optional, keyword-only) Metadata for the cosmology, e.g., a reference. Examples -------- >>> from astropy.cosmology import LambdaCDM >>> cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7) The comoving distance in Mpc at redshift z: >>> z = 0.5 >>> dc = cosmo.comoving_distance(z) """ def __post_init__(self) -> None: super().__post_init__() # Please see :ref:`astropy-cosmology-fast-integrals` for discussion # about what is being done here. if self.Tcmb0.value == 0: inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc_norel inv_efunc_scalar_args = (self.Om0, self.Ode0, self.Ok0) if self.Ok0 != 0: object.__setattr__( self, "_comoving_distance_z1z2", self._elliptic_comoving_distance_z1z2, ) elif not self._nu_info.has_massive_nu: inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc_nomnu inv_efunc_scalar_args = ( self.Om0, self.Ode0, self.Ok0, self.Ogamma0 + self.Onu0, ) else: inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc inv_efunc_scalar_args = ( self.Om0, self.Ode0, self.Ok0, self.Ogamma0, self._nu_info.neff_per_nu, self._nu_info.n_massless_nu, self._nu_info.nu_y_list, ) object.__setattr__(self, "_inv_efunc_scalar", inv_efunc_scalar) object.__setattr__(self, "_inv_efunc_scalar_args", inv_efunc_scalar_args) if self.Tcmb0.value == 0 and self.Ok0 == 0: self._optimize_flat_norad() def _optimize_flat_norad(self) -> None: """Set optimizations for flat LCDM cosmologies with no radiation.""" # Call out the Om0=0 (de Sitter) and Om0=1 (Einstein-de Sitter) # The dS case is required because the hypergeometric case # for Omega_M=0 would lead to an infinity in its argument. # The EdS case is three times faster than the hypergeometric. 
if self.Om0 == 0: comoving_distance = self._dS_comoving_distance_z1z2 age = self._dS_age lookback_time = self._dS_lookback_time elif self.Om0 == 1: comoving_distance = self._EdS_comoving_distance_z1z2 age = self._EdS_age lookback_time = self._EdS_lookback_time else: comoving_distance = self._hypergeometric_comoving_distance_z1z2 age = self._flat_age lookback_time = self._flat_lookback_time object.__setattr__(self, "_comoving_distance_z1z2", comoving_distance) object.__setattr__(self, "_age", age) object.__setattr__(self, "_lookback_time", lookback_time) def w(self, z: Quantity | ArrayLike, /) -> FArray: r"""Returns dark energy equation of state at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'] or array-like Input redshift. .. versionchanged:: 7.0 Passing z as a keyword argument is deprecated. .. versionchanged:: 8.0 z must be a positional argument. Returns ------- w : ndarray or float The dark energy equation of state. Returns `float` if the input is scalar. Notes ----- The dark energy equation of state is defined as :math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at redshift z and :math:`\rho(z)` is the density at redshift z, both in units where c=1. Here this is :math:`w(z) = -1`. """ return np.full_like(aszarr(z), -1.0) def de_density_scale(self, z: Quantity | ArrayLike, /) -> FArray: r"""Evaluates the redshift dependence of the dark energy density. Parameters ---------- z : Quantity-like ['redshift'] or array-like Input redshift. .. versionchanged:: 7.0 Passing z as a keyword argument is deprecated. .. versionchanged:: 8.0 z must be a positional argument. Returns ------- I : ndarray The scaling of the energy density of dark energy with redshift. Notes ----- The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`, and in this case is given by :math:`I = 1`. """ return np.ones_like(aszarr(z)) def _elliptic_comoving_distance_z1z2( self, z1: Quantity | ArrayLike, z2: Quantity | ArrayLike, / ) -> Quantity: r"""Comoving transverse distance in Mpc between two redshifts. This value is the transverse comoving distance at redshift ``z`` corresponding to an angular separation of 1 radian. This is the same as the comoving distance if :math:`\Omega_k` is zero. For :math:`\Omega_{rad} = 0` the comoving distance can be directly calculated as an elliptic integral [1]_. Not valid or appropriate for flat cosmologies (Ok0=0). Parameters ---------- z1, z2 : Quantity-like ['redshift'] or array-like Input redshift. Returns ------- d : Quantity ['length'] Comoving distance in Mpc between each input redshift. References ---------- .. [1] Kantowski, R., Kao, J., & Thomas, R. (2000). Distance-Redshift in Inhomogeneous FLRW. arXiv e-prints, astro-ph/0002334. """ try: z1, z2 = np.broadcast_arrays(z1, z2) except ValueError as e: raise ValueError("z1 and z2 have different shapes") from e # The analytic solution is not valid for any of Om0, Ode0, Ok0 == 0. # Use the explicit integral solution for these cases. 
if self.Om0 == 0 or self.Ode0 == 0 or self.Ok0 == 0: return self._integral_comoving_distance_z1z2(z1, z2) b = -(27.0 / 2) * self.Om0**2 * self.Ode0 / self.Ok0**3 kappa = b / abs(b) if (b < 0) or (2 < b): v_k = pow(kappa * (b - 1) + sqrt(b * (b - 2)), 1.0 / 3) y1 = (-1 + kappa * (v_k + 1 / v_k)) / 3 A = sqrt(y1 * (3 * y1 + 2)) g = 1 / sqrt(A) k2 = (2 * A + kappa * (1 + 3 * y1)) / (4 * A) phi_z1 = phi_amplitude_b_outside(self.Om0, self.Ok0, kappa, y1, A, z1) phi_z2 = phi_amplitude_b_outside(self.Om0, self.Ok0, kappa, y1, A, z2) # Get lower-right 0<b<2 solution in Om0, Ode0 plane. # For the upper-left 0<b<2 solution the Big Bang didn't happen. elif (0 < b < 2) and self.Om0 > self.Ode0: yb = cos(acos(1 - b) / 3) yc = sqrt(3) * sin(acos(1 - b) / 3) y1 = (1.0 / 3) * (-1 + yb + yc) y2 = (1.0 / 3) * (-1 - 2 * yb) y3 = (1.0 / 3) * (-1 + yb - yc) g = 2 / sqrt(y1 - y2) k2 = (y1 - y3) / (y1 - y2) phi_z1 = phi_amplitude_b_inside(self.Om0, self.Ok0, y1, y2, z1) phi_z2 = phi_amplitude_b_inside(self.Om0, self.Ok0, y1, y2, z2) else: return self._integral_comoving_distance_z1z2(z1, z2) prefactor = self.hubble_distance / sqrt(abs(self.Ok0)) return prefactor * g * (ellipkinc(phi_z1, k2) - ellipkinc(phi_z2, k2)) def _dS_comoving_distance_z1z2( self, z1: Quantity | ArrayLike, z2: Quantity | ArrayLike, / ) -> Quantity: r"""De Sitter comoving LoS distance in Mpc between two redshifts. The Comoving line-of-sight distance in Mpc between objects at redshifts ``z1`` and ``z2`` in a flat, :math:`\Omega_{\Lambda}=1` cosmology (de Sitter). The comoving distance along the line-of-sight between two objects remains constant with time for objects in the Hubble flow. The de Sitter case has an analytic solution. Parameters ---------- z1, z2 : Quantity-like ['redshift'] or array-like Input redshifts. Must be 1D or scalar. Returns ------- d : Quantity ['length'] Comoving distance in Mpc between each input redshift. """ try: z1, z2 = np.broadcast_arrays(z1, z2) except ValueError as e: raise ValueError("z1 and z2 have different shapes") from e return self.hubble_distance * (z2 - z1) def _EdS_comoving_distance_z1z2( self, z1: Quantity | ArrayLike, z2: Quantity | ArrayLike, / ) -> Quantity: r"""Einstein-de Sitter comoving LoS distance in Mpc between two redshifts. The Comoving line-of-sight distance in Mpc between objects at redshifts ``z1`` and ``z2`` in a flat, :math:`\Omega_M=1` cosmology (Einstein - de Sitter). The comoving distance along the line-of-sight between two objects remains constant with time for objects in the Hubble flow. For :math:`\Omega_M=1`, :math:`\Omega_{rad}=0` the comoving distance has an analytic solution. Parameters ---------- z1, z2 : Quantity-like ['redshift'] or array-like Input redshifts. Must be 1D or scalar. Returns ------- d : Quantity ['length'] Comoving distance in Mpc between each input redshift. """ try: z1, z2 = np.broadcast_arrays(z1, z2) except ValueError as e: raise ValueError("z1 and z2 have different shapes") from e prefactor = 2 * self.hubble_distance return prefactor * ((z1 + 1.0) ** (-1.0 / 2) - (z2 + 1.0) ** (-1.0 / 2)) def _hypergeometric_comoving_distance_z1z2( self, z1: Quantity | ArrayLike, z2: Quantity | ArrayLike, / ) -> Quantity: r"""Hypergeoemtric comoving LoS distance in Mpc between two redshifts. The Comoving line-of-sight distance in Mpc at redshifts ``z1`` and ``z2``. The comoving distance along the line-of-sight between two objects remains constant with time for objects in the Hubble flow. 
For :math:`\Omega_{rad} = 0` the comoving distance can be directly calculated as a hypergeometric function [1]_. Parameters ---------- z1, z2 : Quantity-like ['redshift'] or array-like Input redshifts. Returns ------- d : Quantity ['length'] Comoving distance in Mpc between each input redshift. References ---------- .. [1] Baes, M., Camps, P., & Van De Putte, D. (2017). Analytical expressions and numerical evaluation of the luminosity distance in a flat cosmology. MNRAS, 468(1), 927-930. """ try: z1, z2 = np.broadcast_arrays(z1, z2) except ValueError as e: raise ValueError("z1 and z2 have different shapes") from e s = ((1 - self.Om0) / self.Om0) ** (1.0 / 3) # Use np.sqrt here to handle negative s (Om0>1). prefactor = self.hubble_distance / np.sqrt(s * self.Om0) return prefactor * ( self._T_hypergeometric(s / (z1 + 1.0)) - self._T_hypergeometric(s / (z2 + 1.0)) ) def _T_hypergeometric(self, x: float, /) -> float: r"""Compute value using Gauss Hypergeometric function 2F1. .. math:: T(x) = 2 \sqrt(x) _{2}F_{1}\left(\frac{1}{6}, \frac{1}{2}; \frac{7}{6}; -x^3 \right) Notes ----- The :func:`scipy.special.hyp2f1` code already implements the hypergeometric transformation suggested by Baes et al. [1]_ for use in actual numerical evaluations. References ---------- .. [1] Baes, M., Camps, P., & Van De Putte, D. (2017). Analytical expressions and numerical evaluation of the luminosity distance in a flat cosmology. MNRAS, 468(1), 927-930. """ return 2 * np.sqrt(x) * hyp2f1(1.0 / 6, 1.0 / 2, 7.0 / 6, -(x**3)) def _dS_age(self, z: Quantity | ArrayLike, /) -> Quantity: """Age of the universe in Gyr at redshift ``z``. The age of a de Sitter Universe is infinite. Parameters ---------- z : Quantity-like ['redshift'], array-like Input redshift. Returns ------- t : Quantity ['time'] The age of the universe in Gyr at each input redshift. """ t = inf if isinstance(z, Number) else np.full_like(z, inf, dtype=float) return self.hubble_time * t def _EdS_age(self, z: Quantity | ArrayLike, /) -> Quantity: r"""Age of the universe in Gyr at redshift ``z``. For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos) the age can be directly calculated as an elliptic integral [1]_. Parameters ---------- z : Quantity-like ['redshift'], array-like Input redshift. Returns ------- t : Quantity ['time'] The age of the universe in Gyr at each input redshift. References ---------- .. [1] Thomas, R., & Kantowski, R. (2000). Age-redshift relation for standard cosmology. PRD, 62(10), 103507. """ return (2.0 / 3) * self.hubble_time * (aszarr(z) + 1.0) ** (-1.5) def _flat_age(self, z: Quantity | ArrayLike, /) -> Quantity: r"""Age of the universe in Gyr at redshift ``z``. For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos) the age can be directly calculated as an elliptic integral [1]_. Parameters ---------- z : Quantity-like ['redshift'], array-like Input redshift. Returns ------- t : Quantity ['time'] The age of the universe in Gyr at each input redshift. References ---------- .. [1] Thomas, R., & Kantowski, R. (2000). Age-redshift relation for standard cosmology. PRD, 62(10), 103507. """ # Use np.sqrt, np.arcsinh instead of math.sqrt, math.asinh # to handle properly the complex numbers for 1 - Om0 < 0 prefactor = (2.0 / 3) * self.hubble_time / np.emath.sqrt(1 - self.Om0) arg = np.arcsinh( np.emath.sqrt((1 / self.Om0 - 1 + 0j) / (aszarr(z) + 1.0) ** 3) ) return (prefactor * arg).real def _EdS_lookback_time(self, z: Quantity | ArrayLike, /) -> Quantity: r"""Lookback time in Gyr to redshift ``z``. 
The lookback time is the difference between the age of the Universe now and the age at redshift ``z``. For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos) the age can be directly calculated as an elliptic integral. The lookback time is here calculated based on the ``age(0) - age(z)``. Parameters ---------- z : Quantity-like ['redshift'], array-like Input redshift. Returns ------- t : Quantity ['time'] Lookback time in Gyr to each input redshift. """ return self._EdS_age(0) - self._EdS_age(z) def _dS_lookback_time(self, z: Quantity | ArrayLike, /) -> Quantity: r"""Lookback time in Gyr to redshift ``z``. The lookback time is the difference between the age of the Universe now and the age at redshift ``z``. For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos) the age can be directly calculated. .. math:: a = exp(H * t) \ \text{where t=0 at z=0} t = (1/H) (ln 1 - ln a) = (1/H) (0 - ln (1/(1+z))) = (1/H) ln(1+z) Parameters ---------- z : Quantity-like ['redshift'], array-like Input redshift. Returns ------- t : Quantity ['time'] Lookback time in Gyr to each input redshift. """ return self.hubble_time * log(aszarr(z) + 1.0) def _flat_lookback_time(self, z: Quantity | ArrayLike, /) -> Quantity: r"""Lookback time in Gyr to redshift ``z``. The lookback time is the difference between the age of the Universe now and the age at redshift ``z``. For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos) the age can be directly calculated. The lookback time is here calculated based on the ``age(0) - age(z)``. Parameters ---------- z : Quantity-like ['redshift'], array-like Input redshift. Returns ------- t : Quantity ['time'] Lookback time in Gyr to each input redshift. """ return self._flat_age(0) - self._flat_age(z) def efunc(self, z: Quantity | ArrayLike, /) -> FArray: """Function used to calculate H(z), the Hubble parameter. Parameters ---------- z : Quantity-like ['redshift'], array-like Input redshift. .. versionchanged:: 7.0 Passing z as a keyword argument is deprecated. .. versionchanged:: 8.0 z must be a positional argument. Returns ------- E : ndarray The redshift scaling of the Hubble constant. Defined such that :math:`H(z) = H_0 E(z)`. """ # We override this because it takes a particularly simple # form for a cosmological constant Or = self.Ogamma0 + ( self.Onu0 if not self._nu_info.has_massive_nu else self.Ogamma0 * self.nu_relative_density(z) ) zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless]) return np.sqrt(zp1**2 * ((Or * zp1 + self.Om0) * zp1 + self.Ok0) + self.Ode0) def inv_efunc(self, z: Quantity | ArrayLike, /) -> FArray: r"""Function used to calculate :math:`\frac{1}{H_z}`. Parameters ---------- z : Quantity-like ['redshift'], array-like Input redshift. .. versionchanged:: 7.0 Passing z as a keyword argument is deprecated. .. versionchanged:: 8.0 z must be a positional argument. Returns ------- E : ndarray The inverse redshift scaling of the Hubble constant. Defined such that :math:`H_z = H_0 / E`. """ Or = self.Ogamma0 + ( self.Onu0 if not self._nu_info.has_massive_nu else self.Ogamma0 * self.nu_relative_density(z) ) zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless]) return (zp1**2 * ((Or * zp1 + self.Om0) * zp1 + self.Ok0) + self.Ode0) ** (-0.5) def phi_amplitude_b_outside( Om0: float, Ok0: float, kappa: float, y1: float, A: float, z: NDArray[np.number], / ) -> NDArray[np.number]: r"""Phi amplitude for b<0 and b>2 cases. 
In the Kantowski-Kao-Thomas classification this is the "two large domains" with $b \le 0$ or $b \ge 2$, i.e. the one-real-root branch of the cubic (their Case A1). This is the regime treated by their first set of elliptic-integral formulas (eqs. (8)-(13) in II.A.1). """ return np.arccos( ((z + 1.0) * Om0 / abs(Ok0) + kappa * y1 - A) / ((z + 1.0) * Om0 / abs(Ok0) + kappa * y1 + A) ) def phi_amplitude_b_inside( Om0: float, Ok0: float, y1: float, y2: float, z: NDArray[np.number], / ) -> NDArray[np.number]: r"""Phi amplitude for 0<b<2 case. See `phi_amplitude_b_outside`.""" return np.arcsin(np.sqrt((y1 - y2) / ((z + 1.0) * Om0 / abs(Ok0) + y1))) @dataclass_decorator
LambdaCDM
python
pypa__pipenv
pipenv/vendor/plette/lockfiles.py
{ "start": 1552, "end": 5241 }
class ____(DataModel): """Representation of a Pipfile.lock. """ __SCHEMA__ = { "_meta": {"type": "dict", "required": True}, "default": {"type": "dict", "required": True}, "develop": {"type": "dict", "required": True}, } @classmethod def validate(cls, data): for key, value in data.items(): if key == "_meta": Meta.validate(value) else: PackageCollection.validate(value) @classmethod def load(cls, f, encoding=None): if encoding is None: data = json.load(f) else: data = json.loads(f.read().decode(encoding)) return cls(data) @classmethod def with_meta_from(cls, pipfile, categories=None): data = { "_meta": { "hash": _copy_jsonsafe(pipfile.get_hash()._data), "pipfile-spec": PIPFILE_SPEC_CURRENT, "requires": _copy_jsonsafe(pipfile._data.get("requires", {})), "sources": _copy_jsonsafe(pipfile.sources._data), }, } if categories is None: data["default"] = _copy_jsonsafe(pipfile._data.get("packages", {})) data["develop"] = _copy_jsonsafe(pipfile._data.get("dev-packages", {})) else: for category in categories: if category == "default" or category == "packages": data["default"] = _copy_jsonsafe(pipfile._data.get("packages", {})) elif category == "develop" or category == "dev-packages": data["develop"] = _copy_jsonsafe(pipfile._data.get("dev-packages", {})) else: data[category] = _copy_jsonsafe(pipfile._data.get(category, {})) if "default" not in data: data["default"] = {} if "develop" not in data: data["develop"] = {} return cls(data) def __getitem__(self, key): value = self._data[key] try: if key == "_meta": return Meta(value) else: return PackageCollection(value) except KeyError: return value def __setitem__(self, key, value): if isinstance(value, DataModel): self._data[key] = value._data else: self._data[key] = value def is_up_to_date(self, pipfile): return self.meta.hash == pipfile.get_hash() def dump(self, f, encoding=None): encoder = _LockFileEncoder() if encoding is None: for chunk in encoder.iterencode(self._data): f.write(chunk) else: content = encoder.encode(self._data) f.write(content.encode(encoding)) @property def meta(self): try: return self["_meta"] except KeyError: raise AttributeError("meta") @meta.setter def meta(self, value): self["_meta"] = value @property def _meta(self): try: return self["_meta"] except KeyError: raise AttributeError("meta") @_meta.setter def _meta(self, value): self["_meta"] = value @property def default(self): try: return self["default"] except KeyError: raise AttributeError("default") @default.setter def default(self, value): self["default"] = value @property def develop(self): try: return self["develop"] except KeyError: raise AttributeError("develop") @develop.setter def develop(self, value): self["develop"] = value
Lockfile
python
facebook__pyre-check
client/commands/tests/language_server_test.py
{ "start": 49704, "end": 52570 }
class ____(testslide.TestCase, abc.ABC): def _assert_json_equal( self, actual_json_string: str, expected_json_string: str, ) -> None: self.assertEqual( json.loads(actual_json_string), json.loads(expected_json_string), ) def _expect_success_message( self, result: object, request_id: int = server_setup.DEFAULT_REQUEST_ID, ) -> Callable[[str], None]: return lambda actual_json_string: self._assert_json_equal( actual_json_string, server_setup.success_response_json( result=result, request_id=request_id, ), ) def _expect_diagnostics( self, uri: str, diagnostics: List[lsp.Diagnostic], ) -> Callable[[str], None]: def expectation(actual_json_string: str) -> None: actual_output = json.loads(actual_json_string) self.assertEqual(actual_output["method"], "textDocument/publishDiagnostics") parameters = actual_output["params"] self.assertEqual(parameters["uri"], uri) self.assertEqual( parameters["diagnostics"], [diagnostic.to_dict() for diagnostic in diagnostics], ) return expectation def _expect_telemetry_event( self, operation: str, result: Optional[object], additional_keys: Optional[Dict[str, object]] = None, ) -> Callable[[str], None]: """ operation - to compare the `operation` key with result - to compare the `response` key with additional_keys - specify these to test specific keys in the recorded telemetry json """ def expectation(actual_json_string: str) -> None: actual_telemetry = json.loads(actual_json_string) self.assertEqual(actual_telemetry["method"], "telemetry/event") telemetry_params = actual_telemetry["params"] self.assertEqual(telemetry_params["operation"], operation) if result is not None: self.assertEqual(telemetry_params["response"], result) if additional_keys: for key, expected in additional_keys.items(): self.assertEqual(telemetry_params[key], expected) return expectation def _assert_output_messages( self, output_writer: connections.MemoryBytesWriter, expectations: List[Callable[[str], None]], ) -> None: self.assertEqual( len(output_writer.items()), len(expectations), ) for raw_message, expectation in zip(output_writer.items(), expectations): json_string = server_setup.extract_json_from_json_rpc_message(raw_message) expectation(json_string)
ApiTestCase
python
google__pytype
pytype/metrics.py
{ "start": 9668, "end": 12061 }
class ____(Metric):
  """A metric to track memory usage via tracemalloc snapshots."""

  def __init__(
      self, name, enabled=False, groupby="lineno", nframes=1, count=10
  ):
    if enabled and tracemalloc is None:
      raise RuntimeError("tracemalloc module couldn't be imported")
    super().__init__(name)
    self.snapshots = []
    # The metric to group memory blocks by. Default is "lineno", which groups by
    # which file and line allocated the block. The other useful value is
    # "traceback", which groups by the stack frames leading to each allocation.
    self.groupby = groupby
    # The number of stack frames to store per memory block. Values greater than
    # 1 are only useful if groupby = "traceback".
    self.nframes = nframes
    # The number of memory block statistics to save.
    self.count = count
    self.running = False
    # Two conditions must be met for memory snapshots to be taken:
    # 1. Metrics have been enabled (global _enabled)
    # 2. Explicitly enabled by the arg to the constructor (which should be the
    #    options.memory_snapshot flag set by the --memory-snapshots option)
    self.enabled = _enabled and enabled

  def _start_tracemalloc(self):
    tracemalloc.start(self.nframes)
    self.running = True

  def _stop_tracemalloc(self):
    tracemalloc.stop()
    self.running = False

  def take_snapshot(self, where=""):
    """Stores a tracemalloc snapshot."""
    if not self.enabled:
      return
    if not self.running:
      self._start_tracemalloc()
    snap = tracemalloc.take_snapshot()
    # Store the top self.count memory consumers by self.groupby
    # We can't just store the list of statistics though! Statistic.__eq__
    # doesn't take None into account during comparisons, and json will compare
    # it to None when trying to process it, causing an error. So, store it as a
    # string instead.
    self.snapshots.append(
        "{}:\n{}".format(
            where,
            "\n".join(map(str, snap.statistics(self.groupby)[: self.count])),
        )
    )

  def __enter__(self):
    if not self.enabled:
      return
    self._start_tracemalloc()
    self.take_snapshot("__enter__")

  def __exit__(self, exc_type, exc_value, traceback):
    if not self.running:
      return
    self.take_snapshot("__exit__")
    self._stop_tracemalloc()

  def _summary(self):
    return "\n\n".join(self.snapshots)
Snapshot
python
fluentpython__example-code
14-it-generator/sentence_gen.py
{ "start": 122, "end": 447 }
class ____:

    def __init__(self, text):
        self.text = text
        self.words = RE_WORD.findall(text)

    def __repr__(self):
        return 'Sentence(%s)' % reprlib.repr(self.text)

    def __iter__(self):
        for word in self.words:  # <1>
            yield word  # <2>
        return  # <3>

# done! <4>
Sentence
python
great-expectations__great_expectations
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_city_name.py
{ "start": 906, "end": 1901 }
class ____(ColumnMapMetricProvider):
    # This is the id string that will be used to reference your metric.
    condition_metric_name = "column_values.valid_city_name"

    # This method implements the core logic for the PandasExecutionEngine
    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        return column.apply(lambda x: is_valid_city_name(x))

    # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
    # @column_condition_partial(engine=SqlAlchemyExecutionEngine)
    # def _sqlalchemy(cls, column, _dialect, **kwargs):
    #     raise NotImplementedError

    # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
    # @column_condition_partial(engine=SparkDFExecutionEngine)
    # def _spark(cls, column, **kwargs):
    #     raise NotImplementedError


# This class defines the Expectation itself
ColumnValuesToBeValidCityName
python
facebookresearch__faiss
tests/test_index_accuracy.py
{ "start": 5410, "end": 10562 }
class ____(unittest.TestCase): """tests IP in addition to L2, non multiple of 8 dimensions""" def add2columns(self, x): return np.hstack((x, np.zeros((x.shape[0], 2), dtype="float32"))) def subtest_add2col(self, xb, xq, index, qname): """Test with 2 additional dimensions to take also the non-SIMD codepath. We don't retrain anything but add 2 dims to the queries, the centroids and the trained ScalarQuantizer. """ nb, d = xb.shape d2 = d + 2 xb2 = self.add2columns(xb) xq2 = self.add2columns(xq) nlist = index.nlist quantizer = faiss.downcast_index(index.quantizer) quantizer2 = faiss.IndexFlat(d2, index.metric_type) centroids = faiss.vector_to_array(quantizer.codes) centroids = centroids.view("float32").reshape(nlist, d) centroids2 = self.add2columns(centroids) quantizer2.add(centroids2) index2 = faiss.IndexIVFScalarQuantizer( quantizer2, d2, index.nlist, index.sq.qtype, index.metric_type ) index2.nprobe = 4 if qname in ("8bit", "4bit"): trained = faiss.vector_to_array(index.sq.trained).reshape(2, -1) nt = trained.shape[1] # 2 lines: vmins and vdiffs new_nt = int(nt * d2 / d) trained2 = np.hstack((trained, np.zeros((2, new_nt - nt), dtype="float32"))) trained2[1, nt:] = 1.0 # set vdiff to 1 to avoid div by 0 faiss.copy_array_to_vector(trained2.ravel(), index2.sq.trained) else: index2.sq.trained = index.sq.trained index2.is_trained = True index2.add(xb2) return index2.search(xq2, 10) # run on Sept 18, 2018 with nprobe=4 + 4 bit bugfix ref_results = { (0, "8bit"): 984, (0, "4bit"): 978, (0, "8bit_uniform"): 985, (0, "4bit_uniform"): 979, (0, "fp16"): 985, (1, "8bit"): 979, (1, "4bit"): 973, (1, "8bit_uniform"): 979, (1, "4bit_uniform"): 972, (1, "fp16"): 979, # added 2019-06-26 (0, "6bit"): 985, (1, "6bit"): 987, } def subtest(self, mt): d = 32 xt, xb, xq = get_dataset_2(d, 2000, 1000, 200) nlist = 64 gt_index = faiss.IndexFlat(d, mt) gt_index.add(xb) gt_D, gt_I = gt_index.search(xq, 10) quantizer = faiss.IndexFlat(d, mt) for qname in "8bit 4bit 8bit_uniform 4bit_uniform fp16 6bit".split(): qtype = getattr(faiss.ScalarQuantizer, "QT_" + qname) index = faiss.IndexIVFScalarQuantizer(quantizer, d, nlist, qtype, mt) index.train(xt) index.add(xb) index.nprobe = 4 # hopefully more robust than 1 D, I = index.search(xq, 10) ninter = faiss.eval_intersection(I, gt_I) assert abs(ninter - self.ref_results[(mt, qname)]) <= 10 if qname == "6bit": # the test below fails triggers ASAN. 
TODO check what's wrong continue D2, I2 = self.subtest_add2col(xb, xq, index, qname) assert np.all(I2 == I) # also test range search if mt == faiss.METRIC_INNER_PRODUCT: radius = float(D[:, -1].max()) else: radius = float(D[:, -1].min()) lims, D3, I3 = index.range_search(xq, radius) ntot = ndiff = 0 for i in range(len(xq)): l0, l1 = lims[i], lims[i + 1] Inew = set(I3[l0:l1]) if mt == faiss.METRIC_INNER_PRODUCT: mask = D2[i] > radius else: mask = D2[i] < radius Iref = set(I2[i, mask]) ndiff += len(Inew ^ Iref) ntot += len(Iref) assert ndiff < ntot * 0.01 for pm in 1, 2: index.parallel_mode = pm lims4, D4, I4 = index.range_search(xq, radius) for qno in range(len(lims) - 1): Iref = I3[lims[qno]: lims[qno + 1]] Inew = I4[lims4[qno]: lims4[qno + 1]] assert set(Iref) == set(Inew), "q %d ref %s new %s" % ( qno, Iref, Inew, ) def test_SQ_IP(self): self.subtest(faiss.METRIC_INNER_PRODUCT) def test_SQ_L2(self): self.subtest(faiss.METRIC_L2) def test_parallel_mode(self): d = 32 xt, xb, xq = get_dataset_2(d, 2000, 1000, 200) index = faiss.index_factory(d, "IVF64,SQ8") index.train(xt) index.add(xb) index.nprobe = 4 # hopefully more robust than 1 Dref, Iref = index.search(xq, 10) for pm in 1, 2, 3: index.parallel_mode = pm Dnew, Inew = index.search(xq, 10) np.testing.assert_array_equal(Iref, Inew) np.testing.assert_array_equal(Dref, Dnew)
TestSQFlavors
python
pypa__pip
src/pip/_internal/operations/build/build_tracker.py
{ "start": 1760, "end": 4771 }
class ____: """Ensure that an sdist cannot request itself as a setup requirement. When an sdist is prepared, it identifies its setup requirements in the context of ``BuildTracker.track()``. If a requirement shows up recursively, this raises an exception. This stops fork bombs embedded in malicious packages.""" def __init__(self, root: str) -> None: self._root = root self._entries: dict[TrackerId, InstallRequirement] = {} logger.debug("Created build tracker: %s", self._root) def __enter__(self) -> BuildTracker: logger.debug("Entered build tracker: %s", self._root) return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> None: self.cleanup() def _entry_path(self, key: TrackerId) -> str: hashed = hashlib.sha224(key.encode()).hexdigest() return os.path.join(self._root, hashed) def add(self, req: InstallRequirement, key: TrackerId) -> None: """Add an InstallRequirement to build tracking.""" # Get the file to write information about this requirement. entry_path = self._entry_path(key) # Try reading from the file. If it exists and can be read from, a build # is already in progress, so a LookupError is raised. try: with open(entry_path) as fp: contents = fp.read() except FileNotFoundError: pass else: message = f"{req.link} is already being built: {contents}" raise LookupError(message) # If we're here, req should really not be building already. assert key not in self._entries # Start tracking this requirement. with open(entry_path, "w", encoding="utf-8") as fp: fp.write(str(req)) self._entries[key] = req logger.debug("Added %s to build tracker %r", req, self._root) def remove(self, req: InstallRequirement, key: TrackerId) -> None: """Remove an InstallRequirement from build tracking.""" # Delete the created file and the corresponding entry. os.unlink(self._entry_path(key)) del self._entries[key] logger.debug("Removed %s from build tracker %r", req, self._root) def cleanup(self) -> None: for key, req in list(self._entries.items()): self.remove(req, key) logger.debug("Removed build tracker: %r", self._root) @contextlib.contextmanager def track(self, req: InstallRequirement, key: str) -> Generator[None, None, None]: """Ensure that `key` cannot install itself as a setup requirement. :raises LookupError: If `key` was already provided in a parent invocation of the context introduced by this method.""" tracker_id = TrackerId(key) self.add(req, tracker_id) yield self.remove(req, tracker_id)
BuildTracker
python
google__jax
jaxlib/gpu_common_utils.py
{ "start": 630, "end": 905 }
class ____(Exception):
  """Raised when the GPU library is not linked."""

  error_msg = (
      'JAX was not built with GPU support. Please use a GPU-enabled JAX to use'
      ' this function.'
  )

  def __init__(self):
    super().__init__(self.error_msg)
GpuLibNotLinkedError
python
pytorch__pytorch
test/dynamo/test_functions.py
{ "start": 86699, "end": 87218 }
class ____(torch.nn.Module):
    def forward(self, L_x_: "f32[2, 2]"):
        l_x_ = L_x_
        mul: "f32[2, 2]" = l_x_ * 4
        mul_1: "f32[2, 2]" = mul * l_x_; mul = None
        mul_2: "f32[2, 2]" = 20 * l_x_; l_x_ = None
        mul_3: "f32[2, 2]" = torch.mul(mul_1, mul_2); mul_1 = mul_2 = None
        return (mul_3,)
""",
            )
        else:
            self.assertExpectedInline(
                normalize_gm(backend.graphs[0].print_readable(print_output=False)),
                """\
GraphModule
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/classes5.py
{ "start": 136, "end": 549 }
class ____: cv1: ClassVar[int] = 0 cv2: ClassVar[int] = 0 cv3: ClassVar[int] = 0 cv4: ClassVar[int] = 0 var1: int var2: str var3: int | str var4: int var5: int var6: int var7: list[float] var8: list[int] var9: int _var1: int __var1: int def __init__(self): self.var10: int = 0 self.var11: int = 0 self.var12 = 0
ParentClass1
python
walkccc__LeetCode
solutions/1695. Maximum Erasure Value/1695.py
{ "start": 0, "end": 349 }
class ____: def maximumUniqueSubarray(self, nums: list[int]) -> int: ans = 0 score = 0 seen = set() l = 0 for r, num in enumerate(nums): while num in seen: score -= nums[l] seen.remove(nums[l]) l += 1 seen.add(nums[r]) score += nums[r] ans = max(ans, score) return ans
Solution
python
Farama-Foundation__Gymnasium
gymnasium/envs/classic_control/cartpole.py
{ "start": 13958, "end": 22781 }
class ____(VectorEnv): metadata = { "render_modes": ["rgb_array"], "render_fps": 50, "autoreset_mode": AutoresetMode.NEXT_STEP, } def __init__( self, num_envs: int = 1, max_episode_steps: int = 500, render_mode: str | None = None, sutton_barto_reward: bool = False, ): self._sutton_barto_reward = sutton_barto_reward self.num_envs = num_envs self.max_episode_steps = max_episode_steps self.render_mode = render_mode self.gravity = 9.8 self.masscart = 1.0 self.masspole = 0.1 self.total_mass = self.masspole + self.masscart self.length = 0.5 # actually half the pole's length self.polemass_length = self.masspole * self.length self.force_mag = 10.0 self.tau = 0.02 # seconds between state updates self.kinematics_integrator = "euler" self.state = None self.steps = np.zeros(num_envs, dtype=np.int32) self.prev_done = np.zeros(num_envs, dtype=np.bool_) # Angle at which to fail the episode self.theta_threshold_radians = 12 * 2 * math.pi / 360 self.x_threshold = 2.4 # Angle limit set to 2 * theta_threshold_radians so failing observation # is still within bounds. high = np.array( [ self.x_threshold * 2, np.inf, self.theta_threshold_radians * 2, np.inf, ], dtype=np.float32, ) self.low = -0.05 self.high = 0.05 self.single_action_space = spaces.Discrete(2) self.action_space = batch_space(self.single_action_space, num_envs) self.single_observation_space = spaces.Box(-high, high, dtype=np.float32) self.observation_space = batch_space(self.single_observation_space, num_envs) self.screen_width = 600 self.screen_height = 400 self.screens = None self.surf = None self.steps_beyond_terminated = None def step( self, action: np.ndarray ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, dict]: assert self.action_space.contains( action ), f"{action!r} ({type(action)}) invalid" assert self.state is not None, "Call reset before using step method." 
x, x_dot, theta, theta_dot = self.state force = np.sign(action - 0.5) * self.force_mag costheta = np.cos(theta) sintheta = np.sin(theta) # For the interested reader: # https://coneural.org/florian/papers/05_cart_pole.pdf temp = ( force + self.polemass_length * np.square(theta_dot) * sintheta ) / self.total_mass thetaacc = (self.gravity * sintheta - costheta * temp) / ( self.length * (4.0 / 3.0 - self.masspole * np.square(costheta) / self.total_mass) ) xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass if self.kinematics_integrator == "euler": x = x + self.tau * x_dot x_dot = x_dot + self.tau * xacc theta = theta + self.tau * theta_dot theta_dot = theta_dot + self.tau * thetaacc else: # semi-implicit euler x_dot = x_dot + self.tau * xacc x = x + self.tau * x_dot theta_dot = theta_dot + self.tau * thetaacc theta = theta + self.tau * theta_dot self.state = np.stack((x, x_dot, theta, theta_dot)) terminated: np.ndarray = ( (x < -self.x_threshold) | (x > self.x_threshold) | (theta < -self.theta_threshold_radians) | (theta > self.theta_threshold_radians) ) self.steps += 1 truncated = self.steps >= self.max_episode_steps if self._sutton_barto_reward: reward = -np.array(terminated, dtype=np.float32) else: reward = np.ones_like(terminated, dtype=np.float32) # Reset all environments which terminated or were truncated in the last step self.state[:, self.prev_done] = self.np_random.uniform( low=self.low, high=self.high, size=(4, self.prev_done.sum()) ) self.steps[self.prev_done] = 0 reward[self.prev_done] = 0.0 terminated[self.prev_done] = False truncated[self.prev_done] = False self.prev_done = np.logical_or(terminated, truncated) return self.state.T.astype(np.float32), reward, terminated, truncated, {} def reset( self, *, seed: int | None = None, options: dict | None = None, ): super().reset(seed=seed) # Note that if you use custom reset bounds, it may lead to out-of-bound # state/observations. # -0.05 and 0.05 is the default low and high bounds self.low, self.high = utils.maybe_parse_reset_bounds(options, -0.05, 0.05) self.state = self.np_random.uniform( low=self.low, high=self.high, size=(4, self.num_envs) ) self.steps_beyond_terminated = None self.steps = np.zeros(self.num_envs, dtype=np.int32) self.prev_done = np.zeros(self.num_envs, dtype=np.bool_) return self.state.T.astype(np.float32), {} def render(self): if self.render_mode is None: assert self.spec is not None gym.logger.warn( "You are calling render method without specifying any render mode. " "You can specify the render_mode at initialization, " f'e.g. gym.make_vec("{self.spec.id}", render_mode="rgb_array")' ) return try: import pygame from pygame import gfxdraw except ImportError: raise DependencyNotInstalled( 'pygame is not installed, run `pip install "gymnasium[classic_control]"`' ) if self.screens is None: pygame.init() self.screens = [ pygame.Surface((self.screen_width, self.screen_height)) for _ in range(self.num_envs) ] world_width = self.x_threshold * 2 scale = self.screen_width / world_width polewidth = 10.0 polelen = scale * (2 * self.length) cartwidth = 50.0 cartheight = 30.0 if self.state is None: raise ValueError( "Cartpole's state is None, it probably hasn't be reset yet." 
) for x, screen in zip(self.state.T, self.screens): assert isinstance(x, np.ndarray) and x.shape == (4,) self.surf = pygame.Surface((self.screen_width, self.screen_height)) self.surf.fill((255, 255, 255)) l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2 axleoffset = cartheight / 4.0 cartx = x[0] * scale + self.screen_width / 2.0 # MIDDLE OF CART carty = 100 # TOP OF CART cart_coords = [(l, b), (l, t), (r, t), (r, b)] cart_coords = [(c[0] + cartx, c[1] + carty) for c in cart_coords] gfxdraw.aapolygon(self.surf, cart_coords, (0, 0, 0)) gfxdraw.filled_polygon(self.surf, cart_coords, (0, 0, 0)) l, r, t, b = ( -polewidth / 2, polewidth / 2, polelen - polewidth / 2, -polewidth / 2, ) pole_coords = [] for coord in [(l, b), (l, t), (r, t), (r, b)]: coord = pygame.math.Vector2(coord).rotate_rad(-x[2]) coord = (coord[0] + cartx, coord[1] + carty + axleoffset) pole_coords.append(coord) gfxdraw.aapolygon(self.surf, pole_coords, (202, 152, 101)) gfxdraw.filled_polygon(self.surf, pole_coords, (202, 152, 101)) gfxdraw.aacircle( self.surf, int(cartx), int(carty + axleoffset), int(polewidth / 2), (129, 132, 203), ) gfxdraw.filled_circle( self.surf, int(cartx), int(carty + axleoffset), int(polewidth / 2), (129, 132, 203), ) gfxdraw.hline(self.surf, 0, self.screen_width, carty, (0, 0, 0)) self.surf = pygame.transform.flip(self.surf, False, True) screen.blit(self.surf, (0, 0)) return [ np.transpose(np.array(pygame.surfarray.pixels3d(screen)), axes=(1, 0, 2)) for screen in self.screens ] def close(self): if self.screens is not None: import pygame pygame.quit()
CartPoleVectorEnv
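For readability, the update that the vectorised `step()` above computes can be restated as equations; this only transcribes the code (which follows the Florian cart-pole note it cites) and adds nothing new. Here \(\ell\) is `self.length`, i.e. half the pole length per the code comment, and \(m_c, m_p\) are the cart and pole masses:

\[
\begin{aligned}
\mathrm{temp} &= \frac{F + m_p \ell\, \dot\theta^{2} \sin\theta}{m_c + m_p},\\[4pt]
\ddot\theta &= \frac{g\sin\theta - \cos\theta\,\mathrm{temp}}{\ell\left(\frac{4}{3} - \frac{m_p \cos^{2}\theta}{m_c + m_p}\right)},
\qquad
\ddot x = \mathrm{temp} - \frac{m_p \ell\, \ddot\theta \cos\theta}{m_c + m_p},
\end{aligned}
\]

followed by the explicit Euler update \(x \leftarrow x + \tau \dot x,\ \dot x \leftarrow \dot x + \tau \ddot x\) (and likewise for \(\theta\)) when `kinematics_integrator == "euler"`.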
python
great-expectations__great_expectations
great_expectations/datasource/fluent/data_asset/path/dataframe_partitioners.py
{ "start": 1784, "end": 2209 }
class ____(_PartitionerDatetime): column_name: str sort_ascending: bool = True method_name: Literal["partition_on_year_and_month"] = "partition_on_year_and_month" @property @override def param_names(self) -> List[str]: return ["year", "month"] @override def partitioner_method_kwargs(self) -> Dict[str, str]: return {"column_name": self.column_name}
DataframePartitionerMonthly
python
facebook__pyre-check
tools/generate_taint_models/tests/get_filtered_sources_test.py
{ "start": 769, "end": 9824 }
class ____(unittest.TestCase): @patch.object(RESTApiSourceGenerator, "generate_models") @patch.object(AnnotatedFreeFunctionWithDecoratorGenerator, "generate_models") def test_compute_models_with_no_intersection( self, mock_annotated_decorator_generate_models, mock_RESTapi_decorator_generate_models, ) -> None: function_definition_str = "def testA(x): pass" function_definition = ast.parse(function_definition_str).body[0] mock_RESTapi_decorator_generate_models.return_value = { CallableModel( testB, parameter_annotation=AllParametersAnnotation( arg="TaintSource[UserControlled]", vararg="TaintSource[UserControlled]", kwarg="TaintSource[UserControlled]", ), ) } mock_annotated_decorator_generate_models.return_value = { FunctionDefinitionModel( # pyre-ignore: Incompatible parameter type [6] function_definition, parameter_annotation=AllParametersAnnotation( arg="TaintSource[UserControlled]", vararg="TaintSource[UserControlled]", kwarg="TaintSource[UserControlled]", ), qualifier="tools.pyre.tools.generate_taint_models.tests.test_functions", ) } # Functions from RESTApiSourceGenerator should appear self.assertEqual( [ *map( str, FilteredSourceGenerator( superset_generator=RESTApiSourceGenerator( django_urls=MagicMock() ), subset_generator=AnnotatedFreeFunctionWithDecoratorGenerator( root="/root", annotation_specifications=[] ), ).compute_models(all_functions), ) ], [ "def tools.pyre.tools.generate_taint_models.tests.test_functions." "testB(x: TaintSource[UserControlled]): ..." ], ) @patch.object(RESTApiSourceGenerator, "generate_models") @patch.object(AnnotatedFreeFunctionWithDecoratorGenerator, "generate_models") def test_compute_models_with_some_intersection( self, mock_annotated_decorator_generate_models, mock_RESTapi_decorator_generate_models, ) -> None: function_definition_str = "def testB(x): pass" function_definition = ast.parse(function_definition_str).body[0] mock_RESTapi_decorator_generate_models.return_value = { CallableModel( testC, parameter_annotation=AllParametersAnnotation( arg="TaintSource[UserControlled]", vararg="TaintSource[UserControlled]", kwarg="TaintSource[UserControlled]", ), ), CallableModel( testB, parameter_annotation=AllParametersAnnotation( arg="TaintSource[UserControlled]", vararg="TaintSource[UserControlled]", kwarg="TaintSource[UserControlled]", ), ), } mock_annotated_decorator_generate_models.return_value = { FunctionDefinitionModel( # pyre-ignore: Incompatible parameter type [6] function_definition, parameter_annotation=AllParametersAnnotation( arg="TaintSource[UserControlled]", vararg="TaintSource[UserControlled]", kwarg="TaintSource[UserControlled]", ), qualifier="tools.pyre.tools.generate_taint_models.tests.test_functions", ) } # Functions that are in RESTApiSourceGenerator but not in # AnnotatedFreeFunctionWithDecoratorGenerator should appear self.assertEqual( [ *map( str, FilteredSourceGenerator( superset_generator=RESTApiSourceGenerator( django_urls=MagicMock() ), subset_generator=AnnotatedFreeFunctionWithDecoratorGenerator( root="/root", annotation_specifications=[] ), ).compute_models(all_functions), ) ], [ "def tools.pyre.tools.generate_taint_models.tests.test_functions." "testC(x: TaintSource[UserControlled]): ..." 
], ) @patch.object(RESTApiSourceGenerator, "generate_models") @patch.object(AnnotatedFreeFunctionWithDecoratorGenerator, "generate_models") def test_compute_models_with_complete_intersection( self, mock_annotated_decorator_generate_models, mock_RESTapi_decorator_generate_models, ) -> None: function_definition_str = "def testB(x): pass" function_definition = ast.parse(function_definition_str).body[0] mock_RESTapi_decorator_generate_models.return_value = { CallableModel( testB, parameter_annotation=AllParametersAnnotation( arg="TaintSource[UserControlled]", vararg="TaintSource[UserControlled]", kwarg="TaintSource[UserControlled]", ), ) } mock_annotated_decorator_generate_models.return_value = { FunctionDefinitionModel( # pyre-ignore: Incompatible parameter type [6] function_definition, "TaintSource[UserControlled]", # pyre-fixme[6]: Expected `Optional[tools.pyre.tools.generate_tai... "TaintSource[UserControlled]", "TaintSource[UserControlled]", ) } self.assertEqual( [ *map( str, FilteredSourceGenerator( superset_generator=RESTApiSourceGenerator( django_urls=MagicMock() ), subset_generator=( AnnotatedFreeFunctionWithDecoratorGenerator( root="/root", annotation_specifications=[] ) ), ).compute_models(all_functions), ) ], [], ) def test_compute_models_for_arbitrary_generators(self) -> None: class SupersetGenerator(ModelGenerator): def gather_functions_to_model(self) -> Iterable[Callable[..., object]]: return [] def compute_models( self, functions_to_model: Iterable[Callable[..., object]] ) -> Iterable[Model]: return [] def generate_models(self) -> Set[Model]: return { CallableModel( testB, parameter_annotation=AllParametersAnnotation( arg="TaintSource[Super]" ), ), CallableModel( testC, parameter_annotation=AllParametersAnnotation( arg="TaintSource[Super]" ), ), } class SubsetGenerator(ModelGenerator): def gather_functions_to_model(self) -> Iterable[Callable[..., object]]: return [] def compute_models( self, functions_to_model: Iterable[Callable[..., object]] ) -> Iterable[Model]: return [] def generate_models(self) -> Set[Model]: return { CallableModel( testC, parameter_annotation=AllParametersAnnotation( arg="TaintSource[Subset]" ), ) } self.assertEqual( [ str(model) for model in FilteredSourceGenerator( superset_generator=SupersetGenerator(), subset_generator=SubsetGenerator(), ).generate_models() ], [ "def tools.pyre.tools.generate_taint_models.tests.test_functions.testB(" "x: TaintSource[Super]): ..." ], )
GetFilteredSourcesTest
python
ray-project__ray
release/nightly_tests/decision_tree/cart_with_tree.py
{ "start": 3439, "end": 13857 }
class ____: def __init__(self, max_depth=None, tree_limit=5000, feature_limit=2000): self.max_depth = max_depth self.tree_limit = tree_limit self.feature_limit = feature_limit def fit(self, X, y): """Build decision tree classifier.""" self.n_classes_ = len(set(y)) # classes are assumed to go from 0 to n-1 self.n_features_ = X.shape[1] self.tree_ = self._grow_tree(X, y) def predict(self, X): """Predict class for X.""" return [self._predict(inputs) for inputs in X] def debug(self, feature_names, class_names, show_details=True): """Print ASCII visualization of decision tree.""" self.tree_.debug(feature_names, class_names, show_details) def _gini(self, y): """Compute Gini impurity of a non-empty node. Gini impurity is defined as Σ p(1-p) over all classes, with p the freq class within the node. Since Σ p = 1, this is equivalent to 1 - Σ p^2. """ m = y.size return 1.0 - sum((np.sum(y == c) / m) ** 2 for c in range(self.n_classes_)) def _best_split(self, X, y): return best_split(self, X, y) def _grow_tree(self, X, y, depth=0): future = grow_tree_remote.remote(self, X, y, depth) return ray.get(future) def _predict(self, inputs): """Predict class for a single sample.""" node = self.tree_ while node.left: if inputs[node.feature_index] < node.threshold: node = node.left else: node = node.right return node.predicted_class def grow_tree_local(tree, X, y, depth): """Build a decision tree by recursively finding the best split.""" # Population for each class in current node. The predicted class is the one # largest population. num_samples_per_class = [np.sum(y == i) for i in range(tree.n_classes_)] predicted_class = np.argmax(num_samples_per_class) node = Node( gini=tree._gini(y), num_samples=y.size, num_samples_per_class=num_samples_per_class, predicted_class=predicted_class, ) # Split recursively until maximum depth is reached. if depth < tree.max_depth: idx, thr = tree._best_split(X, y) if idx is not None: indices_left = X[:, idx] < thr X_left, y_left = X[indices_left], y[indices_left] X_right, y_right = X[~indices_left], y[~indices_left] node.feature_index = idx node.threshold = thr node.left = grow_tree_local(tree, X_left, y_left, depth + 1) node.right = grow_tree_local(tree, X_right, y_right, depth + 1) return node @ray.remote def grow_tree_remote(tree, X, y, depth=0): """Build a decision tree by recursively finding the best split.""" # Population for each class in current node. The predicted class is the one # largest population. num_samples_per_class = [np.sum(y == i) for i in range(tree.n_classes_)] predicted_class = np.argmax(num_samples_per_class) node = Node( gini=tree._gini(y), num_samples=y.size, num_samples_per_class=num_samples_per_class, predicted_class=predicted_class, ) # Split recursively until maximum depth is reached. 
if depth < tree.max_depth: idx, thr = tree._best_split(X, y) if idx is not None: indices_left = X[:, idx] < thr X_left, y_left = X[indices_left], y[indices_left] X_right, y_right = X[~indices_left], y[~indices_left] node.feature_index = idx node.threshold = thr if len(X_left) > tree.tree_limit or len(X_right) > tree.tree_limit: left_future = grow_tree_remote.remote(tree, X_left, y_left, depth + 1) right_future = grow_tree_remote.remote( tree, X_right, y_right, depth + 1 ) node.left = ray.get(left_future) node.right = ray.get(right_future) else: node.left = grow_tree_local(tree, X_left, y_left, depth + 1) node.right = grow_tree_local(tree, X_right, y_right, depth + 1) return node def best_split_original(tree, X, y): """Find the best split for a node.""" # Need at least two elements to split a node. m = y.size if m <= 1: return None, None # Count of each class in the current node. num_parent = [np.sum(y == c) for c in range(tree.n_classes_)] # Gini of current node. best_gini = 1.0 - sum((n / m) ** 2 for n in num_parent) best_idx, best_thr = None, None # Loop through all features. for idx in range(tree.n_features_): # Sort data along selected feature. thresholds, classes = zip(*sorted(zip(X[:, idx], y))) # print("Classes are: ", classes, " ", thresholds) # We could actually split the node according to each feature/threshold # and count the resulting population for each class in the children, # instead we compute them in an iterative fashion, making this for loop # linear rather than quadratic. num_left = [0] * tree.n_classes_ num_right = num_parent.copy() for i in range(1, m): # possible split positions c = classes[i - 1] # print("c is ", c, "num left is", len(num_left)) num_left[c] += 1 num_right[c] -= 1 gini_left = 1.0 - sum( (num_left[x] / i) ** 2 for x in range(tree.n_classes_) ) gini_right = 1.0 - sum( (num_right[x] / (m - i)) ** 2 for x in range(tree.n_classes_) ) # The Gini impurity of a split is the weighted average of the Gini # impurity of the children. gini = (i * gini_left + (m - i) * gini_right) / m # The following condition is to make sure we don't try to split two # points with identical values for that feature, as it is impossibl # (both have to end up on the same side of a split). if thresholds[i] == thresholds[i - 1]: continue if gini < best_gini: best_gini = gini best_idx = idx best_thr = (thresholds[i] + thresholds[i - 1]) / 2 # midpoint return best_idx, best_thr def best_split_for_idx(tree, idx, X, y, num_parent, best_gini): """Find the best split for a node and a given index""" # Sort data along selected feature. thresholds, classes = zip(*sorted(zip(X[:, idx], y))) # print("Classes are: ", classes, " ", thresholds) # We could actually split the node according to each feature/threshold pair # and count the resulting population for each class in the children, but # instead we compute them in an iterative fashion, making this for loop # linear rather than quadratic. m = y.size num_left = [0] * tree.n_classes_ num_right = num_parent.copy() best_thr = float("NaN") for i in range(1, m): # possible split positions c = classes[i - 1] # print("c is ", c, "num left is", len(num_left)) num_left[c] += 1 num_right[c] -= 1 gini_left = 1.0 - sum((num_left[x] / i) ** 2 for x in range(tree.n_classes_)) gini_right = 1.0 - sum( (num_right[x] / (m - i)) ** 2 for x in range(tree.n_classes_) ) # The Gini impurity of a split is the weighted average of the Gini # impurity of the children. 
gini = (i * gini_left + (m - i) * gini_right) / m # The following condition is to make sure we don't try to split two # points with identical values for that feature, as it is impossible # (both have to end up on the same side of a split). if thresholds[i] == thresholds[i - 1]: continue if gini < best_gini: best_gini = gini best_thr = (thresholds[i] + thresholds[i - 1]) / 2 # midpoint return best_gini, best_thr @ray.remote def best_split_for_idx_remote(tree, idx, X, y, num_parent, best_gini): return best_split_for_idx(tree, idx, X, y, num_parent, best_gini) def best_split(tree, X, y): """Find the best split for a node.""" # Need at least two elements to split a node. m = y.size if m <= 1: return None, None # Count of each class in the current node. num_parent = [np.sum(y == c) for c in range(tree.n_classes_)] # Gini of current node. best_gini = 1.0 - sum((n / m) ** 2 for n in num_parent) best_idx, best_thr = -1, best_gini if m > tree.feature_limit: split_futures = [ best_split_for_idx_remote.remote(tree, i, X, y, num_parent, best_gini) for i in range(tree.n_features_) ] best_splits = [ray.get(result) for result in split_futures] else: best_splits = [ best_split_for_idx(tree, i, X, y, num_parent, best_gini) for i in range(tree.n_features_) ] ginis = np.array([x for (x, _) in best_splits]) best_idx = np.argmin(ginis) best_thr = best_splits[best_idx][1] return best_idx, best_thr @ray.remote def run_in_cluster(): dataset = datasets.fetch_covtype(data_home=tempfile.mkdtemp()) X, y = dataset.data, dataset.target - 1 training_size = 400000 max_depth = 10 clf = DecisionTreeClassifier(max_depth=max_depth) start = time.time() clf.fit(X[:training_size], y[:training_size]) end = time.time() y_pred = clf.predict(X[training_size:]) accuracy = metrics.accuracy_score(y[training_size:], y_pred) return end - start, accuracy if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("--concurrency", type=int, default=1) args = parser.parse_args() ray.init(address=os.environ["RAY_ADDRESS"]) futures = [] for i in range(args.concurrency): print(f"concurrent run: {i}") futures.append(run_in_cluster.remote()) time.sleep(10) for i, f in enumerate(futures): treetime, accuracy = ray.get(f) print(f"Tree {i} building took {treetime} seconds") print(f"Test Accuracy: {accuracy}") with open(os.environ["TEST_OUTPUT_JSON"], "w") as f: f.write(json.dumps({"build_time": treetime}))
DecisionTreeClassifier
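The Gini impurity that `_gini` and `best_split` in the row above rely on is simple enough to check by hand. The toy labels below are arbitrary and chosen only for illustration; the snippet mirrors the `1 - sum(p_c ** 2)` form used in the class.

# Worked check of the Gini impurity formula used above (illustrative only).
import numpy as np

y = np.array([0, 0, 0, 1, 1, 2])  # three classes with counts 3, 2, 1
m = y.size
gini = 1.0 - sum((np.sum(y == c) / m) ** 2 for c in range(3))
# p = (3/6, 2/6, 1/6) -> 1 - (0.25 + 0.1111 + 0.0278) ~= 0.6111
print(round(gini, 4))  # 0.6111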
python
PyCQA__pylint
tests/functional/p/protocol_classes.py
{ "start": 540, "end": 738 }
class ____(Protocol): """A hashing algorithm, e.g. :func:`hashlib.sha256`.""" def update(self, blob: bytes): # [unused-argument] ... def digest(self) -> bytes: ...
HasherFake
python
spyder-ide__spyder
spyder/plugins/projects/utils/config.py
{ "start": 2730, "end": 2932 }
class ____(MultiUserConfig): """Plugin configuration handler with multifile support.""" DEFAULT_FILE_NAME = WORKSPACE def get_config_class(self): return ProjectConfig
ProjectMultiConfig
python
astropy__astropy
astropy/cosmology/_src/tests/io/test_connect.py
{ "start": 1396, "end": 4495 }
class ____( test_ecsv.ReadWriteECSVTestMixin, test_html.ReadWriteHTMLTestMixin, test_json.ReadWriteJSONTestMixin, test_latex.WriteLATEXTestMixin, ): """ Tests for a CosmologyRead/Write on a |Cosmology|. This class will not be directly called by :mod:`pytest` since its name does not begin with ``Test``. To activate the contained tests this class must be inherited in a subclass. Subclasses must define a :func:`pytest.fixture` ``cosmo`` that returns/yields an instance of a |Cosmology|. See ``TestReadWriteCosmology`` or ``TestCosmology`` for examples. """ @pytest.mark.parametrize("format, metaio, has_deps", readwrite_formats) def test_readwrite_complete_info(self, cosmo, tmp_path, format, metaio, has_deps): """ Test writing from an instance and reading from the base class. This requires full information. The round-tripped metadata can be in a different order, so the OrderedDict must be converted to a dict before testing equality. """ if not has_deps: pytest.skip("missing a dependency") if (format, Cosmology) not in readwrite_registry._readers: pytest.xfail(f"no read method is registered for format {format!r}") fname = tmp_path / f"{cosmo.name}.{format}" cosmo.write(fname, format=format) # Also test kwarg "overwrite" assert fname.is_file() with pytest.raises(IOError): cosmo.write(fname, format=format, overwrite=False) assert fname.exists() # overwrite file existing file cosmo.write(fname, format=format, overwrite=True) # Read back got = Cosmology.read(fname, format=format) assert got == cosmo assert (not metaio) ^ (dict(got.meta) == dict(cosmo.meta)) @pytest.mark.parametrize("format, metaio, has_deps", readwrite_formats) def test_readwrite_from_subclass_complete_info( self, cosmo_cls, cosmo, tmp_path, format, metaio, has_deps ): """ Test writing from an instance and reading from that class, when there's full information saved. """ if not has_deps: pytest.skip("missing a dependency") if (format, Cosmology) not in readwrite_registry._readers: pytest.xfail(f"no read method is registered for format {format!r}") fname = str(tmp_path / f"{cosmo.name}.{format}") cosmo.write(fname, format=format) # read with the same class that wrote. got = cosmo_cls.read(fname, format=format) assert got == cosmo assert (not metaio) ^ (dict(got.meta) == dict(cosmo.meta)) # this should be equivalent to got = Cosmology.read(fname, format=format, cosmology=cosmo_cls) assert got == cosmo assert (not metaio) ^ (dict(got.meta) == dict(cosmo.meta)) # and also got = Cosmology.read(fname, format=format, cosmology=cosmo_cls.__qualname__) assert got == cosmo assert (not metaio) ^ (dict(got.meta) == dict(cosmo.meta))
ReadWriteTestMixin
python
getsentry__responses
responses/tests/test_responses.py
{ "start": 76571, "end": 77314 }
class ____: """Validates that ``RequestsMock`` could be used as ``mock.patch``. This class is present as example in README.rst """ def setup_method(self): self.r_mock = responses.RequestsMock(assert_all_requests_are_fired=True) self.r_mock.start() self.r_mock.get("https://example.com", status=505) self.r_mock.put("https://example.com", status=506) def teardown_method(self): self.r_mock.stop() self.r_mock.reset() assert_reset() def test_function(self): resp = requests.get("https://example.com") assert resp.status_code == 505 resp = requests.put("https://example.com") assert resp.status_code == 506
TestUnitTestPatchSetup
python
weaviate__weaviate-python-client
weaviate/rbac/models.py
{ "start": 7883, "end": 8102 }
class ____(_Permission[UsersAction]): users: str def _to_weaviate(self) -> List[WeaviatePermission]: return [{"action": action, "users": {"users": self.users}} for action in self.actions]
_UsersPermission
python
hyperopt__hyperopt
hyperopt/exceptions.py
{ "start": 113, "end": 205 }
class ____(BadSearchSpace): """A search space included a duplicate label"""
DuplicateLabel
python
PrefectHQ__prefect
src/integrations/prefect-dbt/prefect_dbt/cloud/exceptions.py
{ "start": 630, "end": 790 }
class ____(DbtCloudException): """ Raised when a triggered job run does not complete in the configured max wait seconds """
DbtCloudJobRunTimedOut