language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
qdrant__qdrant-client
qdrant_client/http/api/snapshots_api.py
{ "start": 1315, "end": 14184 }
class ____: def __init__(self, api_client: "Union[ApiClient, AsyncApiClient]"): self.api_client = api_client def _build_for_create_full_snapshot( self, wait: bool = None, ): """ Create new snapshot of the whole storage """ query_params = {} if wait is not None: query_params["wait"] = str(wait).lower() headers = {} return self.api_client.request( type_=m.InlineResponse20011, method="POST", url="/snapshots", headers=headers if headers else None, params=query_params, ) def _build_for_create_shard_snapshot( self, collection_name: str, shard_id: int, wait: bool = None, ): """ Create new snapshot of a shard for a collection """ path_params = { "collection_name": str(collection_name), "shard_id": str(shard_id), } query_params = {} if wait is not None: query_params["wait"] = str(wait).lower() headers = {} return self.api_client.request( type_=m.InlineResponse20011, method="POST", url="/collections/{collection_name}/shards/{shard_id}/snapshots", headers=headers if headers else None, path_params=path_params, params=query_params, ) def _build_for_create_snapshot( self, collection_name: str, wait: bool = None, ): """ Create new snapshot for a collection """ path_params = { "collection_name": str(collection_name), } query_params = {} if wait is not None: query_params["wait"] = str(wait).lower() headers = {} return self.api_client.request( type_=m.InlineResponse20011, method="POST", url="/collections/{collection_name}/snapshots", headers=headers if headers else None, path_params=path_params, params=query_params, ) def _build_for_delete_full_snapshot( self, snapshot_name: str, wait: bool = None, ): """ Delete snapshot of the whole storage """ path_params = { "snapshot_name": str(snapshot_name), } query_params = {} if wait is not None: query_params["wait"] = str(wait).lower() headers = {} return self.api_client.request( type_=m.InlineResponse2009, method="DELETE", url="/snapshots/{snapshot_name}", headers=headers if headers else None, path_params=path_params, params=query_params, 
) def _build_for_delete_shard_snapshot( self, collection_name: str, shard_id: int, snapshot_name: str, wait: bool = None, ): """ Delete snapshot of a shard for a collection """ path_params = { "collection_name": str(collection_name), "shard_id": str(shard_id), "snapshot_name": str(snapshot_name), } query_params = {} if wait is not None: query_params["wait"] = str(wait).lower() headers = {} return self.api_client.request( type_=m.InlineResponse2009, method="DELETE", url="/collections/{collection_name}/shards/{shard_id}/snapshots/{snapshot_name}", headers=headers if headers else None, path_params=path_params, params=query_params, ) def _build_for_delete_snapshot( self, collection_name: str, snapshot_name: str, wait: bool = None, ): """ Delete snapshot for a collection """ path_params = { "collection_name": str(collection_name), "snapshot_name": str(snapshot_name), } query_params = {} if wait is not None: query_params["wait"] = str(wait).lower() headers = {} return self.api_client.request( type_=m.InlineResponse2009, method="DELETE", url="/collections/{collection_name}/snapshots/{snapshot_name}", headers=headers if headers else None, path_params=path_params, params=query_params, ) def _build_for_get_full_snapshot( self, snapshot_name: str, ): """ Download specified snapshot of the whole storage as a file """ path_params = { "snapshot_name": str(snapshot_name), } headers = {} return self.api_client.request( type_=file, method="GET", url="/snapshots/{snapshot_name}", headers=headers if headers else None, path_params=path_params, ) def _build_for_get_shard_snapshot( self, collection_name: str, shard_id: int, snapshot_name: str, ): """ Download specified snapshot of a shard from a collection as a file """ path_params = { "collection_name": str(collection_name), "shard_id": str(shard_id), "snapshot_name": str(snapshot_name), } headers = {} return self.api_client.request( type_=file, method="GET", 
url="/collections/{collection_name}/shards/{shard_id}/snapshots/{snapshot_name}", headers=headers if headers else None, path_params=path_params, ) def _build_for_get_snapshot( self, collection_name: str, snapshot_name: str, ): """ Download specified snapshot from a collection as a file """ path_params = { "collection_name": str(collection_name), "snapshot_name": str(snapshot_name), } headers = {} return self.api_client.request( type_=file, method="GET", url="/collections/{collection_name}/snapshots/{snapshot_name}", headers=headers if headers else None, path_params=path_params, ) def _build_for_list_full_snapshots( self, ): """ Get list of snapshots of the whole storage """ headers = {} return self.api_client.request( type_=m.InlineResponse20010, method="GET", url="/snapshots", headers=headers if headers else None, ) def _build_for_list_shard_snapshots( self, collection_name: str, shard_id: int, ): """ Get list of snapshots for a shard of a collection """ path_params = { "collection_name": str(collection_name), "shard_id": str(shard_id), } headers = {} return self.api_client.request( type_=m.InlineResponse20010, method="GET", url="/collections/{collection_name}/shards/{shard_id}/snapshots", headers=headers if headers else None, path_params=path_params, ) def _build_for_list_snapshots( self, collection_name: str, ): """ Get list of snapshots for a collection """ path_params = { "collection_name": str(collection_name), } headers = {} return self.api_client.request( type_=m.InlineResponse20010, method="GET", url="/collections/{collection_name}/snapshots", headers=headers if headers else None, path_params=path_params, ) def _build_for_recover_from_snapshot( self, collection_name: str, wait: bool = None, snapshot_recover: m.SnapshotRecover = None, ): """ Recover local collection data from a snapshot. This will overwrite any data, stored on this node, for the collection. If collection does not exist - it will be created. 
""" path_params = { "collection_name": str(collection_name), } query_params = {} if wait is not None: query_params["wait"] = str(wait).lower() headers = {} body = jsonable_encoder(snapshot_recover) if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( type_=m.InlineResponse2009, method="PUT", url="/collections/{collection_name}/snapshots/recover", headers=headers if headers else None, path_params=path_params, params=query_params, content=body, ) def _build_for_recover_from_uploaded_snapshot( self, collection_name: str, wait: bool = None, priority: SnapshotPriority = None, checksum: str = None, snapshot: IO[Any] = None, ): """ Recover local collection data from an uploaded snapshot. This will overwrite any data, stored on this node, for the collection. If collection does not exist - it will be created. """ path_params = { "collection_name": str(collection_name), } query_params = {} if wait is not None: query_params["wait"] = str(wait).lower() if priority is not None: query_params["priority"] = str(priority) if checksum is not None: query_params["checksum"] = str(checksum) headers = {} files: Dict[str, IO[Any]] = {} # noqa F841 data: Dict[str, Any] = {} # noqa F841 if snapshot is not None: files["snapshot"] = snapshot return self.api_client.request( type_=m.InlineResponse2009, method="POST", url="/collections/{collection_name}/snapshots/upload", headers=headers if headers else None, path_params=path_params, params=query_params, data=data, files=files, ) def _build_for_recover_shard_from_snapshot( self, collection_name: str, shard_id: int, wait: bool = None, shard_snapshot_recover: m.ShardSnapshotRecover = None, ): """ Recover shard of a local collection data from a snapshot. This will overwrite any data, stored in this shard, for the collection. 
""" path_params = { "collection_name": str(collection_name), "shard_id": str(shard_id), } query_params = {} if wait is not None: query_params["wait"] = str(wait).lower() headers = {} body = jsonable_encoder(shard_snapshot_recover) if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( type_=m.InlineResponse2009, method="PUT", url="/collections/{collection_name}/shards/{shard_id}/snapshots/recover", headers=headers if headers else None, path_params=path_params, params=query_params, content=body, ) def _build_for_recover_shard_from_uploaded_snapshot( self, collection_name: str, shard_id: int, wait: bool = None, priority: SnapshotPriority = None, checksum: str = None, snapshot: IO[Any] = None, ): """ Recover shard of a local collection from an uploaded snapshot. This will overwrite any data, stored on this node, for the collection shard. """ path_params = { "collection_name": str(collection_name), "shard_id": str(shard_id), } query_params = {} if wait is not None: query_params["wait"] = str(wait).lower() if priority is not None: query_params["priority"] = str(priority) if checksum is not None: query_params["checksum"] = str(checksum) headers = {} files: Dict[str, IO[Any]] = {} # noqa F841 data: Dict[str, Any] = {} # noqa F841 if snapshot is not None: files["snapshot"] = snapshot return self.api_client.request( type_=m.InlineResponse2009, method="POST", url="/collections/{collection_name}/shards/{shard_id}/snapshots/upload", headers=headers if headers else None, path_params=path_params, params=query_params, data=data, files=files, )
_SnapshotsApi
python
facebookresearch__faiss
tests/test_build_blocks.py
{ "start": 15554, "end": 16922 }
class ____(unittest.TestCase): def do_test(self, ismax, dtype): rs = np.random.RandomState() n, k, nshard = 10, 5, 3 all_ids = rs.randint(100000, size=(nshard, n, k)).astype('int64') all_dis = rs.rand(nshard, n, k) if dtype == 'int32': all_dis = (all_dis * 1000000).astype("int32") else: all_dis = all_dis.astype(dtype) for i in range(nshard): for j in range(n): all_dis[i, j].sort() if ismax: all_dis[i, j] = all_dis[i, j][::-1] Dref = np.zeros((n, k), dtype=dtype) Iref = np.zeros((n, k), dtype='int64') for i in range(n): dis = all_dis[:, i, :].ravel() ids = all_ids[:, i, :].ravel() o = dis.argsort() if ismax: o = o[::-1] Dref[i] = dis[o[:k]] Iref[i] = ids[o[:k]] Dnew, Inew = faiss.merge_knn_results(all_dis, all_ids, keep_max=ismax) np.testing.assert_array_equal(Dnew, Dref) np.testing.assert_array_equal(Inew, Iref) def test_min_float(self): self.do_test(ismax=False, dtype='float32') def test_max_int(self): self.do_test(ismax=True, dtype='int32') def test_max_float(self): self.do_test(ismax=True, dtype='float32')
TestMergeKNNResults
python
PyCQA__pylint
tests/functional/u/unsupported/unsupported_assignment_operation.py
{ "start": 1894, "end": 1991 }
class ____(type): def __setitem__(cls, key, value): return key + value
MetaSubscriptable
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/links/compute.py
{ "start": 1611, "end": 1884 }
class ____(BaseGoogleLink): """Helper class for constructing Compute Instance Template details Link.""" name = "Compute Instance Template details" key = "compute_instance_template_details" format_str = COMPUTE_TEMPLATE_LINK
ComputeInstanceTemplateDetailsLink
python
altair-viz__altair
altair/vegalite/v6/schema/core.py
{ "start": 817096, "end": 817543 }
class ____(VegaLiteSchema): """ OrderOnlyDef schema wrapper. Parameters ---------- sort : :class:`SortOrder`, Literal['ascending', 'descending'] The sort order. One of ``"ascending"`` (default) or ``"descending"``. """ _schema = {"$ref": "#/definitions/OrderOnlyDef"} def __init__(self, sort: Optional[SchemaBase | SortOrder_T] = Undefined, **kwds): super().__init__(sort=sort, **kwds)
OrderOnlyDef
python
huggingface__transformers
src/transformers/models/lilt/modeling_lilt.py
{ "start": 36481, "end": 37270 }
class ____(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x @auto_docstring
LiltClassificationHead
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/required1.py
{ "start": 1038, "end": 1275 }
class ____: # This should generate an error because Required can't be # used in this context. x: Required[int] # This should generate an error because NotRequired can't be # used in this context. y: Required[int]
Foo
python
doocs__leetcode
solution/2200-2299/2259.Remove Digit From Number to Maximize Result/Solution.py
{ "start": 0, "end": 190 }
class ____: def removeDigit(self, number: str, digit: str) -> str: return max( number[:i] + number[i + 1 :] for i, d in enumerate(number) if d == digit )
Solution
python
pypa__pipenv
pipenv/patched/pip/_internal/resolution/resolvelib/candidates.py
{ "start": 11523, "end": 12205 }
class ____(_InstallRequirementBackedCandidate): is_editable = True def __init__( self, link: Link, template: InstallRequirement, factory: "Factory", name: Optional[NormalizedName] = None, version: Optional[Version] = None, ) -> None: super().__init__( link=link, source_link=link, ireq=make_install_req_from_editable(link, template), factory=factory, name=name, version=version, ) def _prepare_distribution(self) -> BaseDistribution: return self._factory.preparer.prepare_editable_requirement(self._ireq)
EditableCandidate
python
Lightning-AI__lightning
tests/tests_pytorch/loops/test_fetchers.py
{ "start": 9246, "end": 10932 }
class ____(BoringModel): def __init__(self) -> None: super().__init__() self.automatic_optimization = False self.batch_i_handle = None self.num_batches_processed = 0 def _async_op(self, batch: Any) -> DummyWaitable: return DummyWaitable(val=batch) def training_step(self, dataloader_iter: Iterator) -> STEP_OUTPUT: if self.batch_i_handle is None: batch_i_raw, _, _ = next(dataloader_iter) self.num_batches_processed += 1 self.batch_i_handle = self._async_op(batch_i_raw) # Invariant: _async_op for batch[i] has been initiated batch_ip1_handle = None is_last = False try: batch_ip1_raw, _, _ = next(dataloader_iter) self.num_batches_processed += 1 batch_ip1_handle = self._async_op(batch_ip1_raw) except StopIteration: is_last = True batch_i = self.batch_i_handle.wait() loss = self.step(batch_i) loss.backward() self.optimizers().step() self.optimizers().zero_grad() self.batch_i_handle = batch_ip1_handle return {"loss": loss, "is_last": is_last} def train_dataloader(self): return DataLoader(RandomDataset(BATCH_SIZE, DATASET_LEN)) def test_training_step_with_dataloader_iter(tmp_path) -> None: """A baseline functional test for `training_step` with dataloader access.""" trainer = Trainer(max_epochs=1, default_root_dir=tmp_path, accelerator="cpu") m = AsyncBoringModel() trainer.fit(m) assert m.num_batches_processed == DATASET_LEN, f"Expect all {DATASET_LEN} batches to be processed."
AsyncBoringModel
python
huggingface__transformers
src/transformers/models/align/modeling_align.py
{ "start": 11014, "end": 12390 }
class ____(nn.Module): r""" This corresponds to the Squeeze and Excitement phase of each block in the original implementation. """ def __init__(self, config: AlignVisionConfig, in_dim: int, expand_dim: int, expand: bool = False): super().__init__() self.dim = expand_dim if expand else in_dim self.dim_se = max(1, int(in_dim * config.squeeze_expansion_ratio)) self.squeeze = nn.AdaptiveAvgPool2d(output_size=1) self.reduce = nn.Conv2d( in_channels=self.dim, out_channels=self.dim_se, kernel_size=1, padding="same", ) self.expand = nn.Conv2d( in_channels=self.dim_se, out_channels=self.dim, kernel_size=1, padding="same", ) self.act_reduce = ACT2FN[config.hidden_act] self.act_expand = nn.Sigmoid() def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: inputs = hidden_states hidden_states = self.squeeze(hidden_states) hidden_states = self.reduce(hidden_states) hidden_states = self.act_reduce(hidden_states) hidden_states = self.expand(hidden_states) hidden_states = self.act_expand(hidden_states) hidden_states = torch.mul(inputs, hidden_states) return hidden_states
AlignVisionSqueezeExciteLayer
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pyupgrade/UP044.py
{ "start": 82, "end": 126 }
class ____(Generic[Unpack[Shape]]): pass
C
python
apache__airflow
airflow-ctl/src/airflowctl/api/operations.py
{ "start": 24519, "end": 26572 }
class ____(BaseOperations): """Variable operations.""" def get(self, variable_key: str) -> VariableResponse | ServerResponseError: """Get a variable.""" try: self.response = self.client.get(f"variables/{variable_key}") return VariableResponse.model_validate_json(self.response.content) except ServerResponseError as e: raise e def list(self) -> VariableCollectionResponse | ServerResponseError: """List all variables.""" return super().execute_list(path="variables", data_model=VariableCollectionResponse) def create(self, variable: VariableBody) -> VariableResponse | ServerResponseError: """Create a variable.""" try: self.response = self.client.post("variables", json=variable.model_dump(mode="json")) return VariableResponse.model_validate_json(self.response.content) except ServerResponseError as e: raise e def bulk(self, variables: BulkBodyVariableBody) -> BulkResponse | ServerResponseError: """CRUD multiple variables.""" try: self.response = self.client.patch("variables", json=variables.model_dump(mode="json")) return BulkResponse.model_validate_json(self.response.content) except ServerResponseError as e: raise e def delete(self, variable_key: str) -> str | ServerResponseError: """Delete a variable.""" try: self.client.delete(f"variables/{variable_key}") return variable_key except ServerResponseError as e: raise e def update(self, variable: VariableBody) -> VariableResponse | ServerResponseError: """Update a variable.""" try: self.response = self.client.patch( f"variables/{variable.key}", json=variable.model_dump(mode="json") ) return VariableResponse.model_validate_json(self.response.content) except ServerResponseError as e: raise e
VariablesOperations
python
numba__llvmlite
llvmlite/ir/values.py
{ "start": 22912, "end": 23594 }
class ____(set): """A set of string attribute. Only accept items listed in *_known*. Properties: * Iterate in sorted order """ _known = () def __init__(self, args=()): super().__init__() if isinstance(args, str): args = [args] for name in args: self.add(name) def _expand(self, name, typ): return name def add(self, name): if name not in self._known: raise ValueError('unknown attr {!r} for {}'.format(name, self)) return super(AttributeSet, self).add(name) def _to_list(self, typ): return [self._expand(i, typ) for i in sorted(self)]
AttributeSet
python
great-expectations__great_expectations
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_pubmed_id.py
{ "start": 483, "end": 1599 }
class ____(ColumnMapMetricProvider): # This is the id string that will be used to reference your metric. condition_metric_name = "column_values.valid_pubmed_id" # This method implements the core logic for the PandasExecutionEngine @column_condition_partial(engine=PandasExecutionEngine) def _pandas(cls, column, **kwargs): def matches_pubmed_id_regex(x): return bool(re.match(PUBMED_ID_REGEX, str(x))) return column.apply(lambda x: matches_pubmed_id_regex(x) if x else False) # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine # @column_condition_partial(engine=SqlAlchemyExecutionEngine) # def _sqlalchemy(cls, column, _dialect, **kwargs): # raise NotImplementedError # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine # @column_condition_partial(engine=SparkDFExecutionEngine) # def _spark(cls, column, **kwargs): # raise NotImplementedError # This class defines the Expectation itself
ColumnValuesToBeValidPubmedId
python
tensorflow__tensorflow
tensorflow/python/tpu/tpu_embedding_v3.py
{ "start": 5819, "end": 7342 }
class ____(saveable_object.SaveableObject): """Defines how to save and restore a shard of TPUEmbedding sharded variable.""" def __init__( self, variable: tf_variables.Variable, shard_id: int, num_shards: int, shard_dim: int, name: str, ): """Init TPUEmbeddingShardedSaveable.""" self._shard_id = shard_id self._variable = variable var_offset = [0] * len(variable.shape) # NOTE: always assume even sharding var_offset[shard_dim] = shard_id * variable.shape[shard_dim] fullshape = variable.shape.as_list() fullshape[shard_dim] = num_shards * fullshape[shard_dim] save_slice_info = tf_variables.Variable.SaveSliceInfo( full_name=name, full_shape=fullshape, var_offset=var_offset, var_shape=variable.shape.as_list(), ) spec = saveable_object.SaveSpec( tensor=variable.read_value, slice_spec=save_slice_info.spec, name=name, dtype=variable.dtype, device=variable.device, ) super().__init__(variable.read_value, [spec], name) def restore( self, restored_tensors: List[tensor.Tensor], restored_shapes: List[tensor_shape.TensorShape], ) -> Any: del restored_shapes restored_tensor = restored_tensors[0] return values_util.assign_on_device( self._variable.device, self._variable, restored_tensor ) def _fielddict(): return dataclasses.field(default_factory=dict) @dataclasses.dataclass
TPUEmbeddingShardedSaveable
python
gevent__gevent
src/greentest/3.12/test_ssl.py
{ "start": 203872, "end": 214902 }
class ____(unittest.TestCase): """Verify behavior of close sockets with received data before to the handshake. """ class SingleConnectionTestServerThread(threading.Thread): def __init__(self, *, name, call_after_accept, timeout=None): self.call_after_accept = call_after_accept self.received_data = b'' # set by .run() self.wrap_error = None # set by .run() self.listener = None # set by .start() self.port = None # set by .start() if timeout is None: self.timeout = support.SHORT_TIMEOUT else: self.timeout = timeout super().__init__(name=name) def __enter__(self): self.start() return self def __exit__(self, *args): try: if self.listener: self.listener.close() except OSError: pass self.join() self.wrap_error = None # avoid dangling references def start(self): self.ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) self.ssl_ctx.verify_mode = ssl.CERT_REQUIRED self.ssl_ctx.load_verify_locations(cafile=ONLYCERT) self.ssl_ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY) self.listener = socket.socket() self.port = socket_helper.bind_port(self.listener) self.listener.settimeout(self.timeout) self.listener.listen(1) super().start() def run(self): try: conn, address = self.listener.accept() except TimeoutError: # on timeout, just close the listener return finally: self.listener.close() with conn: if self.call_after_accept(conn): return try: tls_socket = self.ssl_ctx.wrap_socket(conn, server_side=True) except OSError as err: # ssl.SSLError inherits from OSError self.wrap_error = err else: try: self.received_data = tls_socket.recv(400) except OSError: pass # closed, protocol error, etc. def non_linux_skip_if_other_okay_error(self, err): if sys.platform == "linux": return # Expect the full test setup to always work on Linux. 
if (isinstance(err, ConnectionResetError) or (isinstance(err, OSError) and err.errno == errno.EINVAL) or re.search('wrong.version.number', str(getattr(err, "reason", "")), re.I)): # On Windows the TCP RST leads to a ConnectionResetError # (ECONNRESET) which Linux doesn't appear to surface to userspace. # If wrap_socket() winds up on the "if connected:" path and doing # the actual wrapping... we get an SSLError from OpenSSL. Typically # WRONG_VERSION_NUMBER. While appropriate, neither is the scenario # we're specifically trying to test. The way this test is written # is known to work on Linux. We'll skip it anywhere else that it # does not present as doing so. try: self.skipTest(f"Could not recreate conditions on {sys.platform}:" f" {err=}") finally: # gh-108342: Explicitly break the reference cycle err = None # If maintaining this conditional winds up being a problem. # just turn this into an unconditional skip anything but Linux. # The important thing is that our CI has the logic covered. def test_preauth_data_to_tls_server(self): server_accept_called = threading.Event() ready_for_server_wrap_socket = threading.Event() def call_after_accept(unused): server_accept_called.set() if not ready_for_server_wrap_socket.wait(support.SHORT_TIMEOUT): raise RuntimeError("wrap_socket event never set, test may fail.") return False # Tell the server thread to continue. server = self.SingleConnectionTestServerThread( call_after_accept=call_after_accept, name="preauth_data_to_tls_server") self.enterContext(server) # starts it & unittest.TestCase stops it. with socket.socket() as client: client.connect(server.listener.getsockname()) # This forces an immediate connection close via RST on .close(). 
set_socket_so_linger_on_with_zero_timeout(client) client.setblocking(False) server_accept_called.wait() client.send(b"DELETE /data HTTP/1.0\r\n\r\n") client.close() # RST ready_for_server_wrap_socket.set() server.join() wrap_error = server.wrap_error server.wrap_error = None try: self.assertEqual(b"", server.received_data) self.assertIsInstance(wrap_error, OSError) # All platforms. self.non_linux_skip_if_other_okay_error(wrap_error) self.assertIsInstance(wrap_error, ssl.SSLError) self.assertIn("before TLS handshake with data", wrap_error.args[1]) self.assertIn("before TLS handshake with data", wrap_error.reason) self.assertNotEqual(0, wrap_error.args[0]) self.assertIsNone(wrap_error.library, msg="attr must exist") finally: # gh-108342: Explicitly break the reference cycle wrap_error = None server = None def test_preauth_data_to_tls_client(self): server_can_continue_with_wrap_socket = threading.Event() client_can_continue_with_wrap_socket = threading.Event() def call_after_accept(conn_to_client): if not server_can_continue_with_wrap_socket.wait(support.SHORT_TIMEOUT): print("ERROR: test client took too long") # This forces an immediate connection close via RST on .close(). set_socket_so_linger_on_with_zero_timeout(conn_to_client) conn_to_client.send( b"HTTP/1.0 307 Temporary Redirect\r\n" b"Location: https://example.com/someone-elses-server\r\n" b"\r\n") conn_to_client.close() # RST client_can_continue_with_wrap_socket.set() return True # Tell the server to stop. server = self.SingleConnectionTestServerThread( call_after_accept=call_after_accept, name="preauth_data_to_tls_client") self.enterContext(server) # starts it & unittest.TestCase stops it. # Redundant; call_after_accept sets SO_LINGER on the accepted conn. 
set_socket_so_linger_on_with_zero_timeout(server.listener) with socket.socket() as client: client.connect(server.listener.getsockname()) server_can_continue_with_wrap_socket.set() if not client_can_continue_with_wrap_socket.wait(support.SHORT_TIMEOUT): self.fail("test server took too long") ssl_ctx = ssl.create_default_context() try: tls_client = ssl_ctx.wrap_socket( client, server_hostname="localhost") except OSError as err: # SSLError inherits from OSError wrap_error = err received_data = b"" else: wrap_error = None received_data = tls_client.recv(400) tls_client.close() server.join() try: self.assertEqual(b"", received_data) self.assertIsInstance(wrap_error, OSError) # All platforms. self.non_linux_skip_if_other_okay_error(wrap_error) self.assertIsInstance(wrap_error, ssl.SSLError) self.assertIn("before TLS handshake with data", wrap_error.args[1]) self.assertIn("before TLS handshake with data", wrap_error.reason) self.assertNotEqual(0, wrap_error.args[0]) self.assertIsNone(wrap_error.library, msg="attr must exist") finally: # gh-108342: Explicitly break the reference cycle with warnings_helper.check_no_resource_warning(self): wrap_error = None server = None def test_https_client_non_tls_response_ignored(self): server_responding = threading.Event() class SynchronizedHTTPSConnection(http.client.HTTPSConnection): def connect(self): # Call clear text HTTP connect(), not the encrypted HTTPS (TLS) # connect(): wrap_socket() is called manually below. http.client.HTTPConnection.connect(self) # Wait for our fault injection server to have done its thing. if not server_responding.wait(support.SHORT_TIMEOUT) and support.verbose: sys.stdout.write("server_responding event never set.") self.sock = self._context.wrap_socket( self.sock, server_hostname=self.host) def call_after_accept(conn_to_client): # This forces an immediate connection close via RST on .close(). 
set_socket_so_linger_on_with_zero_timeout(conn_to_client) conn_to_client.send( b"HTTP/1.0 402 Payment Required\r\n" b"\r\n") conn_to_client.close() # RST server_responding.set() return True # Tell the server to stop. timeout = 2.0 server = self.SingleConnectionTestServerThread( call_after_accept=call_after_accept, name="non_tls_http_RST_responder", timeout=timeout) self.enterContext(server) # starts it & unittest.TestCase stops it. # Redundant; call_after_accept sets SO_LINGER on the accepted conn. set_socket_so_linger_on_with_zero_timeout(server.listener) connection = SynchronizedHTTPSConnection( server.listener.getsockname()[0], port=server.port, context=ssl.create_default_context(), timeout=timeout, ) # There are lots of reasons this raises as desired, long before this # test was added. Sending the request requires a successful TLS wrapped # socket; that fails if the connection is broken. It may seem pointless # to test this. It serves as an illustration of something that we never # want to happen... properly not happening. with warnings_helper.check_no_resource_warning(self), \ self.assertRaises(OSError): connection.request("HEAD", "/test", headers={"Host": "localhost"}) response = connection.getresponse() server.join()
TestPreHandshakeClose
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI034.py
{ "start": 10677, "end": 10811 }
class ____(tuple[*Ts]): def __new__(cls: type[Generic3]) -> Generic3: ... def __enter__(self: Generic3) -> Generic3: ...
Generic3
python
encode__django-rest-framework
tests/test_renderers.py
{ "start": 29646, "end": 34520 }
class ____(TestCase): def setUp(self): self.renderer = AdminRenderer() def test_render_when_resource_created(self): class DummyView(APIView): renderer_classes = (AdminRenderer, ) request = Request(HttpRequest()) request.build_absolute_uri = lambda: 'http://example.com' response = Response(status=201, headers={'Location': '/test'}) context = { 'view': DummyView(), 'request': request, 'response': response } result = self.renderer.render(data={'test': 'test'}, renderer_context=context) assert result == '' assert response.status_code == status.HTTP_303_SEE_OTHER assert response['Location'] == 'http://example.com' def test_render_dict(self): factory = APIRequestFactory() class DummyView(APIView): renderer_classes = (AdminRenderer, ) def get(self, request): return Response({'foo': 'a string'}) view = DummyView.as_view() request = factory.get('/') response = view(request) response.render() self.assertContains(response, '<tr><th>Foo</th><td>a string</td></tr>', html=True) def test_render_dict_with_items_key(self): factory = APIRequestFactory() class DummyView(APIView): renderer_classes = (AdminRenderer, ) def get(self, request): return Response({'items': 'a string'}) view = DummyView.as_view() request = factory.get('/') response = view(request) response.render() self.assertContains(response, '<tr><th>Items</th><td>a string</td></tr>', html=True) def test_render_dict_with_iteritems_key(self): factory = APIRequestFactory() class DummyView(APIView): renderer_classes = (AdminRenderer, ) def get(self, request): return Response({'iteritems': 'a string'}) view = DummyView.as_view() request = factory.get('/') response = view(request) response.render() self.assertContains(response, '<tr><th>Iteritems</th><td>a string</td></tr>', html=True) def test_get_result_url(self): factory = APIRequestFactory() class DummyGenericViewsetLike(APIView): lookup_field = 'test' def get(self, request): response = Response() response.view = self return response def reverse_action(view, *args, 
**kwargs): self.assertEqual(kwargs['kwargs']['test'], 1) return '/example/' # get the view instance instead of the view function view = DummyGenericViewsetLike.as_view() request = factory.get('/') response = view(request) view = response.view self.assertEqual(self.renderer.get_result_url({'test': 1}, view), '/example/') self.assertIsNone(self.renderer.get_result_url({}, view)) def test_get_result_url_no_result(self): factory = APIRequestFactory() class DummyView(APIView): lookup_field = 'test' def get(self, request): response = Response() response.view = self return response # get the view instance instead of the view function view = DummyView.as_view() request = factory.get('/') response = view(request) view = response.view self.assertIsNone(self.renderer.get_result_url({'test': 1}, view)) self.assertIsNone(self.renderer.get_result_url({}, view)) def test_get_context_result_urls(self): factory = APIRequestFactory() class DummyView(APIView): lookup_field = 'test' def reverse_action(view, url_name, args=None, kwargs=None): return '/%s/%d' % (url_name, kwargs['test']) # get the view instance instead of the view function view = DummyView.as_view() request = factory.get('/') response = view(request) data = [ {'test': 1}, {'url': '/example', 'test': 2}, {'url': None, 'test': 3}, {}, ] context = { 'view': DummyView(), 'request': Request(request), 'response': response } context = self.renderer.get_context(data, None, context) results = context['results'] self.assertEqual(len(results), 4) self.assertEqual(results[0]['url'], '/detail/1') self.assertEqual(results[1]['url'], '/example') self.assertEqual(results[2]['url'], None) self.assertNotIn('url', results[3]) @pytest.mark.skipif(not coreapi, reason='coreapi is not installed')
AdminRendererTests
python
dagster-io__dagster
python_modules/dagster/dagster/_core/types/pagination.py
{ "start": 1752, "end": 2312 }
class ____: """ Cursor class useful for paginating results based on a last seen value. """ value: Any def __str__(self) -> str: return self.to_string() def to_string(self) -> str: string_serialized = serialize_value(self) return base64.b64encode(bytes(string_serialized, encoding="utf-8")).decode( "utf-8" ) @classmethod def from_cursor(cls, cursor: str): return deserialize_value(base64.b64decode(cursor).decode("utf-8"), cls) @whitelist_for_serdes @record
ValueIndexCursor
python
getsentry__sentry
src/sentry/integrations/base.py
{ "start": 2192, "end": 3429 }
class ____(NamedTuple): description: str | _StrPromise # A markdown description of the integration features: Sequence[FeatureDescription] # A list of FeatureDescriptions author: str # The integration author's name noun: str | _StrPromise # The noun used to identify the integration issue_url: str # URL where issues should be opened source_url: str # URL to view the source aspects: dict[str, Any] # A map of integration specific 'aspects' to the aspect config. @staticmethod def feature_flag_name(f: str | None) -> str | None: """ FeatureDescriptions are set using the IntegrationFeatures constants, however we expose them here as mappings to organization feature flags, thus we prefix them with `integration`. """ if f is not None: return f"integrations-{f}" return None def asdict(self) -> dict[str, Any]: metadata = self._asdict() metadata["features"] = [ { "description": f.description.strip(), "featureGate": self.feature_flag_name(f.featureGate.value), } for f in self.features ] return metadata
IntegrationMetadata
python
has2k1__plotnine
plotnine/scales/scale_color.py
{ "start": 8136, "end": 8290 }
class ____(scale_color_gradient2): """ Create a 3 point diverging color gradient """ _aesthetics = ["fill"] @dataclass
scale_fill_gradient2
python
apache__airflow
providers/google/tests/unit/google/cloud/hooks/vertex_ai/test_custom_job.py
{ "start": 7527, "end": 12075 }
class ____: def setup_method(self): with mock.patch( BASE_STRING.format("GoogleBaseHook.__init__"), new=mock_base_gcp_hook_no_default_project_id ): self.hook = CustomJobHook(gcp_conn_id=TEST_GCP_CONN_ID) @mock.patch(CUSTOM_JOB_STRING.format("CustomJobHook.get_pipeline_service_client")) def test_cancel_training_pipeline(self, mock_client) -> None: self.hook.cancel_training_pipeline( project_id=TEST_PROJECT_ID, region=TEST_REGION, training_pipeline=TEST_TRAINING_PIPELINE_NAME, ) mock_client.assert_called_once_with(TEST_REGION) mock_client.return_value.cancel_training_pipeline.assert_called_once_with( request=dict( name=mock_client.return_value.training_pipeline_path.return_value, ), metadata=(), retry=DEFAULT, timeout=None, ) mock_client.return_value.training_pipeline_path.assert_called_once_with( TEST_PROJECT_ID, TEST_REGION, TEST_TRAINING_PIPELINE_NAME ) @mock.patch(CUSTOM_JOB_STRING.format("CustomJobHook.get_pipeline_service_client")) def test_create_training_pipeline(self, mock_client) -> None: self.hook.create_training_pipeline( project_id=TEST_PROJECT_ID, region=TEST_REGION, training_pipeline=TEST_TRAINING_PIPELINE, ) mock_client.assert_called_once_with(TEST_REGION) mock_client.return_value.create_training_pipeline.assert_called_once_with( request=dict( parent=mock_client.return_value.common_location_path.return_value, training_pipeline=TEST_TRAINING_PIPELINE, ), metadata=(), retry=DEFAULT, timeout=None, ) mock_client.return_value.common_location_path.assert_called_once_with(TEST_PROJECT_ID, TEST_REGION) @mock.patch(CUSTOM_JOB_STRING.format("CustomJobHook.get_pipeline_service_client")) def test_delete_training_pipeline(self, mock_client) -> None: self.hook.delete_training_pipeline( project_id=TEST_PROJECT_ID, region=TEST_REGION, training_pipeline=TEST_TRAINING_PIPELINE_NAME, ) mock_client.assert_called_once_with(TEST_REGION) mock_client.return_value.delete_training_pipeline.assert_called_once_with( request=dict( 
name=mock_client.return_value.training_pipeline_path.return_value, ), metadata=(), retry=DEFAULT, timeout=None, ) mock_client.return_value.training_pipeline_path.assert_called_once_with( TEST_PROJECT_ID, TEST_REGION, TEST_TRAINING_PIPELINE_NAME ) @mock.patch(CUSTOM_JOB_STRING.format("CustomJobHook.get_pipeline_service_client")) def test_get_training_pipeline(self, mock_client) -> None: self.hook.get_training_pipeline( project_id=TEST_PROJECT_ID, region=TEST_REGION, training_pipeline=TEST_TRAINING_PIPELINE_NAME, ) mock_client.assert_called_once_with(TEST_REGION) mock_client.return_value.get_training_pipeline.assert_called_once_with( request=dict( name=mock_client.return_value.training_pipeline_path.return_value, ), metadata=(), retry=DEFAULT, timeout=None, ) mock_client.return_value.training_pipeline_path.assert_called_once_with( TEST_PROJECT_ID, TEST_REGION, TEST_TRAINING_PIPELINE_NAME ) @mock.patch(CUSTOM_JOB_STRING.format("CustomJobHook.get_pipeline_service_client")) def test_list_training_pipelines(self, mock_client) -> None: self.hook.list_training_pipelines( project_id=TEST_PROJECT_ID, region=TEST_REGION, ) mock_client.assert_called_once_with(TEST_REGION) mock_client.return_value.list_training_pipelines.assert_called_once_with( request=dict( parent=mock_client.return_value.common_location_path.return_value, page_size=None, page_token=None, filter=None, read_mask=None, ), metadata=(), retry=DEFAULT, timeout=None, ) mock_client.return_value.common_location_path.assert_called_once_with(TEST_PROJECT_ID, TEST_REGION)
TestCustomJobWithoutDefaultProjectIdHook
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/dialects/mssql/pyodbc.py
{ "start": 18929, "end": 19002 }
class ____(_ODBCDateTimeBindProcessor, _MSDateTime): pass
_ODBCDateTime
python
numpy__numpy
numpy/lib/tests/test_shape_base.py
{ "start": 20397, "end": 21054 }
class ____: """Only testing for integer splits. """ def test_non_iterable(self): assert_raises(ValueError, vsplit, 1, 1) def test_0D_array(self): a = np.array(1) assert_raises(ValueError, vsplit, a, 2) def test_1D_array(self): a = np.array([1, 2, 3, 4]) try: vsplit(a, 2) assert_(0) except ValueError: pass def test_2D_array(self): a = np.array([[1, 2, 3, 4], [1, 2, 3, 4]]) res = vsplit(a, 2) desired = [np.array([[1, 2, 3, 4]]), np.array([[1, 2, 3, 4]])] compare_results(res, desired)
TestVsplit
python
airbytehq__airbyte
airbyte-ci/connectors/metadata_service/lib/metadata_service/templates/render.py
{ "start": 1372, "end": 2873 }
class ____: column: str title: str formatter: Optional[Callable[[Any], str]] = None def dataframe_to_table_html(df: pd.DataFrame, column_mapping: List[ColumnInfo]) -> str: """ Convert a dataframe to an HTML table. """ # convert true and false to checkmarks and x's df.replace({True: "✅", False: "❌"}, inplace=True) title_mapping = {column_info["column"]: column_info["title"] for column_info in column_mapping} df.rename(columns=title_mapping, inplace=True) # explicit type decl to satisfy the type checker html_formatters: dict[Hashable, Callable[[Any], str]] = { column_info["title"]: column_info["formatter"] for column_info in column_mapping if "formatter" in column_info } columns = [column_info["title"] for column_info in column_mapping] return df.to_html( columns=columns, justify="left", index=False, formatters=html_formatters, escape=False, classes="styled-table", na_rep="❌", render_links=True, ) # Templates def render_connector_registry_locations_html(destinations_table_html: str, sources_table_html: str) -> str: # yes, we really are dynamically loading the package env = Environment(loader=PackageLoader("metadata_service", "templates")) template = env.get_template("connector_registry_locations.html") return template.render(destinations_table_html=destinations_table_html, sources_table_html=sources_table_html)
ColumnInfo
python
HypothesisWorks__hypothesis
hypothesis-python/tests/attrs/test_pretty.py
{ "start": 1177, "end": 1710 }
class ____: a: int b: int c: int d: int e: int f: int g: int h: int i: int j: int k: int l: int m: int n: int o: int p: int q: int r: int s: int def test_will_line_break_between_fields(): obj = SomeAttrsClassWithLotsOfFields( **{ at.name: 12345678900000000000000001 for at in SomeAttrsClassWithLotsOfFields.__attrs_attrs__ } ) assert "\n" in pretty.pretty(obj) @attrs.define
SomeAttrsClassWithLotsOfFields
python
scipy__scipy
scipy/integrate/_rules/_base.py
{ "start": 9001, "end": 12275 }
class ____(FixedRule): r""" A cubature rule with error estimate given by the difference between two underlying fixed rules. If constructed as ``NestedFixedRule(higher, lower)``, this will use:: estimate(f, a, b) := higher.estimate(f, a, b) estimate_error(f, a, b) := \|higher.estimate(f, a, b) - lower.estimate(f, a, b)| (where the absolute value is taken elementwise). Attributes ---------- higher : Rule Higher accuracy rule. lower : Rule Lower accuracy rule. See Also -------- GaussKronrodQuadrature Examples -------- >>> from scipy.integrate import cubature >>> from scipy.integrate._rules import ( ... GaussLegendreQuadrature, NestedFixedRule, ProductNestedFixed ... ) >>> higher = GaussLegendreQuadrature(10) >>> lower = GaussLegendreQuadrature(5) >>> rule = NestedFixedRule( ... higher, ... lower ... ) >>> rule_2d = ProductNestedFixed([rule, rule]) """ def __init__(self, higher, lower): self.higher = higher self.lower = lower self.xp = None @property def nodes_and_weights(self): if self.higher is not None: return self.higher.nodes_and_weights else: raise NotImplementedError @property def lower_nodes_and_weights(self): if self.lower is not None: return self.lower.nodes_and_weights else: raise NotImplementedError def estimate_error(self, f, a, b, args=()): r""" Estimate the error of the approximation for the integral of `f` in rectangular region described by corners `a` and `b`. Parameters ---------- f : callable Function to estimate error for. `f` must have the signature:: f(x : ndarray, \*args) -> ndarray `f` should accept arrays `x` of shape:: (npoints, ndim) and output arrays of shape:: (npoints, output_dim_1, ..., output_dim_n) In this case, `estimate` will return arrays of shape:: (output_dim_1, ..., output_dim_n) a, b : ndarray Lower and upper limits of integration as rank-1 arrays specifying the left and right endpoints of the intervals being integrated over. Infinite limits are currently not supported. 
args : tuple, optional Additional positional args passed to `f`, if any. Returns ------- err_est : ndarray Result of error estimation. If `f` returns arrays of shape ``(npoints, output_dim_1, ..., output_dim_n)``, then `est` will be of shape ``(output_dim_1, ..., output_dim_n)``. """ nodes, weights = self.nodes_and_weights lower_nodes, lower_weights = self.lower_nodes_and_weights if self.xp is None: self.xp = array_namespace(nodes) error_nodes = self.xp.concat([nodes, lower_nodes], axis=0) error_weights = self.xp.concat([weights, -lower_weights], axis=0) return self.xp.abs( _apply_fixed_rule(f, a, b, error_nodes, error_weights, args, self.xp) )
NestedFixedRule
python
getsentry__sentry
src/sentry/db/models/fields/bounded.py
{ "start": 1764, "end": 2019 }
class ____(models.AutoField): MAX_VALUE = I32_MAX def get_prep_value(self, value: int) -> int: if value: value = int(value) assert value <= self.MAX_VALUE return super().get_prep_value(value)
BoundedAutoField
python
pytest-dev__pytest-xdist
testing/acceptance_test.py
{ "start": 12284, "end": 13608 }
class ____: def test_simple(self, pytester: pytest.Pytester) -> None: pytester.makepyfile( """ def test_hello(): pass """ ) result = pytester.runpytest_subprocess("--debug", "--dist=each", "--tx=2*popen") assert not result.ret result.stdout.fnmatch_lines(["*2 pass*"]) @pytest.mark.xfail( run=False, reason="other python versions might not have pytest installed" ) def test_simple_diffoutput(self, pytester: pytest.Pytester) -> None: interpreters = [] for name in ("python2.5", "python2.6"): interp = shutil.which(name) if interp is None: pytest.skip("%s not found" % name) interpreters.append(interp) pytester.makepyfile( __init__="", test_one=""" import sys def test_hello(): print("%s...%s" % sys.version_info[:2]) assert 0 """, ) args = ["--dist=each", "-v"] args += ["--tx", "popen//python=%s" % interpreters[0]] args += ["--tx", "popen//python=%s" % interpreters[1]] result = pytester.runpytest(*args) s = result.stdout.str() assert "2...5" in s assert "2...6" in s
TestDistEach
python
great-expectations__great_expectations
great_expectations/render/components.py
{ "start": 21435, "end": 22593 }
class ____(RenderedComponentContent): def __init__( self, text, header=None, subheader=None, styling=None, content_block_type="text" ) -> None: super().__init__(content_block_type=content_block_type, styling=styling) self.text = text self.header = header self.subheader = subheader @override def to_json_dict(self) -> dict[str, JSONValues]: """Returns a JSON-serializable dict representation of this TextContent. Returns: A JSON-serializable dict representation of this TextContent. """ d = super().to_json_dict() if self.header is not None: if isinstance(self.header, RenderedContent): d["header"] = self.header.to_json_dict() else: d["header"] = self.header if self.subheader is not None: if isinstance(self.subheader, RenderedContent): d["subheader"] = self.subheader.to_json_dict() else: d["subheader"] = self.subheader d["text"] = RenderedContent.rendered_content_list_to_json(self.text) return d
TextContent
python
huggingface__transformers
src/transformers/models/dinat/modeling_dinat.py
{ "start": 22409, "end": 25305 }
class ____(DinatPreTrainedModel): def __init__(self, config, add_pooling_layer=True): r""" add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer """ super().__init__(config) requires_backends(self, ["natten"]) self.config = config self.num_levels = len(config.depths) self.num_features = int(config.embed_dim * 2 ** (self.num_levels - 1)) self.embeddings = DinatEmbeddings(config) self.encoder = DinatEncoder(config) self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps) self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, DinatModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output = self.embeddings(pixel_values) encoder_outputs = self.encoder( embedding_output, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = None if self.pooler is not None: pooled_output = self.pooler(sequence_output.flatten(1, 2).transpose(1, 2)) pooled_output = torch.flatten(pooled_output, 1) if not return_dict: output = (sequence_output, pooled_output) + encoder_outputs[1:] return output return DinatModelOutput( last_hidden_state=sequence_output, 
pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states, ) @auto_docstring( custom_intro=""" Dinat Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet. """ )
DinatModel
python
charliermarsh__ruff
crates/ruff_python_formatter/resources/test/fixtures/black/cases/torture.py
{ "start": 330, "end": 1044 }
class ____: def foo(self): for _ in range(10): aaaaaaaaaaaaaaaaaaa = bbbbbbbbbbbbbbb.cccccccccc( # pylint: disable=no-member xxxxxxxxxxxx ) def test(self, othr): return (1 == 2 and (name, description, self.default, self.selected, self.auto_generated, self.parameters, self.meta_data, self.schedule) == (name, description, othr.default, othr.selected, othr.auto_generated, othr.parameters, othr.meta_data, othr.schedule)) assert ( a_function(very_long_arguments_that_surpass_the_limit, which_is_eighty_eight_in_this_case_plus_a_bit_more) == {"x": "this need to pass the line limit as well", "b": "but only by a little bit"} )
A
python
PyCQA__pylint
tests/functional/n/non/non_iterator_returned.py
{ "start": 1103, "end": 1253 }
class ____: """__iter__ returns a class which uses an iterator-metaclass.""" def __iter__(self): return IteratorClass
FifthGoodIterator
python
dagster-io__dagster
python_modules/dagster/dagster/_core/errors.py
{ "start": 20022, "end": 20162 }
class ____(DagsterUserCodeExecutionError): """Errors raised in a user process during the loading of user code."""
DagsterUserCodeLoadError
python
allegroai__clearml
clearml/utilities/pyhocon/config_parser.py
{ "start": 29631, "end": 29986 }
class ____(TokenConverter): def __init__(self, expr=None): super(ConcatenatedValueParser, self).__init__(expr) self.parent = None self.key = None def postParse(self, instring, loc, token_list): config_values = ConfigValues(token_list, instring, loc) return [config_values.transform()]
ConcatenatedValueParser
python
spyder-ide__spyder
spyder/plugins/shortcuts/widgets/table.py
{ "start": 4839, "end": 17727 }
class ____(QDialog): """A dialog for entering key sequences.""" def __init__(self, parent, context, name, sequence, shortcuts): super().__init__(parent) self._parent = parent self.setWindowFlags(self.windowFlags() & ~Qt.WindowContextHelpButtonHint) self.context = context self.name = name self.shortcuts = shortcuts self.current_sequence = sequence or _('<None>') self._qsequences = list() self.setup() self.update_warning() @property def new_sequence(self): """Return a string representation of the new key sequence.""" return ', '.join(self._qsequences) @property def new_qsequence(self): """Return the QKeySequence object of the new key sequence.""" return QKeySequence(self.new_sequence) def setup(self): """Setup the ShortcutEditor with the provided arguments.""" # Widgets self.label_info = QLabel() self.label_info.setText( _( "Press the new shortcut and select <b>Ok</b> to confirm, " "click <b>Cancel</b> to revert to the previous state, " "use <b>Clear</b> to unbind the command from a shortcut " "or press <b>Default</b> to restore the default shortcut." 
) ) self.label_info.setAlignment(Qt.AlignTop | Qt.AlignLeft) self.label_info.setWordWrap(True) layout_info = QHBoxLayout() layout_info.setContentsMargins(0, 0, 0, 0) layout_info.addWidget(self.label_info) layout_info.setStretch(1, 100) self.label_current_sequence = QLabel(_("Current shortcut:")) self.text_current_sequence = QLabel(self.current_sequence) self.label_new_sequence = QLabel(_("New shortcut:")) self.text_new_sequence = ShortcutLineEdit(self) self.text_new_sequence.setPlaceholderText(_("Press shortcut.")) self.helper_button = IconWidget(parent=self) self.helper_button.setIcon(QIcon()) self.label_warning = QLabel() self.label_warning.setWordWrap(True) self.label_warning.setAlignment(Qt.AlignTop | Qt.AlignLeft) self.button_default = QPushButton(_('Default')) self.button_clear = QPushButton(_('Clear')) self.button_ok = QPushButton(_('Ok')) self.button_ok.setEnabled(False) self.button_cancel = QPushButton(_('Cancel')) button_box = QHBoxLayout() button_box.addWidget(self.button_default) button_box.addWidget(self.button_clear) button_box.addStretch(100) button_box.addWidget(self.button_ok) button_box.addWidget(self.button_cancel) # New Sequence button box self.btn_clear_sequence = create_toolbutton( self, icon=ima.icon('filecloseall'), tip=_("Clear all entered key sequences"), triggered=self.clear_new_sequence ) self.button_back_sequence = create_toolbutton( self, icon=ima.icon('fileclose'), tip=_("Remove last key sequence entered"), triggered=self.back_new_sequence ) newseq_btnbar = QHBoxLayout() newseq_btnbar.setSpacing(3) newseq_btnbar.setContentsMargins(0, 0, 0, 0) newseq_btnbar.addWidget(self.button_back_sequence) newseq_btnbar.addWidget(self.btn_clear_sequence) # Setup widgets self.setWindowTitle(_('Shortcut: {0}').format(self.name)) # Layout layout_sequence = QGridLayout() layout_sequence.setContentsMargins(0, 0, 0, 0) layout_sequence.addLayout(layout_info, 0, 0, 1, 4) layout_sequence.addItem(QSpacerItem(15, 15), 1, 0, 1, 4) 
layout_sequence.addWidget(self.label_current_sequence, 2, 0) layout_sequence.addWidget(self.text_current_sequence, 2, 2) layout_sequence.addWidget(self.label_new_sequence, 3, 0) layout_sequence.addWidget(self.helper_button, 3, 1) layout_sequence.addWidget(self.text_new_sequence, 3, 2) layout_sequence.addLayout(newseq_btnbar, 3, 3) layout_sequence.addWidget(self.label_warning, 4, 2, 1, 2) layout_sequence.setColumnStretch(2, 100) layout_sequence.setRowStretch(4, 100) layout = QVBoxLayout(self) layout.addLayout(layout_sequence) layout.addSpacing(10) layout.addLayout(button_box) layout.setSizeConstraint(QLayout.SizeConstraint.SetFixedSize) # Signals self.button_ok.clicked.connect(self.accept_override) self.button_clear.clicked.connect(self.unbind_shortcut) self.button_cancel.clicked.connect(self.reject) self.button_default.clicked.connect(self.set_sequence_to_default) # Set all widget to no focus so that we can register <Tab> key # press event. widgets = ( self.label_warning, self.helper_button, self.text_new_sequence, self.button_clear, self.button_default, self.button_cancel, self.button_ok, self.btn_clear_sequence, self.button_back_sequence) for w in widgets: w.setFocusPolicy(Qt.NoFocus) w.clearFocus() @Slot() def reject(self): """Slot for rejected signal.""" # Added for spyder-ide/spyder#5426. Due to the focusPolicy of # Qt.NoFocus for the buttons, if the cancel button was clicked without # first setting focus to the button, it would cause a seg fault crash. self.button_cancel.setFocus() super().reject() @Slot() def accept(self): """Slot for accepted signal.""" # Added for spyder-ide/spyder#5426. Due to the focusPolicy of # Qt.NoFocus for the buttons, if the cancel button was clicked without # first setting focus to the button, it would cause a seg fault crash. self.button_ok.setFocus() super().accept() def event(self, event): """Qt method override.""" # We reroute all ShortcutOverride events to our keyPressEvent and block # any KeyPress and Shortcut event. 
This allows to register default # Qt shortcuts for which no key press event are emitted. # See spyder-ide/spyder/issues/10786. if event.type() == QEvent.ShortcutOverride: self.keyPressEvent(event) return True elif event.type() in [QEvent.KeyPress, QEvent.Shortcut]: return True else: return super().event(event) def keyPressEvent(self, event): """Qt method override.""" event_key = event.key() if not event_key or event_key == Qt.Key_unknown: return if len(self._qsequences) == 4: # QKeySequence accepts a maximum of 4 different sequences. return if event_key in [Qt.Key_Control, Qt.Key_Shift, Qt.Key_Alt, Qt.Key_Meta]: # The event corresponds to just and only a special key. return translator = ShortcutTranslator() event_keyseq = translator.keyevent_to_keyseq(event) event_keystr = event_keyseq.toString(QKeySequence.PortableText) self._qsequences.append(event_keystr) self.update_warning() def check_conflicts(self): """Check shortcuts for conflicts.""" conflicts = [] if len(self._qsequences) == 0: return conflicts new_qsequence = self.new_qsequence no_match = QKeySequence.SequenceMatch.NoMatch for shortcut in self.shortcuts: shortcut_qsequence = QKeySequence.fromString(str(shortcut.key)) if shortcut_qsequence.isEmpty(): continue if (shortcut.context, shortcut.name) == (self.context, self.name): continue if shortcut.context in [self.context, '_'] or self.context == '_': if (shortcut_qsequence.matches(new_qsequence) != no_match or new_qsequence.matches(shortcut_qsequence) != no_match): conflicts.append(shortcut) return conflicts def check_ascii(self): """ Check that all characters in the new sequence are ascii or else the shortcut will not work. 
""" try: self.new_sequence.encode('ascii') except UnicodeEncodeError: return False else: return True def check_singlekey(self): """Check if the first sub-sequence of the new key sequence is valid.""" if len(self._qsequences) == 0: return True else: keystr = self._qsequences[0] valid_single_keys = (EDITOR_SINGLE_KEYS if self.context == 'editor' else SINGLE_KEYS) if any((m in keystr for m in ('Ctrl', 'Alt', 'Shift', 'Meta'))): return True else: # This means that the the first subsequence is composed of # a single key with no modifier. valid_single_keys = (EDITOR_SINGLE_KEYS if self.context == 'editor' else SINGLE_KEYS) if any((k == keystr for k in valid_single_keys)): return True else: return False def update_warning(self): """Update the warning label, buttons state and sequence text.""" new_qsequence = self.new_qsequence new_sequence = self.new_sequence self.text_new_sequence.setText( new_qsequence.toString(QKeySequence.NativeText)) conflicts = self.check_conflicts() if len(self._qsequences) == 0: warning = SEQUENCE_EMPTY tip = '' icon = QIcon() elif conflicts: warning = SEQUENCE_CONFLICT template = '<p style="margin-bottom: 5px">{0}</p>{1}{2}' tip_title = _('This key sequence conflicts with:') tip_body = '' for s in conflicts: tip_body += '&nbsp;' * 2 tip_body += ' - {0}: <b>{1}</b><br>'.format(s.context, s.name) tip_body += '<br>' if len(conflicts) == 1: tip_override = _( "Press 'Ok' to unbind it and assign the shortcut to" ) else: tip_override = _( "Press 'Ok' to unbind them and assign the shortcut to" ) tip_override += ' <b>{}</b>.'.format(self.name) tip = template.format(tip_title, tip_body, tip_override) icon = ima.icon('warning') elif new_sequence in BLACKLIST: warning = IN_BLACKLIST tip = _('This key sequence is forbidden.') icon = ima.icon('warning') elif self.check_singlekey() is False or self.check_ascii() is False: warning = INVALID_KEY tip = _('This key sequence is invalid.') icon = ima.icon('warning') else: warning = NO_WARNING tip = _('This key 
sequence is valid.') icon = ima.icon('dependency_ok') self.warning = warning self.conflicts = conflicts self.helper_button.setIcon(icon) self.button_ok.setEnabled( self.warning in [NO_WARNING, SEQUENCE_CONFLICT]) self.label_warning.setText(tip) def set_sequence_from_str(self, sequence): """ This is a convenience method to set the new QKeySequence of the shortcut editor from a string. """ self._qsequences = [QKeySequence(s) for s in sequence.split(', ')] self.update_warning() def set_sequence_to_default(self): """Set the new sequence to the default value defined in the config.""" sequence = CONF.get_default( 'shortcuts', "{}/{}".format(self.context, self.name)) if sequence and sequence is not NoDefault: self._qsequences = sequence.split(', ') self.update_warning() else: self.unbind_shortcut() def back_new_sequence(self): """Remove the last subsequence from the sequence compound.""" self._qsequences = self._qsequences[:-1] self.update_warning() def clear_new_sequence(self): """Clear the new sequence.""" self._qsequences = [] self.update_warning() def unbind_shortcut(self): """Unbind the shortcut.""" self._qsequences = [] self.accept() def accept_override(self): """Unbind all conflicted shortcuts, and accept the new one""" conflicts = self.check_conflicts() if conflicts: for shortcut in conflicts: shortcut.key = '' self.accept()
ShortcutEditor
python
Farama-Foundation__Gymnasium
gymnasium/spaces/text.py
{ "start": 337, "end": 9681 }
class ____(Space[str]): r"""A space representing a string comprised of characters from a given charset. Example: >>> from gymnasium.spaces import Text >>> # {"", "B5", "hello", ...} >>> Text(5) Text(1, 5, charset=0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz) >>> # {"0", "42", "0123456789", ...} >>> import string >>> Text(min_length = 1, ... max_length = 10, ... charset = string.digits) Text(1, 10, charset=0123456789) """ def __init__( self, max_length: int, *, min_length: int = 1, charset: frozenset[str] | str = alphanumeric, seed: int | np.random.Generator | None = None, ): r"""Constructor of :class:`Text` space. Both bounds for text length are inclusive. Args: min_length (int): Minimum text length (in characters). Defaults to 1 to prevent empty strings. max_length (int): Maximum text length (in characters). charset (Union[set], str): Character set, defaults to the lower and upper english alphabet plus latin digits. seed: The seed for sampling from the space. """ assert np.issubdtype( type(min_length), np.integer ), f"Expects the min_length to be an integer, actual type: {type(min_length)}" assert np.issubdtype( type(max_length), np.integer ), f"Expects the max_length to be an integer, actual type: {type(max_length)}" assert ( 0 <= min_length ), f"Minimum text length must be non-negative, actual value: {min_length}" assert ( min_length <= max_length ), f"The min_length must be less than or equal to the max_length, min_length: {min_length}, max_length: {max_length}" self.min_length: int = int(min_length) self.max_length: int = int(max_length) self._char_set: frozenset[str] = frozenset(charset) self._char_list: tuple[str, ...] 
= tuple(charset) self._char_index: dict[str, np.int32] = { val: np.int32(i) for i, val in enumerate(tuple(charset)) } self._char_str: str = "".join(sorted(tuple(charset))) # As the shape is dynamic (between min_length and max_length) then None super().__init__(dtype=str, seed=seed) def sample( self, mask: None | (tuple[int | None, NDArray[np.int8] | None]) = None, probability: None | (tuple[int | None, NDArray[np.float64] | None]) = None, ) -> str: """Generates a single random sample from this space with by default a random length between ``min_length`` and ``max_length`` and sampled from the ``charset``. Args: mask: An optional tuples of length and mask for the text. The length is expected to be between the ``min_length`` and ``max_length``. Otherwise, a random integer between ``min_length`` and ``max_length`` is selected. For the mask, we expect a numpy array of length of the charset passed with ``dtype == np.int8``. If the charlist mask is all zero then an empty string is returned no matter the ``min_length`` probability: An optional tuples of length and probability mask for the text. The length is expected to be between the ``min_length`` and ``max_length``. Otherwise, a random integer between ``min_length`` and ``max_length`` is selected. For the probability mask, we expect a numpy array of length of the charset passed with ``dtype == np.float64``. The sum of the probability mask should be 1, otherwise an exception is raised. 
Returns: A sampled string from the space """ if mask is not None and probability is not None: raise ValueError( f"Only one of `mask` or `probability` can be provided, actual values: mask={mask}, probability={probability}" ) elif mask is not None: length, charlist_mask = self._validate_mask(mask, np.int8, "mask") if charlist_mask is not None: assert np.all( np.logical_or(charlist_mask == 0, charlist_mask == 1) ), f"Expects all mask values to 0 or 1, actual values: {charlist_mask}" # normalise the mask to use as a probability if np.sum(charlist_mask) > 0: charlist_mask = charlist_mask / np.sum(charlist_mask) elif probability is not None: length, charlist_mask = self._validate_mask( probability, np.float64, "probability" ) if charlist_mask is not None: assert np.all( np.logical_and(charlist_mask >= 0, charlist_mask <= 1) ), f"Expects all probability mask values to be within 0 and 1, actual values: {charlist_mask}" assert np.isclose( np.sum(charlist_mask), 1 ), f"Expects the sum of the probability mask to be 1, actual sum: {np.sum(charlist_mask)}" else: length = charlist_mask = None if length is None: length = self.np_random.integers(self.min_length, self.max_length + 1) if charlist_mask is None: # uniform sampling charlist_mask = np.ones(len(self.character_set)) / len(self.character_set) if np.all(charlist_mask == 0): if self.min_length == 0: return "" else: # Otherwise the string will not be contained in the space raise ValueError( f"Trying to sample with a minimum length > 0 (actual minimum length={self.min_length}) but the character mask is all zero meaning that no character could be sampled." 
) string = self.np_random.choice( self.character_list, size=length, p=charlist_mask ) return "".join(string) def _validate_mask( self, mask: tuple[int | None, NDArray[np.int8] | NDArray[np.float64] | None], expected_dtype: np.dtype, mask_type: str, ) -> tuple[int | None, NDArray[np.int8] | NDArray[np.float64] | None]: assert isinstance( mask, tuple ), f"Expects the `{mask_type}` type to be a tuple, actual type: {type(mask)}" assert ( len(mask) == 2 ), f"Expects the `{mask_type}` length to be two, actual length: {len(mask)}" length, charlist_mask = mask if length is not None: assert np.issubdtype( type(length), np.integer ), f"Expects the Text sample length to be an integer, actual type: {type(length)}" assert ( self.min_length <= length <= self.max_length ), f"Expects the Text sample length be between {self.min_length} and {self.max_length}, actual length: {length}" if charlist_mask is not None: assert isinstance( charlist_mask, np.ndarray ), f"Expects the Text sample `{mask_type}` to be an np.ndarray, actual type: {type(charlist_mask)}" assert ( charlist_mask.dtype == expected_dtype ), f"Expects the Text sample `{mask_type}` to be type {expected_dtype}, actual dtype: {charlist_mask.dtype}" assert charlist_mask.shape == ( len(self.character_set), ), f"expects the Text sample `{mask_type}` to be {(len(self.character_set),)}, actual shape: {charlist_mask.shape}" return length, charlist_mask def contains(self, x: Any) -> bool: """Return boolean specifying if x is a valid member of this space.""" if isinstance(x, str): if self.min_length <= len(x) <= self.max_length: return all(c in self.character_set for c in x) return False def __repr__(self) -> str: """Gives a string representation of this space.""" return f"Text({self.min_length}, {self.max_length}, charset={self.characters})" def __eq__(self, other: Any) -> bool: """Check whether ``other`` is equivalent to this instance.""" return ( isinstance(other, Text) and self.min_length == other.min_length and 
self.max_length == other.max_length and self.character_set == other.character_set ) @property def character_set(self) -> frozenset[str]: """Returns the character set for the space.""" return self._char_set @property def character_list(self) -> tuple[str, ...]: """Returns a tuple of characters in the space.""" return self._char_list def character_index(self, char: str) -> np.int32: """Returns a unique index for each character in the space's character set.""" return self._char_index[char] @property def characters(self) -> str: """Returns a string with all Text characters.""" return self._char_str @property def is_np_flattenable(self) -> bool: """The flattened version is an integer array for each character, padded to the max character length.""" return True
Text
python
graphql-python__graphene
graphene/relay/tests/test_custom_global_id.py
{ "start": 224, "end": 2491 }
class ____: def setup_method(self): self.user_list = [ {"id": uuid4(), "name": "First"}, {"id": uuid4(), "name": "Second"}, {"id": uuid4(), "name": "Third"}, {"id": uuid4(), "name": "Fourth"}, ] self.users = {user["id"]: user for user in self.user_list} class CustomNode(Node): class Meta: global_id_type = UUIDGlobalIDType class User(ObjectType): class Meta: interfaces = [CustomNode] name = String() @classmethod def get_node(cls, _type, _id): return self.users[_id] class RootQuery(ObjectType): user = CustomNode.Field(User) self.schema = Schema(query=RootQuery, types=[User]) self.graphql_schema = self.schema.graphql_schema def test_str_schema_correct(self): """ Check that the schema has the expected and custom node interface and user type and that they both use UUIDs """ parsed = re.findall(r"(.+) \{\n\s*([\w\W]*?)\n\}", str(self.schema)) types = [t for t, f in parsed] fields = [f for t, f in parsed] custom_node_interface = "interface CustomNode" assert custom_node_interface in types assert ( '"""The ID of the object"""\n id: UUID!' == fields[types.index(custom_node_interface)] ) user_type = "type User implements CustomNode" assert user_type in types assert ( '"""The ID of the object"""\n id: UUID!\n name: String' == fields[types.index(user_type)] ) def test_get_by_id(self): query = """query userById($id: UUID!) { user(id: $id) { id name } }""" # UUID need to be converted to string for serialization result = graphql_sync( self.graphql_schema, query, variable_values={"id": str(self.user_list[0]["id"])}, ) assert not result.errors assert result.data["user"]["id"] == str(self.user_list[0]["id"]) assert result.data["user"]["name"] == self.user_list[0]["name"]
TestUUIDGlobalID
python
scrapy__scrapy
tests/spiders.py
{ "start": 1754, "end": 2306 }
class ____(MetaSpider): name = "delay" def __init__(self, n=1, b=0, *args, **kwargs): super().__init__(*args, **kwargs) self.n = n self.b = b self.t1 = self.t2 = self.t2_err = 0 async def start(self): self.t1 = time.time() url = self.mockserver.url(f"/delay?n={self.n}&b={self.b}") yield Request(url, callback=self.parse, errback=self.errback) def parse(self, response): self.t2 = time.time() def errback(self, failure): self.t2_err = time.time()
DelaySpider
python
ansible__ansible
lib/ansible/errors/__init__.py
{ "start": 7018, "end": 7133 }
class ____(AnsibleParserError): """Errors caused during field attribute processing."""
AnsibleFieldAttributeError
python
python-poetry__poetry
tests/integration/test_utils_vcs_git.py
{ "start": 1088, "end": 13908 }
class ____(TypedDict): name: str | None branch: str | None tag: str | None revision: str | None source_root: Path | None clean: bool @pytest.fixture(autouse=True) def git_mock() -> None: pass @pytest.fixture(autouse=True) def setup(config: Config) -> None: pass REVISION_TO_VERSION_MAP = { "b6204750a763268e941cec1f05f8986b6c66913e": "0.1.0", # Annotated Tag "18d3ff247d288da701fc7f9ce2ec718388fca266": "0.1.1-alpha.0", "dd07e8d4efb82690e7975b289917a7782fbef29b": "0.2.0-alpha.0", "7263819922b4cd008afbb447f425a562432dad7d": "0.2.0-alpha.1", } BRANCH_TO_REVISION_MAP = {"0.1": "18d3ff247d288da701fc7f9ce2ec718388fca266"} TAG_TO_REVISION_MAP = {"v0.1.0": "b6204750a763268e941cec1f05f8986b6c66913e"} REF_TO_REVISION_MAP = { "branch": BRANCH_TO_REVISION_MAP, "tag": TAG_TO_REVISION_MAP, } @pytest.fixture def use_system_git_client(config: Config) -> None: config.merge({"system-git-client": True}) @pytest.fixture(scope="module") def source_url() -> str: return "https://github.com/python-poetry/test-fixture-vcs-repository.git" @pytest.fixture(scope="module") def source_directory_name(source_url: str) -> str: return Git.get_name_from_source_url(url=source_url) @pytest.fixture(scope="module") def local_repo( tmp_path_factory: TempPathFactory, source_directory_name: str ) -> Iterator[Repo]: with Repo.init( str(tmp_path_factory.mktemp("src") / source_directory_name), mkdir=True ) as repo: yield repo @pytest.fixture(scope="module") def _remote_refs(source_url: str, local_repo: Repo) -> FetchPackResult: client: GitClient path: str client, path = get_transport_and_path(source_url) return client.fetch( path, local_repo, determine_wants=local_repo.object_store.determine_wants_all ) @pytest.fixture def remote_refs(_remote_refs: FetchPackResult) -> FetchPackResult: return deepcopy(_remote_refs) @pytest.fixture(scope="module") def remote_default_ref(_remote_refs: FetchPackResult) -> bytes: ref: bytes = _remote_refs.symrefs[b"HEAD"] return ref @pytest.fixture(scope="module") def 
remote_default_branch(remote_default_ref: bytes) -> str: return remote_default_ref.decode("utf-8").replace("refs/heads/", "") # Regression test for https://github.com/python-poetry/poetry/issues/6722 def test_use_system_git_client_from_environment_variables() -> None: os.environ["POETRY_SYSTEM_GIT_CLIENT"] = "true" assert Git.is_using_legacy_client() def test_git_local_info( source_url: str, remote_refs: FetchPackResult, remote_default_ref: bytes ) -> None: with Git.clone(url=source_url) as repo: info = Git.info(repo=repo) assert info.origin == source_url assert info.revision == remote_refs.refs[remote_default_ref].decode("utf-8") @pytest.mark.parametrize( "specification", [{}, {"revision": "HEAD"}, {"branch": "HEAD"}] ) def test_git_clone_default_branch_head( specification: GitCloneKwargs, source_url: str, remote_refs: FetchPackResult, remote_default_ref: bytes, mocker: MockerFixture, ) -> None: spy = mocker.spy(Git, "_clone") spy_legacy = mocker.spy(Git, "_clone_legacy") with Git.clone(url=source_url, **specification) as repo: assert remote_refs.refs[remote_default_ref] == repo.head() spy_legacy.assert_not_called() spy.assert_called() def test_git_clone_fails_for_non_existent_branch(source_url: str) -> None: branch = uuid.uuid4().hex with pytest.raises(PoetryConsoleError) as e: Git.clone(url=source_url, branch=branch) assert f"Failed to clone {source_url} at '{branch}'" in str(e.value) def test_git_clone_fails_for_non_existent_revision(source_url: str) -> None: revision = sha1(uuid.uuid4().bytes).hexdigest() with pytest.raises(PoetryConsoleError) as e: Git.clone(url=source_url, revision=revision) assert f"Failed to clone {source_url} at '{revision}'" in str(e.value) def assert_version(repo: Repo, expected_revision: str) -> None: version = PyProjectTOML( path=Path(repo.path).joinpath("pyproject.toml") ).poetry_config["version"] revision = Git.get_revision(repo=repo) assert revision == expected_revision assert revision in REVISION_TO_VERSION_MAP assert version == 
REVISION_TO_VERSION_MAP[revision] def test_git_clone_when_branch_is_ref(source_url: str) -> None: with Git.clone(url=source_url, branch="refs/heads/0.1") as repo: assert_version(repo, BRANCH_TO_REVISION_MAP["0.1"]) @pytest.mark.parametrize("branch", [*BRANCH_TO_REVISION_MAP.keys()]) def test_git_clone_branch( source_url: str, remote_refs: FetchPackResult, branch: str ) -> None: with Git.clone(url=source_url, branch=branch) as repo: assert_version(repo, BRANCH_TO_REVISION_MAP[branch]) @pytest.mark.parametrize("tag", [*TAG_TO_REVISION_MAP.keys()]) def test_git_clone_tag(source_url: str, remote_refs: FetchPackResult, tag: str) -> None: with Git.clone(url=source_url, tag=tag) as repo: assert_version(repo, TAG_TO_REVISION_MAP[tag]) def test_git_clone_multiple_times( source_url: str, remote_refs: FetchPackResult ) -> None: for revision in REVISION_TO_VERSION_MAP: with Git.clone(url=source_url, revision=revision) as repo: assert_version(repo, revision) def test_git_clone_revision_is_branch( source_url: str, remote_refs: FetchPackResult ) -> None: with Git.clone(url=source_url, revision="0.1") as repo: assert_version(repo, BRANCH_TO_REVISION_MAP["0.1"]) def test_git_clone_revision_is_ref( source_url: str, remote_refs: FetchPackResult ) -> None: with Git.clone(url=source_url, revision="refs/heads/0.1") as repo: assert_version(repo, BRANCH_TO_REVISION_MAP["0.1"]) @pytest.mark.parametrize( ("revision", "expected_revision"), [ ("0.1", BRANCH_TO_REVISION_MAP["0.1"]), ("v0.1.0", TAG_TO_REVISION_MAP["v0.1.0"]), *zip(REVISION_TO_VERSION_MAP, REVISION_TO_VERSION_MAP), ], ) def test_git_clone_revision_is_tag( source_url: str, remote_refs: FetchPackResult, revision: str, expected_revision: str ) -> None: with Git.clone(url=source_url, revision=revision) as repo: assert_version(repo, expected_revision) def test_git_clone_clones_submodules(source_url: str) -> None: with Git.clone(url=source_url) as repo: submodule_package_directory = ( Path(repo.path) / "submodules" / 
"sample-namespace-packages" ) assert submodule_package_directory.exists() assert submodule_package_directory.joinpath("README.md").exists() assert len(list(submodule_package_directory.glob("*"))) > 1 def test_git_clone_clones_submodules_with_relative_urls(source_url: str) -> None: with Git.clone(url=source_url, branch="relative_submodule") as repo: submodule_package_directory = ( Path(repo.path) / "submodules" / "relative-url-submodule" ) assert submodule_package_directory.exists() assert submodule_package_directory.joinpath("README.md").exists() assert len(list(submodule_package_directory.glob("*"))) > 1 def test_git_clone_clones_submodules_with_relative_urls_and_explicit_base( source_url: str, ) -> None: with Git.clone(url=source_url, branch="relative_submodule") as repo: submodule_package_directory = ( Path(repo.path) / "submodules" / "relative-url-submodule-with-base" ) assert submodule_package_directory.exists() assert submodule_package_directory.joinpath("README.md").exists() assert len(list(submodule_package_directory.glob("*"))) > 1 def test_system_git_fallback_on_http_401( mocker: MockerFixture, source_url: str, tmp_path: Path, ) -> None: spy = mocker.spy(Git, "_clone_legacy") mocker.patch.object( Git, "_clone", side_effect=HTTPUnauthorized(None, None), ) # use tmp_path for source_root to get a shorter path, # because long paths can cause issues with the system git client on Windows # despite of setting core.longpaths=true with Git.clone(url=source_url, branch="0.1", source_root=tmp_path) as repo: path = Path(repo.path) assert_version(repo, BRANCH_TO_REVISION_MAP["0.1"]) spy.assert_called_with( url="https://github.com/python-poetry/test-fixture-vcs-repository.git", target=path, refspec=GitRefSpec(branch="0.1", revision=None, tag=None, ref=b"HEAD"), ) spy.assert_called_once() GIT_USERNAME = os.environ.get("POETRY_TEST_INTEGRATION_GIT_USERNAME") GIT_PASSWORD = os.environ.get("POETRY_TEST_INTEGRATION_GIT_PASSWORD") HTTP_AUTH_CREDENTIALS_UNAVAILABLE = not 
(GIT_USERNAME and GIT_PASSWORD) @pytest.mark.skipif( HTTP_AUTH_CREDENTIALS_UNAVAILABLE, reason="HTTP authentication credentials not available", ) def test_configured_repository_http_auth( mocker: MockerFixture, source_url: str, config: Config ) -> None: from poetry.vcs.git import backend spy_clone_legacy = mocker.spy(Git, "_clone_legacy") spy_get_transport_and_path = mocker.spy(backend, "get_transport_and_path") config.merge( { "repositories": {"git-repo": {"url": source_url}}, "http-basic": { "git-repo": { "username": GIT_USERNAME, "password": GIT_PASSWORD, } }, } ) dummy_git_config = ConfigFile() mocker.patch( "poetry.vcs.git.backend.Repo.get_config_stack", return_value=dummy_git_config, ) mocker.patch( "poetry.vcs.git.backend.get_default_authenticator", return_value=Authenticator(config=config), ) with Git.clone(url=source_url, branch="0.1") as repo: assert_version(repo, BRANCH_TO_REVISION_MAP["0.1"]) spy_clone_legacy.assert_not_called() spy_get_transport_and_path.assert_called_with( location=source_url, config=dummy_git_config, username=GIT_USERNAME, password=GIT_PASSWORD, ) spy_get_transport_and_path.assert_called_once() def test_username_password_parameter_is_not_passed_to_dulwich( mocker: MockerFixture, source_url: str, config: Config ) -> None: from poetry.vcs.git import backend spy_clone = mocker.spy(Git, "_clone") spy_get_transport_and_path = mocker.spy(backend, "get_transport_and_path") dummy_git_config = ConfigFile() mocker.patch( "poetry.vcs.git.backend.Repo.get_config_stack", return_value=dummy_git_config, ) with Git.clone(url=source_url, branch="0.1") as repo: assert_version(repo, BRANCH_TO_REVISION_MAP["0.1"]) spy_clone.assert_called_once() spy_get_transport_and_path.assert_called_with( location=source_url, config=dummy_git_config, ) spy_get_transport_and_path.assert_called_once() def test_system_git_called_when_configured( mocker: MockerFixture, source_url: str, use_system_git_client: None, tmp_path: Path ) -> None: spy_legacy = mocker.spy(Git, 
"_clone_legacy") spy = mocker.spy(Git, "_clone") # use tmp_path for source_root to get a shorter path, # because long paths can cause issues with the system git client on Windows # despite of setting core.longpaths=true with Git.clone(url=source_url, branch="0.1", source_root=tmp_path) as repo: path = Path(repo.path) assert_version(repo, BRANCH_TO_REVISION_MAP["0.1"]) spy.assert_not_called() spy_legacy.assert_called_once() spy_legacy.assert_called_with( url=source_url, target=path, refspec=GitRefSpec(branch="0.1", revision=None, tag=None, ref=b"HEAD"), ) def test_relative_submodules_with_ssh( source_url: str, tmpdir: Path, mocker: MockerFixture ) -> None: target = tmpdir / "temp" ssh_source_url = urlunparse(urlparse(source_url)._replace(scheme="ssh")) repo_with_unresolved_submodules = Git._clone( url=source_url, refspec=GitRefSpec(branch="relative_submodule"), target=target, ) # construct fake git config fake_config = ConfigFile( {(b"remote", b"origin"): {b"url": ssh_source_url.encode("utf-8")}} ) # trick Git into thinking remote.origin is an ssh url mock_get_config = mocker.patch.object(repo_with_unresolved_submodules, "get_config") mock_get_config.return_value = fake_config submodules = Git._get_submodules(repo_with_unresolved_submodules) assert [s.url for s in submodules] == [ "https://github.com/pypa/sample-namespace-packages.git", "ssh://github.com/python-poetry/test-fixture-vcs-repository.git", "ssh://github.com/python-poetry/test-fixture-vcs-repository.git", ]
GitCloneKwargs
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/triggers/step_function.py
{ "start": 1105, "end": 2685 }
class ____(AwsBaseWaiterTrigger): """ Trigger to poll for the completion of a Step Functions execution. :param execution_arn: ARN of the state machine to poll :param waiter_delay: The amount of time in seconds to wait between attempts. :param waiter_max_attempts: The maximum number of attempts to be made. :param aws_conn_id: The Airflow connection used for AWS credentials. """ def __init__( self, *, execution_arn: str, waiter_delay: int = 60, waiter_max_attempts: int = 30, aws_conn_id: str | None = None, region_name: str | None = None, **kwargs, ) -> None: super().__init__( serialized_fields={"execution_arn": execution_arn, "region_name": region_name}, waiter_name="step_function_succeeded", waiter_args={"executionArn": execution_arn}, failure_message="Step function failed", status_message="Status of step function execution is", status_queries=["status", "error", "cause"], return_key="execution_arn", return_value=execution_arn, waiter_delay=waiter_delay, waiter_max_attempts=waiter_max_attempts, aws_conn_id=aws_conn_id, **kwargs, ) def hook(self) -> AwsGenericHook: return StepFunctionHook( aws_conn_id=self.aws_conn_id, region_name=self.region_name, verify=self.verify, config=self.botocore_config, )
StepFunctionsExecutionCompleteTrigger
python
PyCQA__pylint
tests/functional/c/consider/consider_using_with_open.py
{ "start": 2453, "end": 5862 }
class ____: """ The message is triggered if a context manager is assigned to a variable, which name is later reassigned without the variable being used inside a ``with`` first. E.g. the following would trigger the message: a = open("foo") # <-- would trigger here a = "something new" But it must not happen that the logic which checks if the same variable is assigned multiple times in different code branches where only one of those assign statements is hit at runtime. For example, the variable could be assigned in an if-else construct. These tests check that the message is not triggered in those circumstances. """ def test_defined_in_if_and_else(self, predicate): if predicate: file_handle = open("foo", encoding="utf8") # must not trigger else: file_handle = open("bar", encoding="utf8") # must not trigger with file_handle: return file_handle.read() def test_defined_in_else_only(self, predicate): if predicate: result = "shiny watermelon" else: file_handle = open("foo", encoding="utf8") # must not trigger with file_handle: result = file_handle.read() return result def test_defined_in_if_only(self, predicate): if predicate: file_handle = open("foo", encoding="utf8") # must not trigger with file_handle: result = file_handle.read() else: result = "shiny watermelon" return result def test_triggers_if_reassigned_after_if_else(self, predicate): if predicate: file_handle = open("foo", encoding="utf8") else: file_handle = open( # [consider-using-with] "bar", encoding="utf8" ) file_handle = None return file_handle def test_defined_in_try_and_except(self): try: file_handle = open("foo", encoding="utf8") # must not trigger except FileNotFoundError: file_handle = open("bar", encoding="utf8") # must not trigger with file_handle: return file_handle.read() def test_defined_in_try_and_finally(self): try: file_handle = open("foo", encoding="utf8") # must not trigger except FileNotFoundError: Path("foo").touch() finally: # +1: [used-before-assignment] file_handle.open("foo", 
encoding="utf") # must not trigger consider-using-with with file_handle: return file_handle.read() def test_defined_in_different_except_handlers(self, a, b): try: result = a/b except ZeroDivisionError: logfile = open("math_errors.txt", encoding="utf8") # must not trigger result = "Can't divide by zero" except TypeError: logfile = open("type_errors.txt", encoding="utf8") # must not trigger result = "Wrong types" else: logfile = open("results.txt", encoding="utf8") # must not trigger with logfile: logfile.write(result) def test_multiple_return_statements(self, predicate): if predicate: return open("foo", encoding="utf8") # must not trigger return open("bar", encoding="utf8") # must not trigger
TestControlFlow
python
run-llama__llama_index
llama-index-core/llama_index/core/tools/query_plan.py
{ "start": 2550, "end": 8216 }
class ____(BaseTool): """ Query plan tool. A tool that takes in a list of tools and executes a query plan. """ def __init__( self, query_engine_tools: List[BaseTool], response_synthesizer: BaseSynthesizer, name: str, description_prefix: str, ) -> None: """Initialize.""" self._query_tools_dict = {t.metadata.name: t for t in query_engine_tools} self._response_synthesizer = response_synthesizer self._name = name self._description_prefix = description_prefix self._custom_metadata: Optional[ToolMetadata] = None @classmethod def from_defaults( cls, query_engine_tools: List[BaseTool], response_synthesizer: Optional[BaseSynthesizer] = None, name: Optional[str] = None, description_prefix: Optional[str] = None, ) -> "QueryPlanTool": """Initialize from defaults.""" name = name or DEFAULT_NAME description_prefix = description_prefix or DEFAULT_DESCRIPTION_PREFIX response_synthesizer = response_synthesizer or get_response_synthesizer() return cls( query_engine_tools=query_engine_tools, response_synthesizer=response_synthesizer, name=name, description_prefix=description_prefix, ) @property def metadata(self) -> ToolMetadata: """Metadata.""" if self._custom_metadata is not None: return self._custom_metadata tools_description = "\n\n".join( [ f"Tool Name: {tool.metadata.name}\n" + f"Tool Description: {tool.metadata.description} " for tool in self._query_tools_dict.values() ] ) # TODO: fill in description with query engine tools. 
description = f"""\ {self._description_prefix}\n\n {tools_description} """ return ToolMetadata(description, self._name, fn_schema=QueryPlan) @metadata.setter def metadata(self, value: ToolMetadata) -> None: self._custom_metadata = value def _execute_node( self, node: QueryNode, nodes_dict: Dict[int, QueryNode] ) -> ToolOutput: """Execute node.""" print_text(f"Executing node {node.model_dump_json()}\n", color="blue") if len(node.dependencies) > 0: print_text( f"Executing {len(node.dependencies)} child nodes\n", color="pink" ) child_query_nodes: List[QueryNode] = [ nodes_dict[dep] for dep in node.dependencies ] # execute the child nodes first child_responses: List[ToolOutput] = [ self._execute_node(child, nodes_dict) for child in child_query_nodes ] # form the child Node/NodeWithScore objects child_nodes = [] for child_query_node, child_response in zip( child_query_nodes, child_responses ): node_text = ( f"Query: {child_query_node.query_str}\n" f"Response: {child_response!s}\n" ) child_node = TextNode(text=node_text) child_nodes.append(child_node) # use response synthesizer to combine results child_nodes_with_scores = [ NodeWithScore(node=n, score=1.0) for n in child_nodes ] response_obj = self._response_synthesizer.synthesize( query=node.query_str, nodes=child_nodes_with_scores, ) response = ToolOutput( content=str(response_obj), tool_name=node.query_str, raw_input={"query": node.query_str}, raw_output=response_obj, ) if node.tool_name in self._query_tools_dict: tool = self._query_tools_dict[node.tool_name] print_text(f"Selected Tool: {tool.metadata}\n", color="pink") response = tool(node.query_str) else: # this is a leaf request, execute the query string using the specified tool tool = self._query_tools_dict[node.tool_name] print_text(f"Selected Tool: {tool.metadata}\n", color="pink") response = tool(node.query_str) print_text( "Executed query, got response.\n" f"Query: {node.query_str}\n" f"Response: {response!s}\n", color="blue", ) return response def 
_find_root_nodes(self, nodes_dict: Dict[int, QueryNode]) -> List[QueryNode]: """Find root node.""" # the root node is the one that isn't a dependency of any other node node_counts = dict.fromkeys(nodes_dict, 0) for node in nodes_dict.values(): for dep in node.dependencies: node_counts[dep] += 1 root_node_ids = [ node_id for node_id, count in node_counts.items() if count == 0 ] return [nodes_dict[node_id] for node_id in root_node_ids] def __call__(self, *args: Any, **kwargs: Any) -> ToolOutput: """Call.""" # the kwargs represented as a JSON object # should be a QueryPlan object query_plan = QueryPlan(**kwargs) nodes_dict = {node.id: node for node in query_plan.nodes} root_nodes = self._find_root_nodes(nodes_dict) if len(root_nodes) > 1: raise ValueError("Query plan should have exactly one root node.") return self._execute_node(root_nodes[0], nodes_dict)
QueryPlanTool
python
jupyterlab__jupyterlab
jupyterlab/labextensions.py
{ "start": 8482, "end": 9601 }
class ____(BaseExtensionApp): description = "(developer) Build labextension" static_url = Unicode("", config=True, help="Sets the url for static assets when building") development = Bool(False, config=True, help="Build in development mode") source_map = Bool(False, config=True, help="Generate source maps") core_path = Unicode( os.path.join(HERE, "staging"), config=True, help="Directory containing core application package.json file", ) aliases = { "static-url": "BuildLabExtensionApp.static_url", "development": "BuildLabExtensionApp.development", "source-map": "BuildLabExtensionApp.source_map", "core-path": "BuildLabExtensionApp.core_path", } def run_task(self): self.extra_args = self.extra_args or [os.getcwd()] build_labextension( self.extra_args[0], logger=self.log, development=self.development, static_url=self.static_url or None, source_map=self.source_map, core_path=self.core_path or None, )
BuildLabExtensionApp
python
pytorch__pytorch
torchgen/gen.py
{ "start": 30358, "end": 38069 }
class ____: @method_with_native_function def __call__(self, f: NativeFunction) -> str | None: # We unconditionally generate function variants of the redispatch API. # This is mainly because we can namespace functions separately, but not methods, sig_group = CppSignatureGroup.from_native_function( f, method=False, fallback_binding=f.manual_cpp_binding ) result = "" for sig in sig_group.signatures(): target_sig = DispatcherSignature.from_schema(f.func) exprs = translate(sig.arguments(), target_sig.arguments()) exprs_str = ", ".join(["dispatchKeySet"] + [a.expr for a in exprs]) result += f""" // aten::{f.func} inline {sig.decl(is_redispatching_fn=True)} {{ return at::_ops::{f.func.name.unambiguous_name()}::redispatch({exprs_str}); }} """ return result # Generates ATenOpList.cpp, a runtime accessible list of all aten # operators. # TODO: This was historically used to help some JIT interop code # figure out whether or not to treat aten namespace'd operators # one way or another, we should reevaluate if this is actually needed. @with_native_function def compute_aten_op(f: NativeFunction) -> str: return f'{{"aten::{f.func.name.name}", "{f.func.name.overload_name}"}},' # Generates MetaFunctions.h def compute_meta_function_declaration(g: NativeFunctionsGroup) -> str | None: if not g.structured: return None with native_function_manager(g.out): name = meta.name(g) args = structured.meta_arguments(g) args_str = ", ".join(a.decl() for a in args) parent_class = g.out.structured_inherits if parent_class is None: parent_class = "at::impl::MetaBase" meta_return = "void" precomputed = g.out.precomputed if g.structured else None if precomputed: # Generate the template declaration with one bool parameter for each # precomputed element. Each parameter is true if the corresponding (in # terms of position) precomputed element has been set. 
precomputed_values = [*precomputed.replace.values(), precomputed.add] precomputed_elements = [ elem for replace_list in precomputed_values for elem in replace_list ] precomputed_template_parameters = [ elem.name.upper() for elem in precomputed_elements ] precomputed_template_params_str = ", ".join( f"bool {param} = false" for param in precomputed_template_parameters ) precompute_template_decl = f"template <{precomputed_template_params_str}>" # Generate a string containing declarations of all precomputed elements. precomputed_elements_with_cpp_types = [ structured.argument_type(elem, binds=elem.name) for elem in precomputed_elements ] precomputed_elements_decl = ";\n".join( f"{elem.cpp_type(strip_ref=True)} {elem.name}" for elem in precomputed_elements_with_cpp_types ) # Generate "setter" methods for each precomputed element. Each method will return # a new instance of precompute_out with the template parameter that corresponds to # the member set by the method to true (to indicate that it has been set). setter_methods = [] for i, elem in enumerate(precomputed_elements): # Generate the signature. The return type will be the same # as the type of `this` but with the template parameter # corresponding to the element set by this method set to true. # The assert generated below will ensure that this template # parameter is false on the type of `this`. return_ty_templates = ", ".join( precomputed_template_parameters[:i] + ["true"] + precomputed_template_parameters[i + 1 :] ) return_ty = f"precompute_out<{return_ty_templates}>" elem_cpp_ty = precomputed_elements_with_cpp_types[i].cpp_type( strip_ref=True ) signature = f"{return_ty} set_{elem.name}({elem_cpp_ty} value)" # Generate an assert which checks that the # template parameter corresponding to the precomputed # element that is set by this method is false on the # class corresponding to the object that `this` points to. # This ensures that each element can be set only once. 
assert_msg = f'"{elem.name} already set"' assert_stmt = f"static_assert({precomputed_template_parameters[i]} == false, {assert_msg});" # Generate the new object construction block. All state # except the element that this method sets is copied from the # object that `this` points to. The value for the element that # the method sets is taken from a method parameter. construction_stmts = [] construction_stmts.append(f"{return_ty} ret;") for j, elem in enumerate(precomputed_elements): if i == j: construction_stmts.append(f"ret.{elem.name} = value;") else: construction_stmts.append( f"ret.{elem.name} = this->{elem.name};" ) construction_stmts.append("return ret;") construction_block = "\n".join(construction_stmts) setter_methods.append( f""" {signature} {{ {assert_stmt} {construction_block} }} """ ) setter_methods_decl = "\n".join(setter_methods) # Meta should return an instance of the struct containing the precomputed elements. meta_return_template_params = ", ".join( ["true"] * len(precomputed_template_parameters) ) # This typedef (actually a using statement) is needed so that TORCH_META_FUNC can reuse the return # type (which has a variable number of template parameters). 
meta_return_typedef = f"using meta_return_ty = precompute_out <{meta_return_template_params}>;" meta_return = "meta_return_ty" precomputed_decl = f""" {precompute_template_decl} struct TORCH_API precompute_out {{ {setter_methods_decl} {precomputed_elements_decl}; }};""" else: meta_return_typedef = "" precomputed_decl = "" return f"""\ struct TORCH_API structured_{name} : public {parent_class} {{ {precomputed_decl} {meta_return_typedef} {meta_return} meta({args_str}); }}; """ def needs_backend_select(f: NativeFunction, selector: SelectiveBuilder) -> bool: name = str(f.func.name.name) if name.endswith("_like") or name.startswith("new_"): return False if f.func.arguments.tensor_options is None: return False return selector.is_native_function_selected(f) # Generates RegisterBackendSelect.cpp, a series of kernels which provide # specialized computation of dispatch key for operator signatures which cannot # be easily done automatically using templating. @dataclass(frozen=True)
ComputeRedispatchFunction
python
tensorflow__tensorflow
tensorflow/python/distribute/distribute_lib.py
{ "start": 156713, "end": 167631 }
class ____(ReplicaContextBase): __doc__ = ReplicaContextBase.__doc__ def all_gather(self, value, axis, options=None): """All-gathers `value` across all replicas along `axis`. Note: An `all_gather` method can only be called in replica context. For a cross-replica context counterpart, see `tf.distribute.Strategy.gather`. All replicas need to participate in the all-gather, otherwise this operation hangs. So if `all_gather` is called in any replica, it must be called in all replicas. Note: If there are multiple `all_gather` calls, they need to be executed in the same order on all replicas. Dispatching `all_gather` based on conditions is usually error-prone. For all strategies except `tf.distribute.TPUStrategy`, the input `value` on different replicas must have the same rank, and their shapes must be the same in all dimensions except the `axis`-th dimension. In other words, their shapes cannot be different in a dimension `d` where `d` does not equal to the `axis` argument. For example, given a `tf.distribute.DistributedValues` with component tensors of shape `(1, 2, 3)` and `(1, 3, 3)` on two replicas, you can call `all_gather(..., axis=1, ...)` on it, but not `all_gather(..., axis=0, ...)` or `all_gather(..., axis=2, ...)`. However, with `tf.distribute.TPUStrategy`, all tensors must have exactly the same rank and same shape. Note: The input `value` must have a non-zero rank. Otherwise, consider using `tf.expand_dims` before gathering them. You can pass in a single tensor to all-gather: >>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"]) >>> @tf.function ... def gather_value(): ... ctx = tf.distribute.get_replica_context() ... local_value = tf.constant([1, 2, 3]) ... 
return ctx.all_gather(local_value, axis=0) >>> result = strategy.run(gather_value) >>> result PerReplica:{ 0: <tf.Tensor: shape=(6,), dtype=int32, numpy=array([1, 2, 3, 1, 2, 3], dtype=int32)>, 1: <tf.Tensor: shape=(6,), dtype=int32, numpy=array([1, 2, 3, 1, 2, 3], dtype=int32)> } >>> strategy.experimental_local_results(result) (<tf.Tensor: shape=(6,), dtype=int32, numpy=array([1, 2, 3, 1, 2, 3], dtype=int32)>, <tf.Tensor: shape=(6,), dtype=int32, numpy=array([1, 2, 3, 1, 2, 3], dtype=int32)>) You can also pass in a nested structure of tensors to all-gather, say, a list: >>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"]) >>> @tf.function ... def gather_nest(): ... ctx = tf.distribute.get_replica_context() ... value_1 = tf.constant([1, 2, 3]) ... value_2 = tf.constant([[1, 2], [3, 4]]) ... # all_gather a nest of `tf.distribute.DistributedValues` ... return ctx.all_gather([value_1, value_2], axis=0) >>> result = strategy.run(gather_nest) >>> result [PerReplica:{ 0: <tf.Tensor: shape=(6,), dtype=int32, numpy=array([1, 2, 3, 1, 2, 3], dtype=int32)>, 1: <tf.Tensor: shape=(6,), dtype=int32, numpy=array([1, 2, 3, 1, 2, 3], dtype=int32)> }, PerReplica:{ 0: <tf.Tensor: shape=(4, 2), dtype=int32, numpy= array([[1, 2], [3, 4], [1, 2], [3, 4]], dtype=int32)>, 1: <tf.Tensor: shape=(4, 2), dtype=int32, numpy= array([[1, 2], [3, 4], [1, 2], [3, 4]], dtype=int32)> }] >>> strategy.experimental_local_results(result) ([<tf.Tensor: shape=(6,), dtype=int32, numpy=array([1, 2, 3, 1, 2, 3], dtype=int32)>, <tf.Tensor: shape=(4, 2), dtype=int32, numpy= array([[1, 2], [3, 4], [1, 2], [3, 4]], dtype=int32)>], [<tf.Tensor: shape=(6,), dtype=int32, numpy=array([1, 2, 3, 1, 2, 3], dtype=int32)>, <tf.Tensor: shape=(4, 2), dtype=int32, numpy= array([[1, 2], [3, 4], [1, 2], [3, 4]], dtype=int32)>]) What if you are all-gathering tensors with different shapes on different replicas? 
Consider the following example with two replicas, where you have `value` as a nested structure consisting of two items to all-gather, `a` and `b`. * On Replica 0, `value` is `{'a': [0], 'b': [[0, 1]]}`. * On Replica 1, `value` is `{'a': [1], 'b': [[2, 3], [4, 5]]}`. * Result for `all_gather` with `axis=0` (on each of the replicas) is: ``` {'a': [1, 2], 'b': [[0, 1], [2, 3], [4, 5]]} ``` Args: value: a nested structure of `tf.Tensor` which `tf.nest.flatten` accepts, or a `tf.distribute.DistributedValues` instance. The structure of the `tf.Tensor` need to be same on all replicas. The underlying tensor constructs can only be dense tensors with non-zero rank, NOT `tf.IndexedSlices`. axis: 0-D int32 Tensor. Dimension along which to gather. options: a `tf.distribute.experimental.CommunicationOptions`. Options to perform collective operations. This overrides the default options if the `tf.distribute.Strategy` takes one in the constructor. See `tf.distribute.experimental.CommunicationOptions` for details of the options. Returns: A nested structure of `tf.Tensor` with the gathered values. The structure is the same as `value`. 
""" for v in nest.flatten(value): if isinstance(v, indexed_slices.IndexedSlices): raise NotImplementedError("all_gather does not support IndexedSlices") if options is None: options = collective_util.Options() def batch_all_gather(strategy, *value_flat): return strategy.extended._batch_gather_to( # pylint: disable=protected-access [(v, _batch_reduce_destination(v)) for v in value_flat], axis, options) @custom_gradient.custom_gradient def grad_wrapper(*xs): ys = self.merge_call(batch_all_gather, args=xs) def grad(*dy_s): grads = self.all_reduce(reduce_util.ReduceOp.SUM, dy_s) new_grads = [] for i, grad in enumerate(grads): input_shape = array_ops.shape(xs[i]) axis_dim = array_ops.reshape(input_shape[axis], [1]) with ops.control_dependencies([array_ops.identity(grads)]): d = self.all_gather(axis_dim, axis=0) begin_dim = math_ops.reduce_sum(d[:self.replica_id_in_sync_group]) end_dim = begin_dim + array_ops.shape(xs[i])[axis] new_grad = array_ops.gather( grad, axis=axis, indices=math_ops.range(begin_dim, end_dim)) new_grads.append(new_grad) return new_grads return ys, grad return nest.pack_sequence_as(value, grad_wrapper(*nest.flatten(value))) def _update(self, var, fn, args=(), kwargs=None, group=True): """Run `fn` to update `var` with `args` and `kwargs` in replica context. `tf.distribute.ReplicaContext.update` takes a (distributed) variable `var` to be updated, an update function `fn`, and `args` and `kwargs` for `fn`. `fn` applies to each component variable of `var` with corresponding input values from `args` and `kwargs`. Example usage: >>> strategy = tf.distribute.MirroredStrategy(['GPU:0', 'GPU:1']) # 2 replicas >>> with strategy.scope(): ... distributed_variable = tf.Variable(5.0) >>> distributed_variable MirroredVariable:{ 0: <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=5.0>, 1: <tf.Variable 'Variable/replica_1:0' shape=() dtype=float32, numpy=5.0> } >>> def replica_fn(v): ... value = tf.identity(1.0) ... 
replica_context = tf.distribute.get_replica_context() ... update_fn = lambda var, value: var.assign(value) ... replica_context._update(v, update_fn, args=(value,)) >>> strategy.run(replica_fn, args=(distributed_variable,)) >>> distributed_variable MirroredVariable:{ 0: <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=1.0>, 1: <tf.Variable 'Variable/replica_1:0' shape=() dtype=float32, numpy=1.0> } This API must be called in a replica context. Note that if `var` is a MirroredVariable (i.e., the type of variable created under the scope of a synchronous strategy, and is synchronized on-write, see `tf.VariableSynchronization` for more information) and `args`/`kwargs` contains different values for different replicas, `var` will be dangerously out of synchronization. Thus we recommend using `variable.assign(value)` as long as you can, which under the hood aggregates the updates and guarantees the synchronization. The case where you actually want this API instead of `variable.assign(value)` is that before assigning `value` to the `variable`, you'd like to conduct some pre-`assign` computation colocated with the variable devices (i.e. where variables reside, for MirroredStrategy they are the same as the compute device, for ParameterServerStrategy they refer to parameter servers). E.g., ```python strategy = tf.distribute.MirroredStrategy(['GPU:0', 'GPU:1']) # 2 replicas with strategy.scope(): v = tf.Variable(5.0, aggregation=tf.VariableAggregation.SUM) def replica_fn(inputs): value = computation(inputs) replica_context = tf.distribute.get_replica_context() reduced_value = replica_context.all_reduce(value) def update_fn(var, value): # this computation will colocate with `var`'s device updated_value = post_reduce_pre_update_computation(value) var.assign(value) replica_context._update(v, update_fn, args=(reduced_value,)) strategy.run(replica_fn, args=(inputs,)) ``` This code snippet is consistent across all strategies. 
If you directly compute and use `assign` in the replica context instead of wrapping it with `update`, for strategies with fewer variable devices than compute devices (e.g., parameter server strategy, usually), the `post_reduce_pre_update_computation` will happen N==number_of_compute_devices times which is less performant. Args: var: Variable, possibly distributed to multiple devices, to operate on. fn: Function to call. Should take the variable as the first argument. args: Tuple or list. Additional positional arguments to pass to `fn()`. kwargs: Dict with keyword arguments to pass to `fn()`. group: Boolean. Defaults to True. Most strategies enter a merge_call to conduct update in cross-replica context, and group=True guarantees updates on all replicas is executed. Returns: The return value of `fn` for the local replica. """ if kwargs is None: kwargs = {} return self._strategy.extended._replica_ctx_update(var, fn, args=args, kwargs=kwargs, group=group) # pylint: disable=protected-access @tf_export(v1=["distribute.ReplicaContext"])
ReplicaContext
python
modin-project__modin
asv_bench/benchmarks/io/csv.py
{ "start": 1478, "end": 2275 }
class ____(BaseReadCsv): shapes = get_benchmark_shapes("TimeReadCsvSkiprows") skiprows_mapping = { "lambda_even_rows": lambda x: x % 2, "range_uniform": np.arange(1, shapes[0][0] // 10), "range_step2": np.arange(1, shapes[0][0], 2), } data_type = "str_int" param_names = ["shape", "skiprows"] params = [ shapes, [None, "lambda_even_rows", "range_uniform", "range_step2"], ] def setup(self, test_filenames, shape, skiprows): super().setup(test_filenames, shape, skiprows) self.skiprows = self.skiprows_mapping[skiprows] if skiprows else None def time_skiprows(self, test_filenames, shape, skiprows): execute(IMPL.read_csv(test_filenames[self.shape_id], skiprows=self.skiprows))
TimeReadCsvSkiprows
python
doocs__leetcode
solution/2100-2199/2111.Minimum Operations to Make the Array K-Increasing/Solution.py
{ "start": 0, "end": 392 }
class ____: def kIncreasing(self, arr: List[int], k: int) -> int: def lis(arr): t = [] for x in arr: idx = bisect_right(t, x) if idx == len(t): t.append(x) else: t[idx] = x return len(arr) - len(t) return sum(lis(arr[i::k]) for i in range(k))
Solution
python
readthedocs__readthedocs.org
readthedocs/rtd_tests/tests/test_views.py
{ "start": 683, "end": 4078 }
class ____(TestCase): fixtures = ["eric", "test_data"] def assertRedirectToLogin(self, response): self.assertEqual(response.status_code, 302) url = response["Location"] e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(url) self.assertEqual(e_path, reverse("account_login")) def test_dashboard(self): response = self.client.get("/dashboard/") self.assertRedirectToLogin(response) def test_import_wizard_start(self): response = self.client.get("/dashboard/import/") self.assertRedirectToLogin(response) def test_import_wizard_manual(self): response = self.client.get("/dashboard/import/manual/") self.assertRedirectToLogin(response) def test_edit(self): response = self.client.get("/dashboard/pip/edit/") self.assertRedirectToLogin(response) def test_advanced(self): response = self.client.get("/dashboard/pip/advanced/") self.assertRedirectToLogin(response) def test_version_delete_html(self): response = self.client.get("/dashboard/pip/version/0.8.1/delete_html/") self.assertRedirectToLogin(response) def test_version_detail(self): response = self.client.get("/dashboard/pip/version/0.8.1/edit/") self.assertRedirectToLogin(response) def test_project_delete(self): response = self.client.get("/dashboard/pip/delete/") self.assertRedirectToLogin(response) def test_subprojects_delete(self): # This URL doesn't exist anymore, 404 response = self.client.get( "/dashboard/pip/subprojects/delete/a-subproject/", ) self.assertEqual(response.status_code, 404) # New URL response = self.client.get( "/dashboard/pip/subprojects/a-subproject/delete/", ) self.assertRedirectToLogin(response) def test_subprojects(self): response = self.client.get("/dashboard/pip/subprojects/") self.assertRedirectToLogin(response) def test_project_users(self): response = self.client.get("/dashboard/pip/users/") self.assertRedirectToLogin(response) def test_project_users_delete(self): response = self.client.get("/dashboard/pip/users/delete/") self.assertRedirectToLogin(response) def 
test_project_notifications(self): response = self.client.get("/dashboard/pip/notifications/") self.assertRedirectToLogin(response) def test_project_notifications_delete(self): response = self.client.get("/dashboard/pip/notifications/delete/") self.assertRedirectToLogin(response) def test_project_translations(self): response = self.client.get("/dashboard/pip/translations/") self.assertRedirectToLogin(response) def test_project_translations_delete(self): response = self.client.get( "/dashboard/pip/translations/delete/a-translation/", ) self.assertRedirectToLogin(response) def test_project_redirects(self): response = self.client.get("/dashboard/pip/redirects/") self.assertRedirectToLogin(response) def test_project_redirects_delete(self): response = self.client.get( reverse("projects_redirects_delete", args=["pip", 3]) ) self.assertRedirectToLogin(response)
PrivateViewsAreProtectedTests
python
charliermarsh__ruff
python/ruff-ecosystem/ruff_ecosystem/format.py
{ "start": 8780, "end": 9512 }
class ____(Enum): ruff_then_ruff = "ruff-then-ruff" """ Run Ruff baseline then Ruff comparison; checks for changes in behavior when formatting previously "formatted" code """ ruff_and_ruff = "ruff-and-ruff" """ Run Ruff baseline then reset and run Ruff comparison; checks changes in behavior when formatting "unformatted" code """ black_then_ruff = "black-then-ruff" """ Run Black baseline then Ruff comparison; checks for changes in behavior when formatting previously "formatted" code """ black_and_ruff = "black-and-ruff" """" Run Black baseline then reset and run Ruff comparison; checks changes in behavior when formatting "unformatted" code """
FormatComparison
python
kamyu104__LeetCode-Solutions
Python/non-decreasing-array.py
{ "start": 29, "end": 667 }
class ____(object): def checkPossibility(self, nums): """ :type nums: List[int] :rtype: bool """ modified, prev = False, nums[0] for i in xrange(1, len(nums)): if prev > nums[i]: if modified: return False if i-2 < 0 or nums[i-2] <= nums[i]: prev = nums[i] # nums[i-1] = nums[i], prev = nums[i] # else: # prev = nums[i-1] # nums[i] = nums[i-1], prev = nums[i] modified = True else: prev = nums[i] return True
Solution
python
django__django
tests/admin_docs/models.py
{ "start": 773, "end": 2759 }
class ____(models.Model): """ Stores information about a person, related to :model:`myapp.Company`. **Notes** Use ``save_changes()`` when saving this object. ``company`` Field storing :model:`myapp.Company` where the person works. (DESCRIPTION) .. raw:: html :file: admin_docs/evilfile.txt .. include:: admin_docs/evilfile.txt """ first_name = models.CharField(max_length=200, help_text="The person's first name") last_name = models.CharField(max_length=200, help_text="The person's last name") company = models.ForeignKey(Company, models.CASCADE, help_text="place of work") family = models.ForeignKey(Family, models.SET_NULL, related_name="+", null=True) groups = models.ManyToManyField(Group, help_text="has membership") def _get_full_name(self): return "%s %s" % (self.first_name, self.last_name) def rename_company(self, new_name): self.company.name = new_name self.company.save() return new_name def dummy_function(self, baz, rox, *some_args, **some_kwargs): return some_kwargs def dummy_function_keyword_only_arg(self, *, keyword_only_arg): return keyword_only_arg def all_kinds_arg_function(self, position_only_arg, /, arg, *, kwarg): return position_only_arg, arg, kwarg @property def a_property(self): return "a_property" @cached_property def a_cached_property(self): return "a_cached_property" def suffix_company_name(self, suffix="ltd"): return self.company.name + suffix def add_image(self): pass def delete_image(self): pass def save_changes(self): pass def set_status(self): pass def get_full_name(self): """ Get the full name of the person """ return self._get_full_name() def get_status_count(self): return 0 def get_groups_list(self): return []
Person
python
getsentry__sentry
src/sentry/eventstream/snuba.py
{ "start": 16278, "end": 21694 }
class ____(SnubaProtocolEventStream): def _send( self, project_id: int, _type: str, extra_data: tuple[Any, ...] = (), asynchronous: bool = True, headers: MutableMapping[str, str] | None = None, skip_semantic_partitioning: bool = False, event_type: EventStreamEventType = EventStreamEventType.Error, ) -> None: if headers is None: headers = {} if event_type == EventStreamEventType.Error: # error events now have a timestamp_ms field, this does not exist on the nodestore event # but instead should be derived from the datetime field on regular Snuba processing. # Since here we insert it using the eventstream API we need to add it manually if "datetime" in extra_data[0]: extra_data[0]["timestamp_ms"] = extra_data[0]["datetime"] data = (self.EVENT_PROTOCOL_VERSION, _type) + extra_data entity = "events" if event_type == EventStreamEventType.Transaction: entity = "transactions" if event_type == EventStreamEventType.Generic: entity = "search_issues" serialized_data = json.dumps(data) topic_mapping: Mapping[str, Topic] = { "events": Topic.EVENTS, "transactions": Topic.TRANSACTIONS, "search_issues": Topic.EVENTSTREAM_GENERIC, } codec = get_topic_codec(topic_mapping[entity]) codec.decode(serialized_data.encode("utf-8"), validate=True) try: resp = snuba._snuba_pool.urlopen( "POST", f"/tests/{entity}/eventstream", body=serialized_data, headers={f"X-Sentry-{k}": v for k, v in headers.items()}, ) if resp.status != 200: raise snuba.SnubaError( f"HTTP {resp.status} response from Snuba! 
{json.loads(resp.data)}" ) return None except urllib3.exceptions.HTTPError as err: raise snuba.SnubaError(err) def _send_item(self, trace_item: TraceItem) -> None: try: serialized = trace_item.SerializeToString() field = RequestField(name="item_0", data=serialized, filename="item_0") field.make_multipart(content_type="application/octet-stream") body, content_type = encode_multipart_formdata([field]) resp = snuba._snuba_pool.urlopen( "POST", EAP_ITEMS_INSERT_ENDPOINT, body=body, headers={"Content-Type": content_type}, ) if resp.status == 200: metrics.incr( "eventstream.eap.occurrence_insert.success", tags={"backend": "snuba_http"}, ) else: logger.warning( "Failed to insert EAP occurrence item via Snuba HTTP", extra={ "status": resp.status, "organization_id": trace_item.organization_id, "project_id": trace_item.project_id, "item_id": trace_item.item_id.decode("utf-8"), "trace_id": trace_item.trace_id, "backend": "snuba_http", }, ) metrics.incr( "eventstream.eap.occurrence_insert.failure", tags={"backend": "snuba_http"}, ) except Exception: logger.exception( "Exception while inserting EAP occurrence item via Snuba HTTP", extra={ "organization_id": trace_item.organization_id, "project_id": trace_item.project_id, "item_id": trace_item.item_id.decode("utf-8"), "trace_id": trace_item.trace_id, "backend": "snuba_http", }, ) metrics.incr( "eventstream.eap.occurrence_insert.failure", tags={"backend": "snuba_http"}, ) def requires_post_process_forwarder(self) -> bool: return False def insert( self, event: Event | GroupEvent, is_new: bool, is_regression: bool, is_new_group_environment: bool, primary_hash: str | None, received_timestamp: float | datetime, skip_consume: bool = False, group_states: GroupStates | None = None, eventstream_type: str | None = None, **kwargs: Any, ) -> None: super().insert( event, is_new, is_regression, is_new_group_environment, primary_hash, received_timestamp, skip_consume, group_states, **kwargs, ) self._dispatch_post_process_group_task( 
event.event_id, event.project_id, event.group_id, is_new, is_regression, is_new_group_environment, primary_hash, skip_consume, group_states, occurrence_id=event.occurrence_id if isinstance(event, GroupEvent) else None, eventstream_type=eventstream_type, )
SnubaEventStream
python
numba__numba
numba/tests/test_parallel_backend.py
{ "start": 41581, "end": 42150 }
class ____(TestCase): def test_vendors(self): """ Checks the OpenMP vendor strings are correct """ expected = dict() expected['win32'] = "MS" expected['darwin'] = "Intel" expected['linux'] = "GNU" # only check OS that are supported, custom toolchains may well work as # may other OS for k in expected.keys(): if sys.platform.startswith(k): self.assertEqual(expected[k], omppool.openmp_vendor) if __name__ == '__main__': unittest.main()
TestOpenMPVendors
python
airbytehq__airbyte
airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/streams.py
{ "start": 1488, "end": 3088 }
class ____(FBMarketingStream): """AdCreative is append-only stream doc: https://developers.facebook.com/docs/marketing-api/reference/ad-creative """ entity_prefix = "adcreative" def __init__(self, fetch_thumbnail_images: bool = False, **kwargs): super().__init__(**kwargs) self._fetch_thumbnail_images = fetch_thumbnail_images def fields(self, **kwargs) -> List[str]: """Remove "thumbnail_data_url" field because it is a computed field, and it's not a field that we can request from Facebook""" if self._fields: return self._fields self._fields = [f for f in super().fields(**kwargs) if f != "thumbnail_data_url"] return self._fields def read_records( self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_slice: Mapping[str, Any] = None, stream_state: Mapping[str, Any] = None, ) -> Iterable[Mapping[str, Any]]: """Read with super method and append thumbnail_data_url if enabled""" for record in super().read_records(sync_mode, cursor_field, stream_slice, stream_state): if self._fetch_thumbnail_images: thumbnail_url = record.get("thumbnail_url") if thumbnail_url: record["thumbnail_data_url"] = fetch_thumbnail_data_url(thumbnail_url) yield record def list_objects(self, params: Mapping[str, Any], account_id: str) -> Iterable: return self._api.get_account(account_id=account_id).get_ad_creatives(params=params, fields=self.fields())
AdCreatives
python
scipy__scipy
scipy/stats/tests/test_resampling.py
{ "start": 59327, "end": 92213 }
class ____: rtol = 1e-14 def setup_method(self): self.rng = np.random.default_rng(7170559330470561044) # -- Input validation -- # def test_permutation_test_iv(self, xp): def stat(x, y, axis): return stats.ttest_ind((x, y), axis).statistic data = (xp.asarray([1, 2, 3]), xp.asarray([1, 2, 3])) message = "each sample in `data` must contain two or more ..." with pytest.raises(ValueError, match=message): permutation_test((data[0], xp.asarray([0])), stat) message = "`data` must be a tuple containing at least two samples" with pytest.raises(ValueError, match=message): permutation_test((1,), stat) with pytest.raises(TypeError, match=message): permutation_test(1, stat) message = "`axis` must be an integer." with pytest.raises(ValueError, match=message): permutation_test(data, stat, axis=1.5) message = "`permutation_type` must be in..." with pytest.raises(ValueError, match=message): permutation_test(data, stat, permutation_type="ekki") message = "`vectorized` must be `True`, `False`, or `None`." with pytest.raises(ValueError, match=message): permutation_test(data, stat, vectorized=1.5) message = "`n_resamples` must be a positive integer." with pytest.raises(ValueError, match=message): permutation_test(data, stat, n_resamples=-1000) message = "`n_resamples` must be a positive integer." with pytest.raises(ValueError, match=message): permutation_test(data, stat, n_resamples=1000.5) message = "`batch` must be a positive integer or None." with pytest.raises(ValueError, match=message): permutation_test(data, stat, batch=-1000) message = "`batch` must be a positive integer or None." with pytest.raises(ValueError, match=message): permutation_test(data, stat, batch=1000.5) message = "`alternative` must be in..." 
with pytest.raises(ValueError, match=message): permutation_test(data, stat, alternative='ekki') message = "SeedSequence expects int or sequence of ints" with pytest.raises(TypeError, match=message): permutation_test(data, stat, rng='herring') # -- Test Parameters -- # # SPEC-007 leave one call with seed to check it still works @pytest.mark.parametrize('random_state', [np.random.RandomState, np.random.default_rng]) @pytest.mark.parametrize('permutation_type', ['pairings', 'samples', 'independent']) def test_batch(self, permutation_type, random_state, xp): # make sure that the `batch` parameter is respected by checking the # maximum batch size provided in calls to `statistic` x = xp.asarray(self.rng.random(10)) y = xp.asarray(self.rng.random(10)) def statistic(x, y, axis): batch_size = 1 if x.ndim == 1 else x.shape[0] statistic.batch_size = max(batch_size, statistic.batch_size) statistic.counter += 1 return xp.mean(x, axis=axis) - xp.mean(y, axis=axis) statistic.counter = 0 statistic.batch_size = 0 kwds = {'n_resamples': 1000, 'permutation_type': permutation_type, 'vectorized': True} res1 = stats.permutation_test((x, y), statistic, batch=1, random_state=random_state(0), **kwds) assert statistic.counter == 1001 assert statistic.batch_size == 1 statistic.counter = 0 res2 = stats.permutation_test((x, y), statistic, batch=50, random_state=random_state(0), **kwds) assert statistic.counter == 21 assert statistic.batch_size == 50 statistic.counter = 0 res3 = stats.permutation_test((x, y), statistic, batch=1000, random_state=random_state(0), **kwds) assert statistic.counter == 2 assert statistic.batch_size == 1000 xp_assert_equal(res1.pvalue, res3.pvalue) xp_assert_equal(res2.pvalue, res3.pvalue) # SPEC-007 leave at least one call with seed to check it still works @pytest.mark.parametrize('random_state', [np.random.RandomState, np.random.default_rng]) @pytest.mark.parametrize('permutation_type, exact_size', [('pairings', special.factorial(3)**2), ('samples', 2**3), 
('independent', special.binom(6, 3))]) def test_permutations(self, permutation_type, exact_size, random_state, xp): # make sure that the `permutations` parameter is respected by checking # the size of the null distribution x = xp.asarray(self.rng.random(3)) y = xp.asarray(self.rng.random(3)) def statistic(x, y, axis): return xp.mean(x, axis=axis) - xp.mean(y, axis=axis) kwds = {'permutation_type': permutation_type, 'vectorized': True} res = stats.permutation_test((x, y), statistic, n_resamples=3, random_state=random_state(0), **kwds) assert xp_size(res.null_distribution) == 3 res = stats.permutation_test((x, y), statistic, **kwds) assert xp_size(res.null_distribution) == exact_size # -- Randomized Permutation Tests -- # # To get reasonable accuracy, these next three tests are somewhat slow. # Originally, I had them passing for all combinations of permutation type, # alternative, and RNG, but that takes too long for CI. Instead, split # into three tests, each testing a particular combination of the three # parameters. 
    def test_randomized_test_against_exact_both(self, xp):
        # check that the randomized and exact tests agree to reasonable
        # precision for permutation_type='both'
        # NOTE(review): the first `rng` value (0) is immediately overwritten
        # by the seeded Generator below — presumably intentional remnant of a
        # parametrized version of this test; confirm.
        alternative, rng = 'less', 0

        nx, ny, permutations = 8, 9, 24000
        # Fewer resamples than distinct partitions -> randomized path is used.
        assert special.binom(nx + ny, nx) > permutations

        rng = np.random.default_rng(8235259808)
        x = xp.asarray(rng.standard_normal(size=nx))
        y = xp.asarray(rng.standard_normal(size=ny))
        data = x, y

        def statistic(x, y, axis):
            return xp.mean(x, axis=axis) - xp.mean(y, axis=axis)

        kwds = {'vectorized': True, 'permutation_type': 'independent',
                'batch': 100, 'alternative': alternative, 'rng': rng}
        res = permutation_test(data, statistic,
                               n_resamples=permutations, **kwds)
        res2 = permutation_test(data, statistic, n_resamples=xp.inf, **kwds)

        assert res.statistic == res2.statistic
        xp_assert_close(res.pvalue, res2.pvalue, atol=1e-2)

    @pytest.mark.slow()
    def test_randomized_test_against_exact_samples(self, xp):
        # check that the randomized and exact tests agree to reasonable
        # precision for permutation_type='samples'
        alternative, rng = 'greater', None

        nx, ny, permutations = 15, 15, 32000
        # 2**nx sign-flip combinations exceed the resample count, so the
        # randomized path is exercised.
        assert 2**nx > permutations

        rng = np.random.default_rng(8235259808)
        x = xp.asarray(rng.standard_normal(size=nx))
        y = xp.asarray(rng.standard_normal(size=ny))
        data = x, y

        def statistic(x, y, axis):
            return xp.mean(x - y, axis=axis)

        kwds = {'vectorized': True, 'permutation_type': 'samples',
                'batch': 100, 'alternative': alternative, 'rng': rng}
        res = permutation_test(data, statistic,
                               n_resamples=permutations, **kwds)
        res2 = permutation_test(data, statistic, n_resamples=xp.inf, **kwds)

        assert res.statistic == res2.statistic
        xp_assert_close(res.pvalue, res2.pvalue, atol=1e-2)

    # I only need to skip torch on GPU because it doesn't have betaincc for pearsonr
    @pytest.mark.skip_xp_backends(cpu_only=True, exceptions=['cupy', 'jax.numpy'])
    def test_randomized_test_against_exact_pairings(self, xp):
        # check that the randomized and exact tests agree to reasonable
        # precision for permutation_type='pairings'
        alternative, rng = 'two-sided', self.rng

        nx, ny, permutations = 8, 8, 40000
        # factorial(nx) distinct pairings exceed the resample count.
        assert special.factorial(nx) > permutations

        rng = np.random.default_rng(8235259808)
        x = xp.asarray(rng.standard_normal(size=nx))
        y = xp.asarray(rng.standard_normal(size=ny))
        data = [x]

        def statistic(x, axis):
            # `y` is captured from the enclosing scope and held fixed.
            return stats.pearsonr(x, y, axis=axis).statistic

        # NOTE(review): permutation_type is 'samples' although the test name
        # and the factorial assertion above suggest 'pairings' — confirm this
        # is intentional.
        kwds = {'vectorized': True, 'permutation_type': 'samples',
                'batch': 100, 'alternative': alternative, 'rng': rng}
        res = permutation_test(data, statistic,
                               n_resamples=permutations, **kwds)
        res2 = permutation_test(data, statistic, n_resamples=xp.inf, **kwds)

        assert res.statistic == res2.statistic
        xp_assert_close(res.pvalue, res2.pvalue, atol=1e-2)

    # -- Independent (Unpaired) Sample Tests -- #

    @pytest.mark.skip_xp_backends(eager_only=True)  # TODO: change to jax_jit=False
    @pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
    def test_against_ks_2samp(self, alternative, xp):
        # Exact permutation test should reproduce `ks_2samp`'s exact p-value.
        x = self.rng.normal(size=4, scale=1)
        y = self.rng.normal(size=5, loc=3, scale=3)

        expected = stats.ks_2samp(x, y, alternative=alternative, mode='exact')

        def statistic(x, y, axis):
            # todo: use `xp` as backend when `ks_2samp` is translated to array API
            x, y = _xp_copy_to_numpy(x), _xp_copy_to_numpy(y)
            res = stats.ks_2samp(x, y, axis=axis, mode='asymp',
                                 alternative=alternative)
            res = xp.asarray(res.statistic)
            return res[()] if res.ndim == 0 else res

        # ks_2samp is always a one-tailed 'greater' test
        # it's the statistic that changes (D+ vs D- vs max(D+, D-))
        x, y = xp.asarray(x), xp.asarray(y)
        res = permutation_test((x, y), statistic, n_resamples=np.inf,
                               alternative='greater', rng=self.rng)

        xp_assert_close(res.statistic, xp.asarray(expected.statistic),
                        rtol=self.rtol)
        xp_assert_close(res.pvalue, xp.asarray(expected.pvalue),
                        rtol=self.rtol)

    @pytest.mark.skip_xp_backends(eager_only=True)  # TODO: change to jax_jit=False
    @pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
    def test_against_ansari(self, alternative, xp):
        # Exact permutation test should reproduce `ansari`'s exact p-value.
        x = self.rng.normal(size=4, scale=1)
        y = self.rng.normal(size=5, scale=3)

        # ansari has a different convention for 'alternative'
        alternative_correspondence = {"less": "greater",
                                      "greater": "less",
                                      "two-sided": "two-sided"}
        alternative_scipy = alternative_correspondence[alternative]
        expected = stats.ansari(x, y, alternative=alternative_scipy)

        def statistic(x, y, axis):
            # todo: use `xp` as backend when `ansari` is translated to array API
            x, y = _xp_copy_to_numpy(x), _xp_copy_to_numpy(y)
            res = stats.ansari(x, y, axis=axis)
            res = xp.asarray(res.statistic)
            return res[()] if res.ndim == 0 else res

        x, y = xp.asarray(x), xp.asarray(y)
        res = permutation_test((x, y), statistic, n_resamples=np.inf,
                               alternative=alternative, rng=self.rng)

        xp_assert_close(res.statistic, xp.asarray(expected.statistic),
                        rtol=self.rtol)
        xp_assert_close(res.pvalue, xp.asarray(expected.pvalue),
                        rtol=self.rtol)

    @skip_xp_backends('cupy', reason='needs mannwhitneyu')
    @skip_xp_backends(eager_only=True)  # mannwhitneyu does input validation
    @pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
    def test_against_mannwhitneyu(self, alternative, xp):
        # Vectorized (3-d, axis=1) comparison against `mannwhitneyu`.
        x = stats.uniform.rvs(size=(3, 5, 2), loc=0, random_state=self.rng)
        y = stats.uniform.rvs(size=(3, 5, 2), loc=0.05, random_state=self.rng)

        expected = stats.mannwhitneyu(x, y, axis=1, alternative=alternative)

        def statistic(x, y, axis):
            return stats.mannwhitneyu(x, y, axis=axis,
                                      method='asymptotic').statistic

        x, y = xp.asarray(x), xp.asarray(y)
        res = permutation_test((x, y), statistic, vectorized=True,
                               n_resamples=xp.inf, alternative=alternative,
                               axis=1, rng=self.rng)

        xp_assert_close(res.statistic, xp.asarray(expected.statistic),
                        rtol=self.rtol)
        xp_assert_close(res.pvalue, xp.asarray(expected.pvalue),
                        rtol=self.rtol)

    @skip_xp_backends('cupy', reason='needs cramervonmises_2samp')
    @skip_xp_backends(eager_only=True)  # cramervonmises_2samp does input validation
    @skip_xp_backends(cpu_only=True)  # torch doesn't have `kv`
    def test_against_cvm(self, xp):
        # Exact permutation test against `cramervonmises_2samp(method='exact')`.
        x = stats.norm.rvs(size=4, scale=1, random_state=self.rng)
        y = stats.norm.rvs(size=5, loc=3, scale=3, random_state=self.rng)

        expected = stats.cramervonmises_2samp(x, y, method='exact')

        def statistic(x, y, axis):
            res = stats.cramervonmises_2samp(x, y, axis=axis,
                                             method='asymptotic')
            return res.statistic

        # cramervonmises_2samp has only one alternative, greater
        x, y = xp.asarray(x), xp.asarray(y)
        res = permutation_test((x, y), statistic, n_resamples=np.inf,
                               alternative='greater', rng=self.rng)

        xp_assert_close(res.statistic, xp.asarray(expected.statistic),
                        rtol=self.rtol)
        xp_assert_close(res.pvalue, xp.asarray(expected.pvalue),
                        rtol=self.rtol)

    @skip_xp_backends('cupy', reason='needs kruskal')
    @skip_xp_backends(eager_only=True)  # kruskal does input validation
    @pytest.mark.parametrize('axis', (-1, 2))
    def test_vectorized_nsamp_ptype_both(self, axis, xp):
        # statistic only available for NumPy
        # Test that permutation_test with permutation_type='independent' works
        # properly for a 3-sample statistic with nd array samples of different
        # (but compatible) shapes and ndims. Show that exact permutation test
        # and random permutation tests approximate SciPy's asymptotic pvalues
        # and that exact and random permutation test results are even closer
        # to one another (than they are to the asymptotic results).

        # Three samples, different (but compatible) shapes with different ndims
        rng = np.random.default_rng(6709265303529651545)
        x = rng.random(size=(3))
        y = rng.random(size=(1, 3, 2))
        z = rng.random(size=(2, 1, 4))
        data = (x, y, z)

        expected = stats.kruskal(*data, axis=axis)

        # Define the statistic (and pvalue for comparison)
        def statistic(*data, axis):
            return stats.kruskal(*data, axis=axis).statistic

        # Calculate exact and randomized permutation results
        kwds = {'axis': axis, 'alternative': 'greater',
                'permutation_type': 'independent', 'rng': rng}
        data = [xp.asarray(data_) for data_ in data]
        res = permutation_test(data, statistic, n_resamples=xp.inf, **kwds)
        res2 = permutation_test(data, statistic, n_resamples=1000, **kwds)

        # Check results
        xp_assert_close(res.statistic, xp.asarray(expected.statistic),
                        rtol=self.rtol*5)
        xp_assert_close(res.statistic, res2.statistic, rtol=self.rtol*5)
        xp_assert_close(res.pvalue, xp.asarray(expected.pvalue), atol=6e-2)
        xp_assert_close(res.pvalue, res2.pvalue, atol=3e-2)

    # -- Paired-Sample Tests -- #

    @pytest.mark.skip_xp_backends(eager_only=True)  # TODO: change to jax_jit=False
    @pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
    def test_against_wilcoxon(self, alternative, xp):
        x = stats.uniform.rvs(size=(3, 6, 2), loc=0, random_state=self.rng)
        y = stats.uniform.rvs(size=(3, 6, 2), loc=0.05, random_state=self.rng)

        expected = stats.wilcoxon(x, y, alternative=alternative, axis=1)

        # We'll check both 1- and 2-sample versions of the same test;
        # we expect identical results to wilcoxon in all cases.
        def statistic_1samp_1d(z, axis):
            # todo: use `xp` as backend when `wilcoxon` is translated to array API
            # 'less' ensures we get the same of two statistics every time
            z = _xp_copy_to_numpy(z)
            res = stats.wilcoxon(z, alternative='less', axis=axis)
            res = xp.asarray(res.statistic)
            return res[()] if res.ndim == 0 else res

        def statistic_2samp_1d(x, y, axis):
            # todo: use `xp` as backend when `wilcoxon` is translated to array API
            x, y = _xp_copy_to_numpy(x), _xp_copy_to_numpy(y)
            res = stats.wilcoxon(x, y, alternative='less', axis=axis)
            res = xp.asarray(res.statistic)
            return res[()] if res.ndim == 0 else res

        x, y = xp.asarray(x), xp.asarray(y)
        kwds = {'axis': 1, 'alternative': alternative,
                'permutation_type': 'samples', 'rng': self.rng,
                'n_resamples': np.inf}
        # One-sample test on the differences vs two-sample paired test.
        res1 = permutation_test((x-y,), statistic_1samp_1d, **kwds)
        res2 = permutation_test((x, y), statistic_2samp_1d, **kwds)

        # `wilcoxon` returns a different statistic with 'two-sided'
        xp_assert_close(res1.statistic, res2.statistic, rtol=self.rtol)
        if alternative != 'two-sided':
            xp_assert_close(res2.statistic, xp.asarray(expected.statistic),
                            rtol=self.rtol)

        xp_assert_close(res2.pvalue, xp.asarray(expected.pvalue),
                        rtol=self.rtol)
        xp_assert_close(res1.pvalue, res2.pvalue, rtol=self.rtol)

    @pytest.mark.parametrize('alternative', ("less", "greater", "two-sided"))
    def test_against_binomtest(self, alternative, xp):
        x = self.rng.integers(0, 2, size=10)
        x[x == 0] = -1
        # More naturally, the test would flip elements between 0 and one.
        # However, permutation_test will flip the _signs_ of the elements.
        # So we have to work with +1/-1 instead of 1/0.

        def statistic(x, axis=0):
            # Number of "successes" = number of positive elements.
            xp_ = array_namespace(x)
            return xp_.count_nonzero(x > 0, axis=axis)

        k, n, p = statistic(x), 10, 0.5
        expected = stats.binomtest(k, n, p, alternative=alternative)

        res = stats.permutation_test((xp.asarray(x, dtype=xp.float64),),
                                     statistic, vectorized=True,
                                     permutation_type='samples',
                                     n_resamples=xp.inf, rng=self.rng,
                                     alternative=alternative)
        xp_assert_close(res.pvalue, xp.asarray(expected.pvalue),
                        rtol=self.rtol)

    # -- Exact Association Tests -- #

    @pytest.mark.skip_xp_backends(eager_only=True)  # TODO: change to jax_jit=False
    def test_against_kendalltau(self, xp):
        # Exact 'pairings' permutation test against `kendalltau(method='exact')`.
        x = self.rng.normal(size=6)
        y = x + self.rng.normal(size=6)

        expected = stats.kendalltau(x, y, method='exact')

        def statistic(x, axis):
            # todo: use `xp` as backend when `kendalltau` is translated to array API
            x = _xp_copy_to_numpy(x)
            res = stats.kendalltau(x, y, method='asymptotic', axis=axis)
            res = xp.asarray(res.statistic)
            return res[()] if res.ndim == 0 else res

        # kendalltau currently has only one alternative, two-sided
        x = xp.asarray(x)
        res = permutation_test((x,), statistic, permutation_type='pairings',
                               n_resamples=np.inf, rng=self.rng)

        xp_assert_close(res.statistic, xp.asarray(expected.statistic),
                        rtol=self.rtol)
        xp_assert_close(res.pvalue, xp.asarray(expected.pvalue),
                        rtol=self.rtol)

    @pytest.mark.parametrize('alternative', ('less', 'greater', 'two-sided'))
    def test_against_fisher_exact(self, alternative, xp):
        # x and y are binary random variables with some dependence
        rng = np.random.default_rng(6235696159000529929)
        x = (rng.random(7) > 0.6).astype(float)
        y = (rng.random(7) + 0.25*x > 0.6).astype(float)
        tab = stats.contingency.crosstab(x, y)[1]
        x, y = xp.asarray(x), xp.asarray(y)

        def statistic(x, axis):
            # Count of (1, 1) pairs — the top-left cell of the 2x2 table.
            return xp.count_nonzero((x == 1) & (y == 1), axis=axis)

        res = permutation_test((x,), statistic, permutation_type='pairings',
                               n_resamples=xp.inf, alternative=alternative,
                               rng=rng)
        res2 = stats.fisher_exact(tab, alternative=alternative)

        xp_assert_close(res.pvalue, xp.asarray(res2.pvalue, dtype=x.dtype))

    @pytest.mark.xslow()
    @pytest.mark.parametrize('axis', (-2, 1))
    def test_vectorized_nsamp_ptype_samples(self, axis):
        # statistic only available for NumPy, and it's a pain to vectorize
        # Test that permutation_test with permutation_type='samples' works
        # properly for a 3-sample statistic with nd array samples of different
        # (but compatible) shapes and ndims. Show that exact permutation test
        # reproduces SciPy's exact pvalue and that random permutation test
        # approximates it.
        x = self.rng.random(size=(2, 4, 3))
        y = self.rng.random(size=(1, 4, 3))
        z = self.rng.random(size=(2, 4, 1))
        x = stats.rankdata(x, axis=axis)
        y = stats.rankdata(y, axis=axis)
        z = stats.rankdata(z, axis=axis)
        y = y[0]  # to check broadcast with different ndim
        data = (x, y, z)

        def statistic1d(*data):
            return stats.page_trend_test(data, ranked=True,
                                         method='asymptotic').statistic

        def pvalue1d(*data):
            return stats.page_trend_test(data, ranked=True,
                                         method='exact').pvalue

        statistic = _resampling._vectorize_statistic(statistic1d)
        pvalue = _resampling._vectorize_statistic(pvalue1d)

        expected_statistic = statistic(*np.broadcast_arrays(*data), axis=axis)
        expected_pvalue = pvalue(*np.broadcast_arrays(*data), axis=axis)

        # Let's forgive this use of an integer seed, please.
        kwds = {'vectorized': False, 'axis': axis, 'alternative': 'greater',
                'permutation_type': 'pairings', 'rng': 0}
        res = permutation_test(data, statistic1d, n_resamples=np.inf, **kwds)
        res2 = permutation_test(data, statistic1d, n_resamples=5000, **kwds)

        assert_allclose(res.statistic, expected_statistic, rtol=self.rtol)
        assert_allclose(res.statistic, res2.statistic, rtol=self.rtol)
        assert_allclose(res.pvalue, expected_pvalue, rtol=self.rtol)
        assert_allclose(res.pvalue, res2.pvalue, atol=3e-2)

    # -- Test Against External References -- #

    # Reference values produced by SAS PROC NPAR1WAY; see the docstring of
    # `test_with_ties` below for the SAS program used.
    tie_case_1 = {'x': [1, 2, 3, 4], 'y': [1.5, 2, 2.5],
                  'expected_less': 0.2000000000,
                  'expected_2sided': 0.4,  # 2*expected_less
                  'expected_Pr_gte_S_mean': 0.3428571429,  # see note below
                  'expected_statistic': 7.5,
                  'expected_avg': 9.142857, 'expected_std': 1.40698}
    tie_case_2 = {'x': [111, 107, 100, 99, 102, 106, 109, 108],
                  'y': [107, 108, 106, 98, 105, 103, 110, 105, 104],
                  'expected_less': 0.1555738379,
                  'expected_2sided': 0.3111476758,
                  'expected_Pr_gte_S_mean': 0.2969971205,  # see note below
                  'expected_statistic': 32.5,
                  'expected_avg': 38.117647, 'expected_std': 5.172124}

    @pytest.mark.skip_xp_backends(eager_only=True)  # TODO: change to jax_jit=False
    @pytest.mark.xslow()  # only the second case is slow, really
    @pytest.mark.parametrize('case', (tie_case_1, tie_case_2))
    def test_with_ties(self, case, xp):
        """
        Results above from SAS PROC NPAR1WAY, e.g.

        DATA myData;
        INPUT X Y;
        CARDS;
        1 1
        1 2
        1 3
        1 4
        2 1.5
        2 2
        2 2.5
        ods graphics on;
        proc npar1way AB data=myData;
            class X;
            EXACT;
        run;
        ods graphics off;

        Note: SAS provides Pr >= |S-Mean|, which is different from our
        definition of a two-sided p-value.
        """
        x = case['x']
        y = case['y']
        expected_statistic = xp.asarray(case['expected_statistic'])
        expected_less = xp.asarray(case['expected_less'])
        expected_2sided = xp.asarray(case['expected_2sided'])
        expected_Pr_gte_S_mean = xp.asarray(case['expected_Pr_gte_S_mean'])
        expected_avg = xp.asarray(case['expected_avg'])
        expected_std = xp.asarray(case['expected_std'])

        def statistic(x, y, axis):
            # todo: use `xp` as backend when `ansari` is translated to array API
            x, y = _xp_copy_to_numpy(x), _xp_copy_to_numpy(y)
            res = stats.ansari(x, y, axis=axis)
            res = xp.asarray(res.statistic)
            return res[()] if res.ndim == 0 else res

        dtype = xp_default_dtype(xp)
        x, y = xp.asarray(x, dtype=dtype), xp.asarray(y, dtype=dtype)

        # `ansari` warns about ties; we are testing the tied case on purpose.
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore", "Ties preclude use of exact statistic", UserWarning)
            res = permutation_test((x, y), statistic, n_resamples=np.inf,
                                   alternative='less')
            res2 = permutation_test((x, y), statistic, n_resamples=np.inf,
                                    alternative='two-sided')

        xp_assert_close(res.statistic, expected_statistic, rtol=self.rtol)
        xp_assert_close(res.pvalue, expected_less, atol=1e-10)
        xp_assert_close(res2.pvalue, expected_2sided, atol=1e-10)
        xp_assert_close(xp.mean(res2.null_distribution), expected_avg,
                        rtol=1e-6)
        xp_assert_close(xp.std(res2.null_distribution), expected_std,
                        rtol=1e-6)

        # SAS provides Pr >= |S-Mean|; might as well check against that, too
        S = res.statistic
        mean = xp.mean(res.null_distribution)
        n = res.null_distribution.shape[0]
        Pr_gte_S_mean = xp.astype(xp.count_nonzero(
            xp.abs(res.null_distribution-mean) >= xp.abs(S-mean)),
            S.dtype) / n
        xp_assert_close(Pr_gte_S_mean, expected_Pr_gte_S_mean)

    @pytest.mark.slow
    @pytest.mark.parametrize('alternative, expected_pvalue',
                             (('less', 0.9708333333333),
                              ('greater', 0.05138888888889),
                              ('two-sided', 0.1027777777778)))
    # I only need to skip torch on GPU because it doesn't have betaincc for pearsonr
    @pytest.mark.skip_xp_backends(cpu_only=True, exceptions=['cupy', 'jax.numpy'])
    @pytest.mark.skip_xp_backends(eager_only=True)  # TODO: change to jax_jit=False
    def test_against_spearmanr_in_R(self, alternative, expected_pvalue, xp):
        """
        Results above from R cor.test, e.g.

        options(digits=16)
        x <- c(1.76405235, 0.40015721, 0.97873798,
               2.2408932, 1.86755799, -0.97727788)
        y <- c(2.71414076, 0.2488, 0.87551913,
               2.6514917, 2.01160156, 0.47699563)
        cor.test(x, y, method = "spearm", alternative = "t")
        """
        # data comes from
        # np.random.seed(0)
        # x = stats.norm.rvs(size=6)
        # y = x + stats.norm.rvs(size=6)
        x = xp.asarray([1.76405235, 0.40015721, 0.97873798,
                        2.2408932, 1.86755799, -0.97727788])
        y = xp.asarray([2.71414076, 0.2488, 0.87551913,
                        2.6514917, 2.01160156, 0.47699563])
        expected_statistic = 0.7714285714285715

        # Spearman rho == Pearson r on ranks; rank `y` once up front.
        y = xp.asarray(stats.rankdata(_xp_copy_to_numpy(y)))

        def statistic(x, axis):
            # `spearmanr` is not array api compatible, but `pearsonr` is. So for now
            # use _xp_copy_to_numpy just for ranking so we can run this test w/ CuPy.
            # TODO: use `xp` as backend when cupy works with `rankdata`
            x = xp.asarray(stats.rankdata(_xp_copy_to_numpy(x), axis=axis))
            return stats.pearsonr(x, y, axis=axis).statistic

        res = permutation_test((x,), statistic, permutation_type='pairings',
                               n_resamples=xp.inf, alternative=alternative)

        xp_assert_close(res.statistic, xp.asarray(expected_statistic),
                        rtol=self.rtol)
        xp_assert_close(res.pvalue, xp.asarray(expected_pvalue), atol=1e-13)

    @pytest.mark.parametrize("batch", (-1, 0))
    def test_batch_generator_iv(self, batch):
        # Non-positive `batch` is rejected by the internal batch generator.
        with pytest.raises(ValueError, match="`batch` must be positive."):
            list(_resampling._batch_generator([1, 2, 3], batch))

    # (iterable, batch size, expected batches) — last batch may be short.
    batch_generator_cases = [(range(0), 3, []),
                             (range(6), 3, [[0, 1, 2], [3, 4, 5]]),
                             (range(8), 3, [[0, 1, 2], [3, 4, 5], [6, 7]])]

    @pytest.mark.parametrize("iterable, batch, expected",
                             batch_generator_cases)
    def test_batch_generator(self, iterable, batch, expected):
        got = list(_resampling._batch_generator(iterable, batch))
        assert got == expected

    @pytest.mark.fail_slow(2)
    # I only need to skip torch on GPU because it doesn't have betaincc for pearsonr
    @pytest.mark.skip_xp_backends(cpu_only=True, exceptions=['cupy', 'jax.numpy'])
    def test_finite_precision_statistic(self, xp):
        # Some statistics return numerically distinct values when the values
        # should be equal in theory. Test that `permutation_test` accounts
        # for this in some way.
        x = xp.asarray([1., 2., 4., 3.], dtype=xp.float64)
        y = xp.asarray([2., 4., 6., 8.], dtype=xp.float64)

        def statistic(x, y, axis):
            return stats.pearsonr(x, y, axis=axis)[0]

        res = stats.permutation_test((x, y), statistic,
                                     permutation_type='pairings')
        r, pvalue, null = res.statistic, res.pvalue, res.null_distribution

        # The comparison tolerance (1e-14) absorbs floating-point jitter in
        # theoretically-equal statistic values.
        correct_p = 2 * float(xp.count_nonzero(null >= r - 1e-14)) / null.shape[0]
        assert pvalue == correct_p == 1/3
        # Compare against other exact correlation tests using R corr.test
        # options(digits=16)
        # x = c(1, 2, 4, 3)
        # y = c(2, 4, 6, 8)
        # cor.test(x, y, alternative = "t", method = "spearman")  # 0.333333333
        # cor.test(x, y, alternative = "t", method = "kendall")  # 0.333333333


def test_all_partitions_concatenated():
    # make sure that _all_paritions_concatenated produces the correct number
    # of partitions of the data into samples of the given sizes and that
    # all are unique
    n = np.array([3, 2, 4], dtype=int)
    nc = np.cumsum(n)

    all_partitions = set()
    counter = 0
    for partition_concatenated in _resampling._all_partitions_concatenated(n):
        counter += 1
        # Split the concatenated permutation back into per-sample index sets;
        # frozensets make partitions order-insensitive and hashable.
        partitioning = np.split(partition_concatenated, nc[:-1])
        all_partitions.add(tuple([frozenset(i) for i in partitioning]))

    # Multinomial count: prod of binom(remaining, remaining-after-this).
    expected = np.prod([special.binom(sum(n[i:]), sum(n[i+1:]))
                        for i in range(len(n)-1)])

    assert_equal(counter, expected)
    assert_equal(len(all_partitions), expected)


@pytest.mark.parametrize('fun_name',
                         ['bootstrap', 'permutation_test', 'monte_carlo_test'])
def test_parameter_vectorized(fun_name):
    # Check that parameter `vectorized` is working as desired for all
    # resampling functions. Results don't matter; just don't fail asserts.
    rng = np.random.default_rng(75245098234592)
    sample = rng.random(size=10)

    def rvs(size):  # needed by `monte_carlo_test`
        return stats.norm.rvs(size=size, random_state=rng)

    fun_options = {'bootstrap': {'data': (sample,), 'rng': rng,
                                 'method': 'percentile'},
                   'permutation_test': {'data': (sample,), 'rng': rng,
                                        'permutation_type': 'samples'},
                   'monte_carlo_test': {'sample': sample, 'rvs': rvs}}
    common_options = {'n_resamples': 100}

    fun = getattr(stats, fun_name)
    options = fun_options[fun_name]
    options.update(common_options)

    # Vectorized statistic: accepts `axis`; may see the raw 1-d sample once.
    def statistic(x, axis):
        assert x.ndim > 1 or np.array_equal(x, sample)
        return np.mean(x, axis=axis)
    fun(statistic=statistic, vectorized=None, **options)
    fun(statistic=statistic, vectorized=True, **options)

    # Non-vectorized statistic: no `axis` parameter; always called with 1-d.
    def statistic(x):
        assert x.ndim == 1
        return np.mean(x)
    fun(statistic=statistic, vectorized=None, **options)
    fun(statistic=statistic, vectorized=False, **options)
TestPermutationTest
python
plotly__plotly.py
plotly/graph_objs/parcats/line/_colorbar.py
{ "start": 233, "end": 61588 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "parcats.line" _path_str = "parcats.line.colorbar" _valid_props = { "bgcolor", "bordercolor", "borderwidth", "dtick", "exponentformat", "labelalias", "len", "lenmode", "minexponent", "nticks", "orientation", "outlinecolor", "outlinewidth", "separatethousands", "showexponent", "showticklabels", "showtickprefix", "showticksuffix", "thickness", "thicknessmode", "tick0", "tickangle", "tickcolor", "tickfont", "tickformat", "tickformatstopdefaults", "tickformatstops", "ticklabeloverflow", "ticklabelposition", "ticklabelstep", "ticklen", "tickmode", "tickprefix", "ticks", "ticksuffix", "ticktext", "ticktextsrc", "tickvals", "tickvalssrc", "tickwidth", "title", "x", "xanchor", "xpad", "xref", "y", "yanchor", "ypad", "yref", } @property def bgcolor(self): """ Sets the color of padded area. The 'bgcolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["bgcolor"] @bgcolor.setter def bgcolor(self, val): self["bgcolor"] = val @property def bordercolor(self): """ Sets the axis line color. The 'bordercolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["bordercolor"] @bordercolor.setter def bordercolor(self, val): self["bordercolor"] = val @property def borderwidth(self): """ Sets the width (in px) or the border enclosing this color bar. 
The 'borderwidth' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["borderwidth"] @borderwidth.setter def borderwidth(self, val): self["borderwidth"] = val @property def dtick(self): """ Sets the step in-between ticks on this axis. Use with `tick0`. Must be a positive number, or special strings available to "log" and "date" axes. If the axis `type` is "log", then ticks are set every 10^(n*dtick) where n is the tick number. For example, to set a tick mark at 1, 10, 100, 1000, ... set dtick to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2. To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to log_10(5), or 0.69897000433. "log" has several special values; "L<f>", where `f` is a positive number, gives ticks linearly spaced in value (but not position). For example `tick0` = 0.1, `dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus small digits between, use "D1" (all digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and "D2". If the axis `type` is "date", then you must convert the time to milliseconds. For example, to set the interval between ticks to one day, set `dtick` to 86400000.0. "date" also has special values "M<n>" gives ticks spaced by a number of months. `n` must be a positive integer. To set ticks on the 15th of every third month, set `tick0` to "2000-01-15" and `dtick` to "M3". To set ticks every 4 years, set `dtick` to "M48" The 'dtick' property accepts values of any type Returns ------- Any """ return self["dtick"] @dtick.setter def dtick(self, val): self["dtick"] = val @property def exponentformat(self): """ Determines a formatting rule for the tick exponents. For example, consider the number 1,000,000,000. If "none", it appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If "power", 1x10^9 (with 9 in a super script). If "SI", 1G. If "B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T (10^12). 
*SI extended* covers instead the full SI range from "quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI extended* is used and the exponent is beyond the above ranges, the formatting rule will automatically be switched to the power notation. The 'exponentformat' property is an enumeration that may be specified as: - One of the following enumeration values: ['none', 'e', 'E', 'power', 'SI', 'B', 'SI extended'] Returns ------- Any """ return self["exponentformat"] @exponentformat.setter def exponentformat(self, val): self["exponentformat"] = val @property def labelalias(self): """ Replacement text for specific tick or hover labels. For example using {US: 'USA', CA: 'Canada'} changes US to USA and CA to Canada. The labels we would have shown must match the keys exactly, after adding any tickprefix or ticksuffix. For negative numbers the minus sign symbol used (U+2212) is wider than the regular ascii dash. That means you need to use −1 instead of -1. labelalias can be used with any axis type, and both keys (if needed) and values (if desired) can include html- like tags or MathJax. The 'labelalias' property accepts values of any type Returns ------- Any """ return self["labelalias"] @labelalias.setter def labelalias(self, val): self["labelalias"] = val @property def len(self): """ Sets the length of the color bar This measure excludes the padding of both ends. That is, the color bar length is this length minus the padding on both ends. The 'len' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["len"] @len.setter def len(self, val): self["len"] = val @property def lenmode(self): """ Determines whether this color bar's length (i.e. the measure in the color variation direction) is set in units of plot "fraction" or in *pixels. Use `len` to set the value. 
The 'lenmode' property is an enumeration that may be specified as: - One of the following enumeration values: ['fraction', 'pixels'] Returns ------- Any """ return self["lenmode"] @lenmode.setter def lenmode(self, val): self["lenmode"] = val @property def minexponent(self): """ Hide SI prefix for 10^n if |n| is below this number. This only has an effect when `tickformat` is "SI" or "B". The 'minexponent' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["minexponent"] @minexponent.setter def minexponent(self, val): self["minexponent"] = val @property def nticks(self): """ Specifies the maximum number of ticks for the particular axis. The actual number of ticks will be chosen automatically to be less than or equal to `nticks`. Has an effect only if `tickmode` is set to "auto". The 'nticks' property is a integer and may be specified as: - An int (or float that will be cast to an int) in the interval [0, 9223372036854775807] Returns ------- int """ return self["nticks"] @nticks.setter def nticks(self, val): self["nticks"] = val @property def orientation(self): """ Sets the orientation of the colorbar. The 'orientation' property is an enumeration that may be specified as: - One of the following enumeration values: ['h', 'v'] Returns ------- Any """ return self["orientation"] @orientation.setter def orientation(self, val): self["orientation"] = val @property def outlinecolor(self): """ Sets the axis line color. The 'outlinecolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 
'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["outlinecolor"] @outlinecolor.setter def outlinecolor(self, val): self["outlinecolor"] = val @property def outlinewidth(self): """ Sets the width (in px) of the axis line. The 'outlinewidth' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["outlinewidth"] @outlinewidth.setter def outlinewidth(self, val): self["outlinewidth"] = val @property def separatethousands(self): """ If "true", even 4-digit integers are separated The 'separatethousands' property must be specified as a bool (either True, or False) Returns ------- bool """ return self["separatethousands"] @separatethousands.setter def separatethousands(self, val): self["separatethousands"] = val @property def showexponent(self): """ If "all", all exponents are shown besides their significands. If "first", only the exponent of the first tick is shown. If "last", only the exponent of the last tick is shown. If "none", no exponents appear. The 'showexponent' property is an enumeration that may be specified as: - One of the following enumeration values: ['all', 'first', 'last', 'none'] Returns ------- Any """ return self["showexponent"] @showexponent.setter def showexponent(self, val): self["showexponent"] = val @property def showticklabels(self): """ Determines whether or not the tick labels are drawn. The 'showticklabels' property must be specified as a bool (either True, or False) Returns ------- bool """ return self["showticklabels"] @showticklabels.setter def showticklabels(self, val): self["showticklabels"] = val @property def showtickprefix(self): """ If "all", all tick labels are displayed with a prefix. If "first", only the first tick is displayed with a prefix. If "last", only the last tick is displayed with a suffix. If "none", tick prefixes are hidden. 
The 'showtickprefix' property is an enumeration that may be specified as: - One of the following enumeration values: ['all', 'first', 'last', 'none'] Returns ------- Any """ return self["showtickprefix"] @showtickprefix.setter def showtickprefix(self, val): self["showtickprefix"] = val @property def showticksuffix(self): """ Same as `showtickprefix` but for tick suffixes. The 'showticksuffix' property is an enumeration that may be specified as: - One of the following enumeration values: ['all', 'first', 'last', 'none'] Returns ------- Any """ return self["showticksuffix"] @showticksuffix.setter def showticksuffix(self, val): self["showticksuffix"] = val @property def thickness(self): """ Sets the thickness of the color bar This measure excludes the size of the padding, ticks and labels. The 'thickness' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["thickness"] @thickness.setter def thickness(self, val): self["thickness"] = val @property def thicknessmode(self): """ Determines whether this color bar's thickness (i.e. the measure in the constant color direction) is set in units of plot "fraction" or in "pixels". Use `thickness` to set the value. The 'thicknessmode' property is an enumeration that may be specified as: - One of the following enumeration values: ['fraction', 'pixels'] Returns ------- Any """ return self["thicknessmode"] @thicknessmode.setter def thicknessmode(self, val): self["thicknessmode"] = val @property def tick0(self): """ Sets the placement of the first tick on this axis. Use with `dtick`. If the axis `type` is "log", then you must take the log of your starting tick (e.g. to set the starting tick to 100, set the `tick0` to 2) except when `dtick`=*L<f>* (see `dtick` for more info). If the axis `type` is "date", it should be a date string, like date data. 
If the axis `type` is "category", it should be a number, using the scale where each category is assigned a serial number from zero in the order it appears. The 'tick0' property accepts values of any type Returns ------- Any """ return self["tick0"] @tick0.setter def tick0(self, val): self["tick0"] = val @property def tickangle(self): """ Sets the angle of the tick labels with respect to the horizontal. For example, a `tickangle` of -90 draws the tick labels vertically. The 'tickangle' property is a angle (in degrees) that may be specified as a number between -180 and 180. Numeric values outside this range are converted to the equivalent value (e.g. 270 is converted to -90). Returns ------- int|float """ return self["tickangle"] @tickangle.setter def tickangle(self, val): self["tickangle"] = val @property def tickcolor(self): """ Sets the tick color. The 'tickcolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["tickcolor"] @tickcolor.setter def tickcolor(self, val): self["tickcolor"] = val @property def tickfont(self): """ Sets the color bar's tick label font The 'tickfont' property is an instance of Tickfont that may be specified as: - An instance of :class:`plotly.graph_objs.parcats.line.colorbar.Tickfont` - A dict of string/value properties that will be passed to the Tickfont constructor Returns ------- plotly.graph_objs.parcats.line.colorbar.Tickfont """ return self["tickfont"] @tickfont.setter def tickfont(self, val): self["tickfont"] = val @property def tickformat(self): """ Sets the tick label formatting rule using d3 formatting mini- languages which are very similar to those in Python. For numbers, see: https://github.com/d3/d3-format/tree/v1.4.5#d3-format. 
And for dates see: https://github.com/d3/d3-time- format/tree/v2.2.3#locale_format. We add two items to d3's date formatter: "%h" for half of the year as a decimal number as well as "%{n}f" for fractional seconds with n digits. For example, *2016-10-13 09:15:23.456* with tickformat "%H~%M~%S.%2f" would display "09~15~23.46" The 'tickformat' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["tickformat"] @tickformat.setter def tickformat(self, val): self["tickformat"] = val @property def tickformatstops(self): """ The 'tickformatstops' property is a tuple of instances of Tickformatstop that may be specified as: - A list or tuple of instances of plotly.graph_objs.parcats.line.colorbar.Tickformatstop - A list or tuple of dicts of string/value properties that will be passed to the Tickformatstop constructor Returns ------- tuple[plotly.graph_objs.parcats.line.colorbar.Tickformatstop] """ return self["tickformatstops"] @tickformatstops.setter def tickformatstops(self, val): self["tickformatstops"] = val @property def tickformatstopdefaults(self): """ When used in a template (as layout.template.data.parcats.line.c olorbar.tickformatstopdefaults), sets the default property values to use for elements of parcats.line.colorbar.tickformatstops The 'tickformatstopdefaults' property is an instance of Tickformatstop that may be specified as: - An instance of :class:`plotly.graph_objs.parcats.line.colorbar.Tickformatstop` - A dict of string/value properties that will be passed to the Tickformatstop constructor Returns ------- plotly.graph_objs.parcats.line.colorbar.Tickformatstop """ return self["tickformatstopdefaults"] @tickformatstopdefaults.setter def tickformatstopdefaults(self, val): self["tickformatstopdefaults"] = val @property def ticklabeloverflow(self): """ Determines how we handle tick labels that would overflow either the graph div or the domain of the axis. 
The default value for inside tick labels is *hide past domain*. In other cases the default is *hide past div*. The 'ticklabeloverflow' property is an enumeration that may be specified as: - One of the following enumeration values: ['allow', 'hide past div', 'hide past domain'] Returns ------- Any """ return self["ticklabeloverflow"] @ticklabeloverflow.setter def ticklabeloverflow(self, val): self["ticklabeloverflow"] = val @property def ticklabelposition(self): """ Determines where tick labels are drawn relative to the ticks. Left and right options are used when `orientation` is "h", top and bottom when `orientation` is "v". The 'ticklabelposition' property is an enumeration that may be specified as: - One of the following enumeration values: ['outside', 'inside', 'outside top', 'inside top', 'outside left', 'inside left', 'outside right', 'inside right', 'outside bottom', 'inside bottom'] Returns ------- Any """ return self["ticklabelposition"] @ticklabelposition.setter def ticklabelposition(self, val): self["ticklabelposition"] = val @property def ticklabelstep(self): """ Sets the spacing between tick labels as compared to the spacing between ticks. A value of 1 (default) means each tick gets a label. A value of 2 means shows every 2nd label. A larger value n means only every nth tick is labeled. `tick0` determines which labels are shown. Not implemented for axes with `type` "log" or "multicategory", or when `tickmode` is "array". The 'ticklabelstep' property is a integer and may be specified as: - An int (or float that will be cast to an int) in the interval [1, 9223372036854775807] Returns ------- int """ return self["ticklabelstep"] @ticklabelstep.setter def ticklabelstep(self, val): self["ticklabelstep"] = val @property def ticklen(self): """ Sets the tick length (in px). 
The 'ticklen' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["ticklen"] @ticklen.setter def ticklen(self, val): self["ticklen"] = val @property def tickmode(self): """ Sets the tick mode for this axis. If "auto", the number of ticks is set via `nticks`. If "linear", the placement of the ticks is determined by a starting position `tick0` and a tick step `dtick` ("linear" is the default value if `tick0` and `dtick` are provided). If "array", the placement of the ticks is set via `tickvals` and the tick text is `ticktext`. ("array" is the default value if `tickvals` is provided). The 'tickmode' property is an enumeration that may be specified as: - One of the following enumeration values: ['auto', 'linear', 'array'] Returns ------- Any """ return self["tickmode"] @tickmode.setter def tickmode(self, val): self["tickmode"] = val @property def tickprefix(self): """ Sets a tick label prefix. The 'tickprefix' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["tickprefix"] @tickprefix.setter def tickprefix(self, val): self["tickprefix"] = val @property def ticks(self): """ Determines whether ticks are drawn or not. If "", this axis' ticks are not drawn. If "outside" ("inside"), this axis' are drawn outside (inside) the axis lines. The 'ticks' property is an enumeration that may be specified as: - One of the following enumeration values: ['outside', 'inside', ''] Returns ------- Any """ return self["ticks"] @ticks.setter def ticks(self, val): self["ticks"] = val @property def ticksuffix(self): """ Sets a tick label suffix. 
The 'ticksuffix' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["ticksuffix"] @ticksuffix.setter def ticksuffix(self, val): self["ticksuffix"] = val @property def ticktext(self): """ Sets the text displayed at the ticks position via `tickvals`. Only has an effect if `tickmode` is set to "array". Used with `tickvals`. The 'ticktext' property is an array that may be specified as a tuple, list, numpy array, or pandas Series Returns ------- numpy.ndarray """ return self["ticktext"] @ticktext.setter def ticktext(self, val): self["ticktext"] = val @property def ticktextsrc(self): """ Sets the source reference on Chart Studio Cloud for `ticktext`. The 'ticktextsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["ticktextsrc"] @ticktextsrc.setter def ticktextsrc(self, val): self["ticktextsrc"] = val @property def tickvals(self): """ Sets the values at which ticks on this axis appear. Only has an effect if `tickmode` is set to "array". Used with `ticktext`. The 'tickvals' property is an array that may be specified as a tuple, list, numpy array, or pandas Series Returns ------- numpy.ndarray """ return self["tickvals"] @tickvals.setter def tickvals(self, val): self["tickvals"] = val @property def tickvalssrc(self): """ Sets the source reference on Chart Studio Cloud for `tickvals`. The 'tickvalssrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["tickvalssrc"] @tickvalssrc.setter def tickvalssrc(self, val): self["tickvalssrc"] = val @property def tickwidth(self): """ Sets the tick width (in px). 
The 'tickwidth' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["tickwidth"] @tickwidth.setter def tickwidth(self, val): self["tickwidth"] = val @property def title(self): """ The 'title' property is an instance of Title that may be specified as: - An instance of :class:`plotly.graph_objs.parcats.line.colorbar.Title` - A dict of string/value properties that will be passed to the Title constructor Returns ------- plotly.graph_objs.parcats.line.colorbar.Title """ return self["title"] @title.setter def title(self, val): self["title"] = val @property def x(self): """ Sets the x position with respect to `xref` of the color bar (in plot fraction). When `xref` is "paper", defaults to 1.02 when `orientation` is "v" and 0.5 when `orientation` is "h". When `xref` is "container", defaults to 1 when `orientation` is "v" and 0.5 when `orientation` is "h". Must be between 0 and 1 if `xref` is "container" and between "-2" and 3 if `xref` is "paper". The 'x' property is a number and may be specified as: - An int or float Returns ------- int|float """ return self["x"] @x.setter def x(self, val): self["x"] = val @property def xanchor(self): """ Sets this color bar's horizontal position anchor. This anchor binds the `x` position to the "left", "center" or "right" of the color bar. Defaults to "left" when `orientation` is "v" and "center" when `orientation` is "h". The 'xanchor' property is an enumeration that may be specified as: - One of the following enumeration values: ['left', 'center', 'right'] Returns ------- Any """ return self["xanchor"] @xanchor.setter def xanchor(self, val): self["xanchor"] = val @property def xpad(self): """ Sets the amount of padding (in px) along the x direction. 
The 'xpad' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["xpad"] @xpad.setter def xpad(self, val): self["xpad"] = val @property def xref(self): """ Sets the container `x` refers to. "container" spans the entire `width` of the plot. "paper" refers to the width of the plotting area only. The 'xref' property is an enumeration that may be specified as: - One of the following enumeration values: ['container', 'paper'] Returns ------- Any """ return self["xref"] @xref.setter def xref(self, val): self["xref"] = val @property def y(self): """ Sets the y position with respect to `yref` of the color bar (in plot fraction). When `yref` is "paper", defaults to 0.5 when `orientation` is "v" and 1.02 when `orientation` is "h". When `yref` is "container", defaults to 0.5 when `orientation` is "v" and 1 when `orientation` is "h". Must be between 0 and 1 if `yref` is "container" and between "-2" and 3 if `yref` is "paper". The 'y' property is a number and may be specified as: - An int or float Returns ------- int|float """ return self["y"] @y.setter def y(self, val): self["y"] = val @property def yanchor(self): """ Sets this color bar's vertical position anchor This anchor binds the `y` position to the "top", "middle" or "bottom" of the color bar. Defaults to "middle" when `orientation` is "v" and "bottom" when `orientation` is "h". The 'yanchor' property is an enumeration that may be specified as: - One of the following enumeration values: ['top', 'middle', 'bottom'] Returns ------- Any """ return self["yanchor"] @yanchor.setter def yanchor(self, val): self["yanchor"] = val @property def ypad(self): """ Sets the amount of padding (in px) along the y direction. 
The 'ypad' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["ypad"] @ypad.setter def ypad(self, val): self["ypad"] = val @property def yref(self): """ Sets the container `y` refers to. "container" spans the entire `height` of the plot. "paper" refers to the height of the plotting area only. The 'yref' property is an enumeration that may be specified as: - One of the following enumeration values: ['container', 'paper'] Returns ------- Any """ return self["yref"] @yref.setter def yref(self, val): self["yref"] = val @property def _prop_descriptions(self): return """\ bgcolor Sets the color of padded area. bordercolor Sets the axis line color. borderwidth Sets the width (in px) or the border enclosing this color bar. dtick Sets the step in-between ticks on this axis. Use with `tick0`. Must be a positive number, or special strings available to "log" and "date" axes. If the axis `type` is "log", then ticks are set every 10^(n*dtick) where n is the tick number. For example, to set a tick mark at 1, 10, 100, 1000, ... set dtick to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2. To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to log_10(5), or 0.69897000433. "log" has several special values; "L<f>", where `f` is a positive number, gives ticks linearly spaced in value (but not position). For example `tick0` = 0.1, `dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus small digits between, use "D1" (all digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and "D2". If the axis `type` is "date", then you must convert the time to milliseconds. For example, to set the interval between ticks to one day, set `dtick` to 86400000.0. "date" also has special values "M<n>" gives ticks spaced by a number of months. `n` must be a positive integer. To set ticks on the 15th of every third month, set `tick0` to "2000-01-15" and `dtick` to "M3". 
To set ticks every 4 years, set `dtick` to "M48" exponentformat Determines a formatting rule for the tick exponents. For example, consider the number 1,000,000,000. If "none", it appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If "power", 1x10^9 (with 9 in a super script). If "SI", 1G. If "B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T (10^12). *SI extended* covers instead the full SI range from "quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI extended* is used and the exponent is beyond the above ranges, the formatting rule will automatically be switched to the power notation. labelalias Replacement text for specific tick or hover labels. For example using {US: 'USA', CA: 'Canada'} changes US to USA and CA to Canada. The labels we would have shown must match the keys exactly, after adding any tickprefix or ticksuffix. For negative numbers the minus sign symbol used (U+2212) is wider than the regular ascii dash. That means you need to use −1 instead of -1. labelalias can be used with any axis type, and both keys (if needed) and values (if desired) can include html-like tags or MathJax. len Sets the length of the color bar This measure excludes the padding of both ends. That is, the color bar length is this length minus the padding on both ends. lenmode Determines whether this color bar's length (i.e. the measure in the color variation direction) is set in units of plot "fraction" or in *pixels. Use `len` to set the value. minexponent Hide SI prefix for 10^n if |n| is below this number. This only has an effect when `tickformat` is "SI" or "B". nticks Specifies the maximum number of ticks for the particular axis. The actual number of ticks will be chosen automatically to be less than or equal to `nticks`. Has an effect only if `tickmode` is set to "auto". orientation Sets the orientation of the colorbar. outlinecolor Sets the axis line color. outlinewidth Sets the width (in px) of the axis line. 
separatethousands If "true", even 4-digit integers are separated showexponent If "all", all exponents are shown besides their significands. If "first", only the exponent of the first tick is shown. If "last", only the exponent of the last tick is shown. If "none", no exponents appear. showticklabels Determines whether or not the tick labels are drawn. showtickprefix If "all", all tick labels are displayed with a prefix. If "first", only the first tick is displayed with a prefix. If "last", only the last tick is displayed with a suffix. If "none", tick prefixes are hidden. showticksuffix Same as `showtickprefix` but for tick suffixes. thickness Sets the thickness of the color bar This measure excludes the size of the padding, ticks and labels. thicknessmode Determines whether this color bar's thickness (i.e. the measure in the constant color direction) is set in units of plot "fraction" or in "pixels". Use `thickness` to set the value. tick0 Sets the placement of the first tick on this axis. Use with `dtick`. If the axis `type` is "log", then you must take the log of your starting tick (e.g. to set the starting tick to 100, set the `tick0` to 2) except when `dtick`=*L<f>* (see `dtick` for more info). If the axis `type` is "date", it should be a date string, like date data. If the axis `type` is "category", it should be a number, using the scale where each category is assigned a serial number from zero in the order it appears. tickangle Sets the angle of the tick labels with respect to the horizontal. For example, a `tickangle` of -90 draws the tick labels vertically. tickcolor Sets the tick color. tickfont Sets the color bar's tick label font tickformat Sets the tick label formatting rule using d3 formatting mini-languages which are very similar to those in Python. For numbers, see: https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for dates see: https://github.com/d3/d3-time- format/tree/v2.2.3#locale_format. 
We add two items to d3's date formatter: "%h" for half of the year as a decimal number as well as "%{n}f" for fractional seconds with n digits. For example, *2016-10-13 09:15:23.456* with tickformat "%H~%M~%S.%2f" would display "09~15~23.46" tickformatstops A tuple of :class:`plotly.graph_objects.parcats.line.co lorbar.Tickformatstop` instances or dicts with compatible properties tickformatstopdefaults When used in a template (as layout.template.data.parcat s.line.colorbar.tickformatstopdefaults), sets the default property values to use for elements of parcats.line.colorbar.tickformatstops ticklabeloverflow Determines how we handle tick labels that would overflow either the graph div or the domain of the axis. The default value for inside tick labels is *hide past domain*. In other cases the default is *hide past div*. ticklabelposition Determines where tick labels are drawn relative to the ticks. Left and right options are used when `orientation` is "h", top and bottom when `orientation` is "v". ticklabelstep Sets the spacing between tick labels as compared to the spacing between ticks. A value of 1 (default) means each tick gets a label. A value of 2 means shows every 2nd label. A larger value n means only every nth tick is labeled. `tick0` determines which labels are shown. Not implemented for axes with `type` "log" or "multicategory", or when `tickmode` is "array". ticklen Sets the tick length (in px). tickmode Sets the tick mode for this axis. If "auto", the number of ticks is set via `nticks`. If "linear", the placement of the ticks is determined by a starting position `tick0` and a tick step `dtick` ("linear" is the default value if `tick0` and `dtick` are provided). If "array", the placement of the ticks is set via `tickvals` and the tick text is `ticktext`. ("array" is the default value if `tickvals` is provided). tickprefix Sets a tick label prefix. ticks Determines whether ticks are drawn or not. If "", this axis' ticks are not drawn. 
If "outside" ("inside"), this axis' are drawn outside (inside) the axis lines. ticksuffix Sets a tick label suffix. ticktext Sets the text displayed at the ticks position via `tickvals`. Only has an effect if `tickmode` is set to "array". Used with `tickvals`. ticktextsrc Sets the source reference on Chart Studio Cloud for `ticktext`. tickvals Sets the values at which ticks on this axis appear. Only has an effect if `tickmode` is set to "array". Used with `ticktext`. tickvalssrc Sets the source reference on Chart Studio Cloud for `tickvals`. tickwidth Sets the tick width (in px). title :class:`plotly.graph_objects.parcats.line.colorbar.Titl e` instance or dict with compatible properties x Sets the x position with respect to `xref` of the color bar (in plot fraction). When `xref` is "paper", defaults to 1.02 when `orientation` is "v" and 0.5 when `orientation` is "h". When `xref` is "container", defaults to 1 when `orientation` is "v" and 0.5 when `orientation` is "h". Must be between 0 and 1 if `xref` is "container" and between "-2" and 3 if `xref` is "paper". xanchor Sets this color bar's horizontal position anchor. This anchor binds the `x` position to the "left", "center" or "right" of the color bar. Defaults to "left" when `orientation` is "v" and "center" when `orientation` is "h". xpad Sets the amount of padding (in px) along the x direction. xref Sets the container `x` refers to. "container" spans the entire `width` of the plot. "paper" refers to the width of the plotting area only. y Sets the y position with respect to `yref` of the color bar (in plot fraction). When `yref` is "paper", defaults to 0.5 when `orientation` is "v" and 1.02 when `orientation` is "h". When `yref` is "container", defaults to 0.5 when `orientation` is "v" and 1 when `orientation` is "h". Must be between 0 and 1 if `yref` is "container" and between "-2" and 3 if `yref` is "paper". 
yanchor Sets this color bar's vertical position anchor This anchor binds the `y` position to the "top", "middle" or "bottom" of the color bar. Defaults to "middle" when `orientation` is "v" and "bottom" when `orientation` is "h". ypad Sets the amount of padding (in px) along the y direction. yref Sets the container `y` refers to. "container" spans the entire `height` of the plot. "paper" refers to the height of the plotting area only. """ def __init__( self, arg=None, bgcolor=None, bordercolor=None, borderwidth=None, dtick=None, exponentformat=None, labelalias=None, len=None, lenmode=None, minexponent=None, nticks=None, orientation=None, outlinecolor=None, outlinewidth=None, separatethousands=None, showexponent=None, showticklabels=None, showtickprefix=None, showticksuffix=None, thickness=None, thicknessmode=None, tick0=None, tickangle=None, tickcolor=None, tickfont=None, tickformat=None, tickformatstops=None, tickformatstopdefaults=None, ticklabeloverflow=None, ticklabelposition=None, ticklabelstep=None, ticklen=None, tickmode=None, tickprefix=None, ticks=None, ticksuffix=None, ticktext=None, ticktextsrc=None, tickvals=None, tickvalssrc=None, tickwidth=None, title=None, x=None, xanchor=None, xpad=None, xref=None, y=None, yanchor=None, ypad=None, yref=None, **kwargs, ): """ Construct a new ColorBar object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.parcats.line.ColorBar` bgcolor Sets the color of padded area. bordercolor Sets the axis line color. borderwidth Sets the width (in px) or the border enclosing this color bar. dtick Sets the step in-between ticks on this axis. Use with `tick0`. Must be a positive number, or special strings available to "log" and "date" axes. If the axis `type` is "log", then ticks are set every 10^(n*dtick) where n is the tick number. For example, to set a tick mark at 1, 10, 100, 1000, ... set dtick to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2. 
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to log_10(5), or 0.69897000433. "log" has several special values; "L<f>", where `f` is a positive number, gives ticks linearly spaced in value (but not position). For example `tick0` = 0.1, `dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus small digits between, use "D1" (all digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and "D2". If the axis `type` is "date", then you must convert the time to milliseconds. For example, to set the interval between ticks to one day, set `dtick` to 86400000.0. "date" also has special values "M<n>" gives ticks spaced by a number of months. `n` must be a positive integer. To set ticks on the 15th of every third month, set `tick0` to "2000-01-15" and `dtick` to "M3". To set ticks every 4 years, set `dtick` to "M48" exponentformat Determines a formatting rule for the tick exponents. For example, consider the number 1,000,000,000. If "none", it appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If "power", 1x10^9 (with 9 in a super script). If "SI", 1G. If "B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T (10^12). *SI extended* covers instead the full SI range from "quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI extended* is used and the exponent is beyond the above ranges, the formatting rule will automatically be switched to the power notation. labelalias Replacement text for specific tick or hover labels. For example using {US: 'USA', CA: 'Canada'} changes US to USA and CA to Canada. The labels we would have shown must match the keys exactly, after adding any tickprefix or ticksuffix. For negative numbers the minus sign symbol used (U+2212) is wider than the regular ascii dash. That means you need to use −1 instead of -1. labelalias can be used with any axis type, and both keys (if needed) and values (if desired) can include html-like tags or MathJax. 
len Sets the length of the color bar This measure excludes the padding of both ends. That is, the color bar length is this length minus the padding on both ends. lenmode Determines whether this color bar's length (i.e. the measure in the color variation direction) is set in units of plot "fraction" or in *pixels. Use `len` to set the value. minexponent Hide SI prefix for 10^n if |n| is below this number. This only has an effect when `tickformat` is "SI" or "B". nticks Specifies the maximum number of ticks for the particular axis. The actual number of ticks will be chosen automatically to be less than or equal to `nticks`. Has an effect only if `tickmode` is set to "auto". orientation Sets the orientation of the colorbar. outlinecolor Sets the axis line color. outlinewidth Sets the width (in px) of the axis line. separatethousands If "true", even 4-digit integers are separated showexponent If "all", all exponents are shown besides their significands. If "first", only the exponent of the first tick is shown. If "last", only the exponent of the last tick is shown. If "none", no exponents appear. showticklabels Determines whether or not the tick labels are drawn. showtickprefix If "all", all tick labels are displayed with a prefix. If "first", only the first tick is displayed with a prefix. If "last", only the last tick is displayed with a suffix. If "none", tick prefixes are hidden. showticksuffix Same as `showtickprefix` but for tick suffixes. thickness Sets the thickness of the color bar This measure excludes the size of the padding, ticks and labels. thicknessmode Determines whether this color bar's thickness (i.e. the measure in the constant color direction) is set in units of plot "fraction" or in "pixels". Use `thickness` to set the value. tick0 Sets the placement of the first tick on this axis. Use with `dtick`. If the axis `type` is "log", then you must take the log of your starting tick (e.g. 
to set the starting tick to 100, set the `tick0` to 2) except when `dtick`=*L<f>* (see `dtick` for more info). If the axis `type` is "date", it should be a date string, like date data. If the axis `type` is "category", it should be a number, using the scale where each category is assigned a serial number from zero in the order it appears. tickangle Sets the angle of the tick labels with respect to the horizontal. For example, a `tickangle` of -90 draws the tick labels vertically. tickcolor Sets the tick color. tickfont Sets the color bar's tick label font tickformat Sets the tick label formatting rule using d3 formatting mini-languages which are very similar to those in Python. For numbers, see: https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for dates see: https://github.com/d3/d3-time- format/tree/v2.2.3#locale_format. We add two items to d3's date formatter: "%h" for half of the year as a decimal number as well as "%{n}f" for fractional seconds with n digits. For example, *2016-10-13 09:15:23.456* with tickformat "%H~%M~%S.%2f" would display "09~15~23.46" tickformatstops A tuple of :class:`plotly.graph_objects.parcats.line.co lorbar.Tickformatstop` instances or dicts with compatible properties tickformatstopdefaults When used in a template (as layout.template.data.parcat s.line.colorbar.tickformatstopdefaults), sets the default property values to use for elements of parcats.line.colorbar.tickformatstops ticklabeloverflow Determines how we handle tick labels that would overflow either the graph div or the domain of the axis. The default value for inside tick labels is *hide past domain*. In other cases the default is *hide past div*. ticklabelposition Determines where tick labels are drawn relative to the ticks. Left and right options are used when `orientation` is "h", top and bottom when `orientation` is "v". ticklabelstep Sets the spacing between tick labels as compared to the spacing between ticks. 
A value of 1 (default) means each tick gets a label. A value of 2 means shows every 2nd label. A larger value n means only every nth tick is labeled. `tick0` determines which labels are shown. Not implemented for axes with `type` "log" or "multicategory", or when `tickmode` is "array". ticklen Sets the tick length (in px). tickmode Sets the tick mode for this axis. If "auto", the number of ticks is set via `nticks`. If "linear", the placement of the ticks is determined by a starting position `tick0` and a tick step `dtick` ("linear" is the default value if `tick0` and `dtick` are provided). If "array", the placement of the ticks is set via `tickvals` and the tick text is `ticktext`. ("array" is the default value if `tickvals` is provided). tickprefix Sets a tick label prefix. ticks Determines whether ticks are drawn or not. If "", this axis' ticks are not drawn. If "outside" ("inside"), this axis' are drawn outside (inside) the axis lines. ticksuffix Sets a tick label suffix. ticktext Sets the text displayed at the ticks position via `tickvals`. Only has an effect if `tickmode` is set to "array". Used with `tickvals`. ticktextsrc Sets the source reference on Chart Studio Cloud for `ticktext`. tickvals Sets the values at which ticks on this axis appear. Only has an effect if `tickmode` is set to "array". Used with `ticktext`. tickvalssrc Sets the source reference on Chart Studio Cloud for `tickvals`. tickwidth Sets the tick width (in px). title :class:`plotly.graph_objects.parcats.line.colorbar.Titl e` instance or dict with compatible properties x Sets the x position with respect to `xref` of the color bar (in plot fraction). When `xref` is "paper", defaults to 1.02 when `orientation` is "v" and 0.5 when `orientation` is "h". When `xref` is "container", defaults to 1 when `orientation` is "v" and 0.5 when `orientation` is "h". Must be between 0 and 1 if `xref` is "container" and between "-2" and 3 if `xref` is "paper". 
xanchor Sets this color bar's horizontal position anchor. This anchor binds the `x` position to the "left", "center" or "right" of the color bar. Defaults to "left" when `orientation` is "v" and "center" when `orientation` is "h". xpad Sets the amount of padding (in px) along the x direction. xref Sets the container `x` refers to. "container" spans the entire `width` of the plot. "paper" refers to the width of the plotting area only. y Sets the y position with respect to `yref` of the color bar (in plot fraction). When `yref` is "paper", defaults to 0.5 when `orientation` is "v" and 1.02 when `orientation` is "h". When `yref` is "container", defaults to 0.5 when `orientation` is "v" and 1 when `orientation` is "h". Must be between 0 and 1 if `yref` is "container" and between "-2" and 3 if `yref` is "paper". yanchor Sets this color bar's vertical position anchor This anchor binds the `y` position to the "top", "middle" or "bottom" of the color bar. Defaults to "middle" when `orientation` is "v" and "bottom" when `orientation` is "h". ypad Sets the amount of padding (in px) along the y direction. yref Sets the container `y` refers to. "container" spans the entire `height` of the plot. "paper" refers to the height of the plotting area only. 
Returns ------- ColorBar """ super().__init__("colorbar") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.parcats.line.ColorBar constructor must be a dict or an instance of :class:`plotly.graph_objs.parcats.line.ColorBar`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("bgcolor", arg, bgcolor) self._set_property("bordercolor", arg, bordercolor) self._set_property("borderwidth", arg, borderwidth) self._set_property("dtick", arg, dtick) self._set_property("exponentformat", arg, exponentformat) self._set_property("labelalias", arg, labelalias) self._set_property("len", arg, len) self._set_property("lenmode", arg, lenmode) self._set_property("minexponent", arg, minexponent) self._set_property("nticks", arg, nticks) self._set_property("orientation", arg, orientation) self._set_property("outlinecolor", arg, outlinecolor) self._set_property("outlinewidth", arg, outlinewidth) self._set_property("separatethousands", arg, separatethousands) self._set_property("showexponent", arg, showexponent) self._set_property("showticklabels", arg, showticklabels) self._set_property("showtickprefix", arg, showtickprefix) self._set_property("showticksuffix", arg, showticksuffix) self._set_property("thickness", arg, thickness) self._set_property("thicknessmode", arg, thicknessmode) self._set_property("tick0", arg, tick0) self._set_property("tickangle", arg, tickangle) self._set_property("tickcolor", arg, tickcolor) self._set_property("tickfont", arg, tickfont) self._set_property("tickformat", arg, tickformat) self._set_property("tickformatstops", arg, tickformatstops) self._set_property("tickformatstopdefaults", arg, tickformatstopdefaults) self._set_property("ticklabeloverflow", arg, 
ticklabeloverflow) self._set_property("ticklabelposition", arg, ticklabelposition) self._set_property("ticklabelstep", arg, ticklabelstep) self._set_property("ticklen", arg, ticklen) self._set_property("tickmode", arg, tickmode) self._set_property("tickprefix", arg, tickprefix) self._set_property("ticks", arg, ticks) self._set_property("ticksuffix", arg, ticksuffix) self._set_property("ticktext", arg, ticktext) self._set_property("ticktextsrc", arg, ticktextsrc) self._set_property("tickvals", arg, tickvals) self._set_property("tickvalssrc", arg, tickvalssrc) self._set_property("tickwidth", arg, tickwidth) self._set_property("title", arg, title) self._set_property("x", arg, x) self._set_property("xanchor", arg, xanchor) self._set_property("xpad", arg, xpad) self._set_property("xref", arg, xref) self._set_property("y", arg, y) self._set_property("yanchor", arg, yanchor) self._set_property("ypad", arg, ypad) self._set_property("yref", arg, yref) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
ColorBar
python
doocs__leetcode
solution/0700-0799/0763.Partition Labels/Solution.py
{ "start": 0, "end": 326 }
class ____: def partitionLabels(self, s: str) -> List[int]: last = {c: i for i, c in enumerate(s)} mx = j = 0 ans = [] for i, c in enumerate(s): mx = max(mx, last[c]) if mx == i: ans.append(i - j + 1) j = i + 1 return ans
Solution
python
sympy__sympy
sympy/plotting/tests/test_plot.py
{ "start": 1807, "end": 48217 }
class ____(Plot): """ Used to verify if users can create their own backends. This backend is meant to pass all tests. """ def __new__(cls, *args, **kwargs): return object.__new__(cls) def show(self): pass def save(self): pass def close(self): pass def test_basic_plotting_backend(): x = Symbol('x') plot(x, (x, 0, 3), backend='text') plot(x**2 + 1, (x, 0, 3), backend='text') @pytest.mark.parametrize("adaptive", [True, False]) def test_plot_and_save_1(adaptive): if not matplotlib: skip("Matplotlib not the default backend") x = Symbol('x') y = Symbol('y') with TemporaryDirectory(prefix='sympy_') as tmpdir: ### # Examples from the 'introduction' notebook ### p = plot(x, legend=True, label='f1', adaptive=adaptive, n=10) p = plot(x*sin(x), x*cos(x), label='f2', adaptive=adaptive, n=10) p.extend(p) p[0].line_color = lambda a: a p[1].line_color = 'b' p.title = 'Big title' p.xlabel = 'the x axis' p[1].label = 'straight line' p.legend = True p.aspect_ratio = (1, 1) p.xlim = (-15, 20) filename = 'test_basic_options_and_colors.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() p.extend(plot(x + 1, adaptive=adaptive, n=10)) p.append(plot(x + 3, x**2, adaptive=adaptive, n=10)[1]) filename = 'test_plot_extend_append.png' p.save(os.path.join(tmpdir, filename)) p[2] = plot(x**2, (x, -2, 3), adaptive=adaptive, n=10) filename = 'test_plot_setitem.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() p = plot(sin(x), (x, -2*pi, 4*pi), adaptive=adaptive, n=10) filename = 'test_line_explicit.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() p = plot(sin(x), adaptive=adaptive, n=10) filename = 'test_line_default_range.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() p = plot((x**2, (x, -5, 5)), (x**3, (x, -3, 3)), adaptive=adaptive, n=10) filename = 'test_line_multiple_range.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() raises(ValueError, lambda: plot(x, y)) #Piecewise plots p = plot(Piecewise((1, x > 0), (0, True)), (x, -1, 
1), adaptive=adaptive, n=10) filename = 'test_plot_piecewise.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() p = plot(Piecewise((x, x < 1), (x**2, True)), (x, -3, 3), adaptive=adaptive, n=10) filename = 'test_plot_piecewise_2.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() # test issue 7471 p1 = plot(x, adaptive=adaptive, n=10) p2 = plot(3, adaptive=adaptive, n=10) p1.extend(p2) filename = 'test_horizontal_line.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() # test issue 10925 f = Piecewise((-1, x < -1), (x, And(-1 <= x, x < 0)), \ (x**2, And(0 <= x, x < 1)), (x**3, x >= 1)) p = plot(f, (x, -3, 3), adaptive=adaptive, n=10) filename = 'test_plot_piecewise_3.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() @pytest.mark.parametrize("adaptive", [True, False]) def test_plot_and_save_2(adaptive): if not matplotlib: skip("Matplotlib not the default backend") x = Symbol('x') y = Symbol('y') z = Symbol('z') with TemporaryDirectory(prefix='sympy_') as tmpdir: #parametric 2d plots. #Single plot with default range. p = plot_parametric(sin(x), cos(x), adaptive=adaptive, n=10) filename = 'test_parametric.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() #Single plot with range. p = plot_parametric( sin(x), cos(x), (x, -5, 5), legend=True, label='parametric_plot', adaptive=adaptive, n=10) filename = 'test_parametric_range.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() #Multiple plots with same range. p = plot_parametric((sin(x), cos(x)), (x, sin(x)), adaptive=adaptive, n=10) filename = 'test_parametric_multiple.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() #Multiple plots with different ranges. p = plot_parametric( (sin(x), cos(x), (x, -3, 3)), (x, sin(x), (x, -5, 5)), adaptive=adaptive, n=10) filename = 'test_parametric_multiple_ranges.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() #depth of recursion specified. 
p = plot_parametric(x, sin(x), depth=13, adaptive=adaptive, n=10) filename = 'test_recursion_depth.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() #No adaptive sampling. p = plot_parametric(cos(x), sin(x), adaptive=False, n=500) filename = 'test_adaptive.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() #3d parametric plots p = plot3d_parametric_line( sin(x), cos(x), x, legend=True, label='3d_parametric_plot', adaptive=adaptive, n=10) filename = 'test_3d_line.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() p = plot3d_parametric_line( (sin(x), cos(x), x, (x, -5, 5)), (cos(x), sin(x), x, (x, -3, 3)), adaptive=adaptive, n=10) filename = 'test_3d_line_multiple.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() p = plot3d_parametric_line(sin(x), cos(x), x, n=30, adaptive=adaptive) filename = 'test_3d_line_points.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() # 3d surface single plot. p = plot3d(x * y, adaptive=adaptive, n=10) filename = 'test_surface.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() # Multiple 3D plots with same range. p = plot3d(-x * y, x * y, (x, -5, 5), adaptive=adaptive, n=10) filename = 'test_surface_multiple.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() # Multiple 3D plots with different ranges. p = plot3d( (x * y, (x, -3, 3), (y, -3, 3)), (-x * y, (x, -3, 3), (y, -3, 3)), adaptive=adaptive, n=10) filename = 'test_surface_multiple_ranges.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() # Single Parametric 3D plot p = plot3d_parametric_surface(sin(x + y), cos(x - y), x - y, adaptive=adaptive, n=10) filename = 'test_parametric_surface.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() # Multiple Parametric 3D plots. 
p = plot3d_parametric_surface( (x*sin(z), x*cos(z), z, (x, -5, 5), (z, -5, 5)), (sin(x + y), cos(x - y), x - y, (x, -5, 5), (y, -5, 5)), adaptive=adaptive, n=10) filename = 'test_parametric_surface.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() # Single Contour plot. p = plot_contour(sin(x)*sin(y), (x, -5, 5), (y, -5, 5), adaptive=adaptive, n=10) filename = 'test_contour_plot.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() # Multiple Contour plots with same range. p = plot_contour(x**2 + y**2, x**3 + y**3, (x, -5, 5), (y, -5, 5), adaptive=adaptive, n=10) filename = 'test_contour_plot.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() # Multiple Contour plots with different range. p = plot_contour( (x**2 + y**2, (x, -5, 5), (y, -5, 5)), (x**3 + y**3, (x, -3, 3), (y, -3, 3)), adaptive=adaptive, n=10) filename = 'test_contour_plot.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() @pytest.mark.parametrize("adaptive", [True, False]) def test_plot_and_save_3(adaptive): if not matplotlib: skip("Matplotlib not the default backend") x = Symbol('x') y = Symbol('y') z = Symbol('z') with TemporaryDirectory(prefix='sympy_') as tmpdir: ### # Examples from the 'colors' notebook ### p = plot(sin(x), adaptive=adaptive, n=10) p[0].line_color = lambda a: a filename = 'test_colors_line_arity1.png' p.save(os.path.join(tmpdir, filename)) p[0].line_color = lambda a, b: b filename = 'test_colors_line_arity2.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() p = plot(x*sin(x), x*cos(x), (x, 0, 10), adaptive=adaptive, n=10) p[0].line_color = lambda a: a filename = 'test_colors_param_line_arity1.png' p.save(os.path.join(tmpdir, filename)) p[0].line_color = lambda a, b: a filename = 'test_colors_param_line_arity1.png' p.save(os.path.join(tmpdir, filename)) p[0].line_color = lambda a, b: b filename = 'test_colors_param_line_arity2b.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() p = plot3d_parametric_line( 
sin(x) + 0.1*sin(x)*cos(7*x), cos(x) + 0.1*cos(x)*cos(7*x), 0.1*sin(7*x), (x, 0, 2*pi), adaptive=adaptive, n=10) p[0].line_color = lambdify_(x, sin(4*x)) filename = 'test_colors_3d_line_arity1.png' p.save(os.path.join(tmpdir, filename)) p[0].line_color = lambda a, b: b filename = 'test_colors_3d_line_arity2.png' p.save(os.path.join(tmpdir, filename)) p[0].line_color = lambda a, b, c: c filename = 'test_colors_3d_line_arity3.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() p = plot3d(sin(x)*y, (x, 0, 6*pi), (y, -5, 5), adaptive=adaptive, n=10) p[0].surface_color = lambda a: a filename = 'test_colors_surface_arity1.png' p.save(os.path.join(tmpdir, filename)) p[0].surface_color = lambda a, b: b filename = 'test_colors_surface_arity2.png' p.save(os.path.join(tmpdir, filename)) p[0].surface_color = lambda a, b, c: c filename = 'test_colors_surface_arity3a.png' p.save(os.path.join(tmpdir, filename)) p[0].surface_color = lambdify_((x, y, z), sqrt((x - 3*pi)**2 + y**2)) filename = 'test_colors_surface_arity3b.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() p = plot3d_parametric_surface(x * cos(4 * y), x * sin(4 * y), y, (x, -1, 1), (y, -1, 1), adaptive=adaptive, n=10) p[0].surface_color = lambda a: a filename = 'test_colors_param_surf_arity1.png' p.save(os.path.join(tmpdir, filename)) p[0].surface_color = lambda a, b: a*b filename = 'test_colors_param_surf_arity2.png' p.save(os.path.join(tmpdir, filename)) p[0].surface_color = lambdify_((x, y, z), sqrt(x**2 + y**2 + z**2)) filename = 'test_colors_param_surf_arity3.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() @pytest.mark.parametrize("adaptive", [True]) def test_plot_and_save_4(adaptive): if not matplotlib: skip("Matplotlib not the default backend") x = Symbol('x') y = Symbol('y') ### # Examples from the 'advanced' notebook ### with TemporaryDirectory(prefix='sympy_') as tmpdir: i = Integral(log((sin(x)**2 + 1)*sqrt(x**2 + 1)), (x, 0, y)) p = plot(i, (y, 1, 5), 
adaptive=adaptive, n=10, force_real_eval=True) filename = 'test_advanced_integral.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() @pytest.mark.parametrize("adaptive", [True, False]) def test_plot_and_save_5(adaptive): if not matplotlib: skip("Matplotlib not the default backend") x = Symbol('x') y = Symbol('y') with TemporaryDirectory(prefix='sympy_') as tmpdir: s = Sum(1/x**y, (x, 1, oo)) p = plot(s, (y, 2, 10), adaptive=adaptive, n=10) filename = 'test_advanced_inf_sum.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() p = plot(Sum(1/x, (x, 1, y)), (y, 2, 10), show=False, adaptive=adaptive, n=10) p[0].only_integers = True p[0].steps = True filename = 'test_advanced_fin_sum.png' # XXX: This should be fixed in experimental_lambdify or by using # ordinary lambdify so that it doesn't warn. The error results from # passing an array of values as the integration limit. # # UserWarning: The evaluation of the expression is problematic. We are # trying a failback method that may still work. Please report this as a # bug. with ignore_warnings(UserWarning): p.save(os.path.join(tmpdir, filename)) p._backend.close() @pytest.mark.parametrize("adaptive", [True, False]) def test_plot_and_save_6(adaptive): if not matplotlib: skip("Matplotlib not the default backend") x = Symbol('x') with TemporaryDirectory(prefix='sympy_') as tmpdir: filename = 'test.png' ### # Test expressions that can not be translated to np and generate complex # results. 
### p = plot(sin(x) + I*cos(x)) p.save(os.path.join(tmpdir, filename)) with ignore_warnings(RuntimeWarning): p = plot(sqrt(sqrt(-x))) p.save(os.path.join(tmpdir, filename)) p = plot(LambertW(x)) p.save(os.path.join(tmpdir, filename)) p = plot(sqrt(LambertW(x))) p.save(os.path.join(tmpdir, filename)) #Characteristic function of a StudentT distribution with nu=10 x1 = 5 * x**2 * exp_polar(-I*pi)/2 m1 = meijerg(((1 / 2,), ()), ((5, 0, 1 / 2), ()), x1) x2 = 5*x**2 * exp_polar(I*pi)/2 m2 = meijerg(((1/2,), ()), ((5, 0, 1/2), ()), x2) expr = (m1 + m2) / (48 * pi) with warns( UserWarning, match="The evaluation with NumPy/SciPy failed", test_stacklevel=False, ): p = plot(expr, (x, 1e-6, 1e-2), adaptive=adaptive, n=10) p.save(os.path.join(tmpdir, filename)) @pytest.mark.parametrize("adaptive", [True, False]) def test_plotgrid_and_save(adaptive): if not matplotlib: skip("Matplotlib not the default backend") x = Symbol('x') y = Symbol('y') with TemporaryDirectory(prefix='sympy_') as tmpdir: p1 = plot(x, adaptive=adaptive, n=10) p2 = plot_parametric((sin(x), cos(x)), (x, sin(x)), show=False, adaptive=adaptive, n=10) p3 = plot_parametric( cos(x), sin(x), adaptive=adaptive, n=10, show=False) p4 = plot3d_parametric_line(sin(x), cos(x), x, show=False, adaptive=adaptive, n=10) # symmetric grid p = PlotGrid(2, 2, p1, p2, p3, p4) filename = 'test_grid1.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() # grid size greater than the number of subplots p = PlotGrid(3, 4, p1, p2, p3, p4) filename = 'test_grid2.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() p5 = plot(cos(x),(x, -pi, pi), show=False, adaptive=adaptive, n=10) p5[0].line_color = lambda a: a p6 = plot(Piecewise((1, x > 0), (0, True)), (x, -1, 1), show=False, adaptive=adaptive, n=10) p7 = plot_contour( (x**2 + y**2, (x, -5, 5), (y, -5, 5)), (x**3 + y**3, (x, -3, 3), (y, -3, 3)), show=False, adaptive=adaptive, n=10) # unsymmetric grid (subplots in one line) p = PlotGrid(1, 3, p5, p6, p7) filename = 
'test_grid3.png' p.save(os.path.join(tmpdir, filename)) p._backend.close() @pytest.mark.parametrize("adaptive", [True, False]) def test_append_issue_7140(adaptive): if not matplotlib: skip("Matplotlib not the default backend") x = Symbol('x') p1 = plot(x, adaptive=adaptive, n=10) p2 = plot(x**2, adaptive=adaptive, n=10) plot(x + 2, adaptive=adaptive, n=10) # append a series p2.append(p1[0]) assert len(p2._series) == 2 with raises(TypeError): p1.append(p2) with raises(TypeError): p1.append(p2._series) @pytest.mark.parametrize("adaptive", [True, False]) def test_issue_15265(adaptive): if not matplotlib: skip("Matplotlib not the default backend") x = Symbol('x') eqn = sin(x) p = plot(eqn, xlim=(-S.Pi, S.Pi), ylim=(-1, 1), adaptive=adaptive, n=10) p._backend.close() p = plot(eqn, xlim=(-1, 1), ylim=(-S.Pi, S.Pi), adaptive=adaptive, n=10) p._backend.close() p = plot(eqn, xlim=(-1, 1), adaptive=adaptive, n=10, ylim=(sympify('-3.14'), sympify('3.14'))) p._backend.close() p = plot(eqn, adaptive=adaptive, n=10, xlim=(sympify('-3.14'), sympify('3.14')), ylim=(-1, 1)) p._backend.close() raises(ValueError, lambda: plot(eqn, adaptive=adaptive, n=10, xlim=(-S.ImaginaryUnit, 1), ylim=(-1, 1))) raises(ValueError, lambda: plot(eqn, adaptive=adaptive, n=10, xlim=(-1, 1), ylim=(-1, S.ImaginaryUnit))) raises(ValueError, lambda: plot(eqn, adaptive=adaptive, n=10, xlim=(S.NegativeInfinity, 1), ylim=(-1, 1))) raises(ValueError, lambda: plot(eqn, adaptive=adaptive, n=10, xlim=(-1, 1), ylim=(-1, S.Infinity))) def test_empty_Plot(): if not matplotlib: skip("Matplotlib not the default backend") # No exception showing an empty plot plot() # Plot is only a base class: doesn't implement any logic for showing # images p = Plot() raises(NotImplementedError, lambda: p.show()) @pytest.mark.parametrize("adaptive", [True, False]) def test_issue_17405(adaptive): if not matplotlib: skip("Matplotlib not the default backend") x = Symbol('x') f = x**0.3 - 10*x**3 + x**2 p = plot(f, (x, -10, 10), 
adaptive=adaptive, n=30, show=False) # Random number of segments, probably more than 100, but we want to see # that there are segments generated, as opposed to when the bug was present # RuntimeWarning: invalid value encountered in double_scalars with ignore_warnings(RuntimeWarning): assert len(p[0].get_data()[0]) >= 30 @pytest.mark.parametrize("adaptive", [True, False]) def test_logplot_PR_16796(adaptive): if not matplotlib: skip("Matplotlib not the default backend") x = Symbol('x') p = plot(x, (x, .001, 100), adaptive=adaptive, n=30, xscale='log', show=False) # Random number of segments, probably more than 100, but we want to see # that there are segments generated, as opposed to when the bug was present assert len(p[0].get_data()[0]) >= 30 assert p[0].end == 100.0 assert p[0].start == .001 @pytest.mark.parametrize("adaptive", [True, False]) def test_issue_16572(adaptive): if not matplotlib: skip("Matplotlib not the default backend") x = Symbol('x') p = plot(LambertW(x), show=False, adaptive=adaptive, n=30) # Random number of segments, probably more than 50, but we want to see # that there are segments generated, as opposed to when the bug was present assert len(p[0].get_data()[0]) >= 30 @pytest.mark.parametrize("adaptive", [True, False]) def test_issue_11865(adaptive): if not matplotlib: skip("Matplotlib not the default backend") k = Symbol('k', integer=True) f = Piecewise((-I*exp(I*pi*k)/k + I*exp(-I*pi*k)/k, Ne(k, 0)), (2*pi, True)) p = plot(f, show=False, adaptive=adaptive, n=30) # Random number of segments, probably more than 100, but we want to see # that there are segments generated, as opposed to when the bug was present # and that there are no exceptions. 
assert len(p[0].get_data()[0]) >= 30 @skip_under_pyodide("Warnings not emitted in Pyodide because of lack of WASM fp exception support") def test_issue_11461(): if not matplotlib: skip("Matplotlib not the default backend") x = Symbol('x') p = plot(real_root((log(x/(x-2))), 3), show=False, adaptive=True) with warns( RuntimeWarning, match="invalid value encountered in", test_stacklevel=False, ): # Random number of segments, probably more than 100, but we want to see # that there are segments generated, as opposed to when the bug was present # and that there are no exceptions. assert len(p[0].get_data()[0]) >= 30 @pytest.mark.parametrize("adaptive", [True, False]) def test_issue_11764(adaptive): if not matplotlib: skip("Matplotlib not the default backend") x = Symbol('x') p = plot_parametric(cos(x), sin(x), (x, 0, 2 * pi), aspect_ratio=(1,1), show=False, adaptive=adaptive, n=30) assert p.aspect_ratio == (1, 1) # Random number of segments, probably more than 100, but we want to see # that there are segments generated, as opposed to when the bug was present assert len(p[0].get_data()[0]) >= 30 @pytest.mark.parametrize("adaptive", [True, False]) def test_issue_13516(adaptive): if not matplotlib: skip("Matplotlib not the default backend") x = Symbol('x') pm = plot(sin(x), backend="matplotlib", show=False, adaptive=adaptive, n=30) assert pm.backend == MatplotlibBackend assert len(pm[0].get_data()[0]) >= 30 pt = plot(sin(x), backend="text", show=False, adaptive=adaptive, n=30) assert pt.backend == TextBackend assert len(pt[0].get_data()[0]) >= 30 pd = plot(sin(x), backend="default", show=False, adaptive=adaptive, n=30) assert pd.backend == MatplotlibBackend assert len(pd[0].get_data()[0]) >= 30 p = plot(sin(x), show=False, adaptive=adaptive, n=30) assert p.backend == MatplotlibBackend assert len(p[0].get_data()[0]) >= 30 @pytest.mark.parametrize("adaptive", [True, False]) def test_plot_limits(adaptive): if not matplotlib: skip("Matplotlib not the default backend") x = 
Symbol('x') p = plot(x, x**2, (x, -10, 10), adaptive=adaptive, n=10) backend = p._backend xmin, xmax = backend.ax.get_xlim() assert abs(xmin + 10) < 2 assert abs(xmax - 10) < 2 ymin, ymax = backend.ax.get_ylim() assert abs(ymin + 10) < 10 assert abs(ymax - 100) < 10 @pytest.mark.parametrize("adaptive", [True, False]) def test_plot3d_parametric_line_limits(adaptive): if not matplotlib: skip("Matplotlib not the default backend") x = Symbol('x') v1 = (2*cos(x), 2*sin(x), 2*x, (x, -5, 5)) v2 = (sin(x), cos(x), x, (x, -5, 5)) p = plot3d_parametric_line(v1, v2, adaptive=adaptive, n=60) backend = p._backend xmin, xmax = backend.ax.get_xlim() assert abs(xmin + 2) < 1e-2 assert abs(xmax - 2) < 1e-2 ymin, ymax = backend.ax.get_ylim() assert abs(ymin + 2) < 1e-2 assert abs(ymax - 2) < 1e-2 zmin, zmax = backend.ax.get_zlim() assert abs(zmin + 10) < 1e-2 assert abs(zmax - 10) < 1e-2 p = plot3d_parametric_line(v2, v1, adaptive=adaptive, n=60) backend = p._backend xmin, xmax = backend.ax.get_xlim() assert abs(xmin + 2) < 1e-2 assert abs(xmax - 2) < 1e-2 ymin, ymax = backend.ax.get_ylim() assert abs(ymin + 2) < 1e-2 assert abs(ymax - 2) < 1e-2 zmin, zmax = backend.ax.get_zlim() assert abs(zmin + 10) < 1e-2 assert abs(zmax - 10) < 1e-2 @pytest.mark.parametrize("adaptive", [True, False]) def test_plot_size(adaptive): if not matplotlib: skip("Matplotlib not the default backend") x = Symbol('x') p1 = plot(sin(x), backend="matplotlib", size=(8, 4), adaptive=adaptive, n=10) s1 = p1._backend.fig.get_size_inches() assert (s1[0] == 8) and (s1[1] == 4) p2 = plot(sin(x), backend="matplotlib", size=(5, 10), adaptive=adaptive, n=10) s2 = p2._backend.fig.get_size_inches() assert (s2[0] == 5) and (s2[1] == 10) p3 = PlotGrid(2, 1, p1, p2, size=(6, 2), adaptive=adaptive, n=10) s3 = p3._backend.fig.get_size_inches() assert (s3[0] == 6) and (s3[1] == 2) with raises(ValueError): plot(sin(x), backend="matplotlib", size=(-1, 3)) def test_issue_20113(): if not matplotlib: skip("Matplotlib not the 
default backend") x = Symbol('x') # verify the capability to use custom backends plot(sin(x), backend=Plot, show=False) p2 = plot(sin(x), backend=MatplotlibBackend, show=False) assert p2.backend == MatplotlibBackend assert len(p2[0].get_data()[0]) >= 30 p3 = plot(sin(x), backend=DummyBackendOk, show=False) assert p3.backend == DummyBackendOk assert len(p3[0].get_data()[0]) >= 30 # test for an improper coded backend p4 = plot(sin(x), backend=DummyBackendNotOk, show=False) assert p4.backend == DummyBackendNotOk assert len(p4[0].get_data()[0]) >= 30 with raises(NotImplementedError): p4.show() with raises(NotImplementedError): p4.save("test/path") with raises(NotImplementedError): p4._backend.close() def test_custom_coloring(): x = Symbol('x') y = Symbol('y') plot(cos(x), line_color=lambda a: a) plot(cos(x), line_color=1) plot(cos(x), line_color="r") plot_parametric(cos(x), sin(x), line_color=lambda a: a) plot_parametric(cos(x), sin(x), line_color=1) plot_parametric(cos(x), sin(x), line_color="r") plot3d_parametric_line(cos(x), sin(x), x, line_color=lambda a: a) plot3d_parametric_line(cos(x), sin(x), x, line_color=1) plot3d_parametric_line(cos(x), sin(x), x, line_color="r") plot3d_parametric_surface(cos(x + y), sin(x - y), x - y, (x, -5, 5), (y, -5, 5), surface_color=lambda a, b: a**2 + b**2) plot3d_parametric_surface(cos(x + y), sin(x - y), x - y, (x, -5, 5), (y, -5, 5), surface_color=1) plot3d_parametric_surface(cos(x + y), sin(x - y), x - y, (x, -5, 5), (y, -5, 5), surface_color="r") plot3d(x*y, (x, -5, 5), (y, -5, 5), surface_color=lambda a, b: a**2 + b**2) plot3d(x*y, (x, -5, 5), (y, -5, 5), surface_color=1) plot3d(x*y, (x, -5, 5), (y, -5, 5), surface_color="r") @pytest.mark.parametrize("adaptive", [True, False]) def test_deprecated_get_segments(adaptive): if not matplotlib: skip("Matplotlib not the default backend") x = Symbol('x') f = sin(x) p = plot(f, (x, -10, 10), show=False, adaptive=adaptive, n=10) with warns_deprecated_sympy(): p[0].get_segments() 
@pytest.mark.parametrize("adaptive", [True, False]) def test_generic_data_series(adaptive): # verify that no errors are raised when generic data series are used if not matplotlib: skip("Matplotlib not the default backend") x = Symbol("x") p = plot(x, markers=[{"args":[[0, 1], [0, 1]], "marker": "*", "linestyle": "none"}], annotations=[{"text": "test", "xy": (0, 0)}], fill={"x": [0, 1, 2, 3], "y1": [0, 1, 2, 3]}, rectangles=[{"xy": (0, 0), "width": 5, "height": 1}], adaptive=adaptive, n=10) assert len(p._backend.ax.collections) == 1 assert len(p._backend.ax.patches) == 1 assert len(p._backend.ax.lines) == 2 assert len(p._backend.ax.texts) == 1 def test_deprecated_markers_annotations_rectangles_fill(): if not matplotlib: skip("Matplotlib not the default backend") x = Symbol('x') p = plot(sin(x), (x, -10, 10), show=False) with warns_deprecated_sympy(): p.markers = [{"args":[[0, 1], [0, 1]], "marker": "*", "linestyle": "none"}] assert len(p._series) == 2 with warns_deprecated_sympy(): p.annotations = [{"text": "test", "xy": (0, 0)}] assert len(p._series) == 3 with warns_deprecated_sympy(): p.fill = {"x": [0, 1, 2, 3], "y1": [0, 1, 2, 3]} assert len(p._series) == 4 with warns_deprecated_sympy(): p.rectangles = [{"xy": (0, 0), "width": 5, "height": 1}] assert len(p._series) == 5 def test_back_compatibility(): if not matplotlib: skip("Matplotlib not the default backend") x = Symbol('x') y = Symbol('y') p = plot(sin(x), adaptive=False, n=5) assert len(p[0].get_points()) == 2 assert len(p[0].get_data()) == 2 p = plot_parametric(cos(x), sin(x), (x, 0, 2), adaptive=False, n=5) assert len(p[0].get_points()) == 2 assert len(p[0].get_data()) == 3 p = plot3d_parametric_line(cos(x), sin(x), x, (x, 0, 2), adaptive=False, n=5) assert len(p[0].get_points()) == 3 assert len(p[0].get_data()) == 4 p = plot3d(cos(x**2 + y**2), (x, -pi, pi), (y, -pi, pi), n=5) assert len(p[0].get_meshes()) == 3 assert len(p[0].get_data()) == 3 p = plot_contour(cos(x**2 + y**2), (x, -pi, pi), (y, -pi, pi), 
n=5) assert len(p[0].get_meshes()) == 3 assert len(p[0].get_data()) == 3 p = plot3d_parametric_surface(x * cos(y), x * sin(y), x * cos(4 * y) / 2, (x, 0, pi), (y, 0, 2*pi), n=5) assert len(p[0].get_meshes()) == 3 assert len(p[0].get_data()) == 5 def test_plot_arguments(): ### Test arguments for plot() if not matplotlib: skip("Matplotlib not the default backend") x, y = symbols("x, y") # single expressions p = plot(x + 1) assert isinstance(p[0], LineOver1DRangeSeries) assert p[0].expr == x + 1 assert p[0].ranges == [(x, -10, 10)] assert p[0].get_label(False) == "x + 1" assert p[0].rendering_kw == {} # single expressions custom label p = plot(x + 1, "label") assert isinstance(p[0], LineOver1DRangeSeries) assert p[0].expr == x + 1 assert p[0].ranges == [(x, -10, 10)] assert p[0].get_label(False) == "label" assert p[0].rendering_kw == {} # single expressions with range p = plot(x + 1, (x, -2, 2)) assert p[0].ranges == [(x, -2, 2)] # single expressions with range, label and rendering-kw dictionary p = plot(x + 1, (x, -2, 2), "test", {"color": "r"}) assert p[0].get_label(False) == "test" assert p[0].rendering_kw == {"color": "r"} # multiple expressions p = plot(x + 1, x**2) assert isinstance(p[0], LineOver1DRangeSeries) assert p[0].expr == x + 1 assert p[0].ranges == [(x, -10, 10)] assert p[0].get_label(False) == "x + 1" assert p[0].rendering_kw == {} assert isinstance(p[1], LineOver1DRangeSeries) assert p[1].expr == x**2 assert p[1].ranges == [(x, -10, 10)] assert p[1].get_label(False) == "x**2" assert p[1].rendering_kw == {} # multiple expressions over the same range p = plot(x + 1, x**2, (x, 0, 5)) assert p[0].ranges == [(x, 0, 5)] assert p[1].ranges == [(x, 0, 5)] # multiple expressions over the same range with the same rendering kws p = plot(x + 1, x**2, (x, 0, 5), {"color": "r"}) assert p[0].ranges == [(x, 0, 5)] assert p[1].ranges == [(x, 0, 5)] assert p[0].rendering_kw == {"color": "r"} assert p[1].rendering_kw == {"color": "r"} # multiple expressions with 
different ranges, labels and rendering kws p = plot( (x + 1, (x, 0, 5)), (x**2, (x, -2, 2), "test", {"color": "r"})) assert isinstance(p[0], LineOver1DRangeSeries) assert p[0].expr == x + 1 assert p[0].ranges == [(x, 0, 5)] assert p[0].get_label(False) == "x + 1" assert p[0].rendering_kw == {} assert isinstance(p[1], LineOver1DRangeSeries) assert p[1].expr == x**2 assert p[1].ranges == [(x, -2, 2)] assert p[1].get_label(False) == "test" assert p[1].rendering_kw == {"color": "r"} # single argument: lambda function f = lambda t: t p = plot(lambda t: t) assert isinstance(p[0], LineOver1DRangeSeries) assert callable(p[0].expr) assert p[0].ranges[0][1:] == (-10, 10) assert p[0].get_label(False) == "" assert p[0].rendering_kw == {} # single argument: lambda function + custom range and label p = plot(f, ("t", -5, 6), "test") assert p[0].ranges[0][1:] == (-5, 6) assert p[0].get_label(False) == "test" def test_plot_parametric_arguments(): ### Test arguments for plot_parametric() if not matplotlib: skip("Matplotlib not the default backend") x, y = symbols("x, y") # single parametric expression p = plot_parametric(x + 1, x) assert isinstance(p[0], Parametric2DLineSeries) assert p[0].expr == (x + 1, x) assert p[0].ranges == [(x, -10, 10)] assert p[0].get_label(False) == "x" assert p[0].rendering_kw == {} # single parametric expression with custom range, label and rendering kws p = plot_parametric(x + 1, x, (x, -2, 2), "test", {"cmap": "Reds"}) assert p[0].expr == (x + 1, x) assert p[0].ranges == [(x, -2, 2)] assert p[0].get_label(False) == "test" assert p[0].rendering_kw == {"cmap": "Reds"} p = plot_parametric((x + 1, x), (x, -2, 2), "test") assert p[0].expr == (x + 1, x) assert p[0].ranges == [(x, -2, 2)] assert p[0].get_label(False) == "test" assert p[0].rendering_kw == {} # multiple parametric expressions same symbol p = plot_parametric((x + 1, x), (x ** 2, x + 1)) assert p[0].expr == (x + 1, x) assert p[0].ranges == [(x, -10, 10)] assert p[0].get_label(False) == "x" assert 
p[0].rendering_kw == {} assert p[1].expr == (x ** 2, x + 1) assert p[1].ranges == [(x, -10, 10)] assert p[1].get_label(False) == "x" assert p[1].rendering_kw == {} # multiple parametric expressions different symbols p = plot_parametric((x + 1, x), (y ** 2, y + 1, "test")) assert p[0].expr == (x + 1, x) assert p[0].ranges == [(x, -10, 10)] assert p[0].get_label(False) == "x" assert p[0].rendering_kw == {} assert p[1].expr == (y ** 2, y + 1) assert p[1].ranges == [(y, -10, 10)] assert p[1].get_label(False) == "test" assert p[1].rendering_kw == {} # multiple parametric expressions same range p = plot_parametric((x + 1, x), (x ** 2, x + 1), (x, -2, 2)) assert p[0].expr == (x + 1, x) assert p[0].ranges == [(x, -2, 2)] assert p[0].get_label(False) == "x" assert p[0].rendering_kw == {} assert p[1].expr == (x ** 2, x + 1) assert p[1].ranges == [(x, -2, 2)] assert p[1].get_label(False) == "x" assert p[1].rendering_kw == {} # multiple parametric expressions, custom ranges and labels p = plot_parametric( (x + 1, x, (x, -2, 2), "test1"), (x ** 2, x + 1, (x, -3, 3), "test2", {"cmap": "Reds"})) assert p[0].expr == (x + 1, x) assert p[0].ranges == [(x, -2, 2)] assert p[0].get_label(False) == "test1" assert p[0].rendering_kw == {} assert p[1].expr == (x ** 2, x + 1) assert p[1].ranges == [(x, -3, 3)] assert p[1].get_label(False) == "test2" assert p[1].rendering_kw == {"cmap": "Reds"} # single argument: lambda function fx = lambda t: t fy = lambda t: 2 * t p = plot_parametric(fx, fy) assert all(callable(t) for t in p[0].expr) assert p[0].ranges[0][1:] == (-10, 10) assert "Dummy" in p[0].get_label(False) assert p[0].rendering_kw == {} # single argument: lambda function + custom range + label p = plot_parametric(fx, fy, ("t", 0, 2), "test") assert all(callable(t) for t in p[0].expr) assert p[0].ranges[0][1:] == (0, 2) assert p[0].get_label(False) == "test" assert p[0].rendering_kw == {} def test_plot3d_parametric_line_arguments(): ### Test arguments for plot3d_parametric_line() if 
not matplotlib: skip("Matplotlib not the default backend") x, y = symbols("x, y") # single parametric expression p = plot3d_parametric_line(x + 1, x, sin(x)) assert isinstance(p[0], Parametric3DLineSeries) assert p[0].expr == (x + 1, x, sin(x)) assert p[0].ranges == [(x, -10, 10)] assert p[0].get_label(False) == "x" assert p[0].rendering_kw == {} # single parametric expression with custom range, label and rendering kws p = plot3d_parametric_line(x + 1, x, sin(x), (x, -2, 2), "test", {"cmap": "Reds"}) assert isinstance(p[0], Parametric3DLineSeries) assert p[0].expr == (x + 1, x, sin(x)) assert p[0].ranges == [(x, -2, 2)] assert p[0].get_label(False) == "test" assert p[0].rendering_kw == {"cmap": "Reds"} p = plot3d_parametric_line((x + 1, x, sin(x)), (x, -2, 2), "test") assert p[0].expr == (x + 1, x, sin(x)) assert p[0].ranges == [(x, -2, 2)] assert p[0].get_label(False) == "test" assert p[0].rendering_kw == {} # multiple parametric expression same symbol p = plot3d_parametric_line( (x + 1, x, sin(x)), (x ** 2, 1, cos(x), {"cmap": "Reds"})) assert p[0].expr == (x + 1, x, sin(x)) assert p[0].ranges == [(x, -10, 10)] assert p[0].get_label(False) == "x" assert p[0].rendering_kw == {} assert p[1].expr == (x ** 2, 1, cos(x)) assert p[1].ranges == [(x, -10, 10)] assert p[1].get_label(False) == "x" assert p[1].rendering_kw == {"cmap": "Reds"} # multiple parametric expression different symbols p = plot3d_parametric_line((x + 1, x, sin(x)), (y ** 2, 1, cos(y))) assert p[0].expr == (x + 1, x, sin(x)) assert p[0].ranges == [(x, -10, 10)] assert p[0].get_label(False) == "x" assert p[0].rendering_kw == {} assert p[1].expr == (y ** 2, 1, cos(y)) assert p[1].ranges == [(y, -10, 10)] assert p[1].get_label(False) == "y" assert p[1].rendering_kw == {} # multiple parametric expression, custom ranges and labels p = plot3d_parametric_line( (x + 1, x, sin(x)), (x ** 2, 1, cos(x), (x, -2, 2), "test", {"cmap": "Reds"})) assert p[0].expr == (x + 1, x, sin(x)) assert p[0].ranges == [(x, -10, 
10)] assert p[0].get_label(False) == "x" assert p[0].rendering_kw == {} assert p[1].expr == (x ** 2, 1, cos(x)) assert p[1].ranges == [(x, -2, 2)] assert p[1].get_label(False) == "test" assert p[1].rendering_kw == {"cmap": "Reds"} # single argument: lambda function fx = lambda t: t fy = lambda t: 2 * t fz = lambda t: 3 * t p = plot3d_parametric_line(fx, fy, fz) assert all(callable(t) for t in p[0].expr) assert p[0].ranges[0][1:] == (-10, 10) assert "Dummy" in p[0].get_label(False) assert p[0].rendering_kw == {} # single argument: lambda function + custom range + label p = plot3d_parametric_line(fx, fy, fz, ("t", 0, 2), "test") assert all(callable(t) for t in p[0].expr) assert p[0].ranges[0][1:] == (0, 2) assert p[0].get_label(False) == "test" assert p[0].rendering_kw == {} def test_plot3d_plot_contour_arguments(): ### Test arguments for plot3d() and plot_contour() if not matplotlib: skip("Matplotlib not the default backend") x, y = symbols("x, y") # single expression p = plot3d(x + y) assert isinstance(p[0], SurfaceOver2DRangeSeries) assert p[0].expr == x + y assert p[0].ranges[0] == (x, -10, 10) or (y, -10, 10) assert p[0].ranges[1] == (x, -10, 10) or (y, -10, 10) assert p[0].get_label(False) == "x + y" assert p[0].rendering_kw == {} # single expression, custom range, label and rendering kws p = plot3d(x + y, (x, -2, 2), "test", {"cmap": "Reds"}) assert isinstance(p[0], SurfaceOver2DRangeSeries) assert p[0].expr == x + y assert p[0].ranges[0] == (x, -2, 2) assert p[0].ranges[1] == (y, -10, 10) assert p[0].get_label(False) == "test" assert p[0].rendering_kw == {"cmap": "Reds"} p = plot3d(x + y, (x, -2, 2), (y, -4, 4), "test") assert p[0].ranges[0] == (x, -2, 2) assert p[0].ranges[1] == (y, -4, 4) # multiple expressions p = plot3d(x + y, x * y) assert p[0].expr == x + y assert p[0].ranges[0] == (x, -10, 10) or (y, -10, 10) assert p[0].ranges[1] == (x, -10, 10) or (y, -10, 10) assert p[0].get_label(False) == "x + y" assert p[0].rendering_kw == {} assert p[1].expr == 
x * y assert p[1].ranges[0] == (x, -10, 10) or (y, -10, 10) assert p[1].ranges[1] == (x, -10, 10) or (y, -10, 10) assert p[1].get_label(False) == "x*y" assert p[1].rendering_kw == {} # multiple expressions, same custom ranges p = plot3d(x + y, x * y, (x, -2, 2), (y, -4, 4)) assert p[0].expr == x + y assert p[0].ranges[0] == (x, -2, 2) assert p[0].ranges[1] == (y, -4, 4) assert p[0].get_label(False) == "x + y" assert p[0].rendering_kw == {} assert p[1].expr == x * y assert p[1].ranges[0] == (x, -2, 2) assert p[1].ranges[1] == (y, -4, 4) assert p[1].get_label(False) == "x*y" assert p[1].rendering_kw == {} # multiple expressions, custom ranges, labels and rendering kws p = plot3d( (x + y, (x, -2, 2), (y, -4, 4)), (x * y, (x, -3, 3), (y, -6, 6), "test", {"cmap": "Reds"})) assert p[0].expr == x + y assert p[0].ranges[0] == (x, -2, 2) assert p[0].ranges[1] == (y, -4, 4) assert p[0].get_label(False) == "x + y" assert p[0].rendering_kw == {} assert p[1].expr == x * y assert p[1].ranges[0] == (x, -3, 3) assert p[1].ranges[1] == (y, -6, 6) assert p[1].get_label(False) == "test" assert p[1].rendering_kw == {"cmap": "Reds"} # single expression: lambda function f = lambda x, y: x + y p = plot3d(f) assert callable(p[0].expr) assert p[0].ranges[0][1:] == (-10, 10) assert p[0].ranges[1][1:] == (-10, 10) assert p[0].get_label(False) == "" assert p[0].rendering_kw == {} # single expression: lambda function + custom ranges + label p = plot3d(f, ("a", -5, 3), ("b", -2, 1), "test") assert callable(p[0].expr) assert p[0].ranges[0][1:] == (-5, 3) assert p[0].ranges[1][1:] == (-2, 1) assert p[0].get_label(False) == "test" assert p[0].rendering_kw == {} # test issue 25818 # single expression, custom range, min/max functions p = plot3d(Min(x, y), (x, 0, 10), (y, 0, 10)) assert isinstance(p[0], SurfaceOver2DRangeSeries) assert p[0].expr == Min(x, y) assert p[0].ranges[0] == (x, 0, 10) assert p[0].ranges[1] == (y, 0, 10) assert p[0].get_label(False) == "Min(x, y)" assert p[0].rendering_kw == 
{} def test_plot3d_parametric_surface_arguments(): ### Test arguments for plot3d_parametric_surface() if not matplotlib: skip("Matplotlib not the default backend") x, y = symbols("x, y") # single parametric expression p = plot3d_parametric_surface(x + y, cos(x + y), sin(x + y)) assert isinstance(p[0], ParametricSurfaceSeries) assert p[0].expr == (x + y, cos(x + y), sin(x + y)) assert p[0].ranges[0] == (x, -10, 10) or (y, -10, 10) assert p[0].ranges[1] == (x, -10, 10) or (y, -10, 10) assert p[0].get_label(False) == "(x + y, cos(x + y), sin(x + y))" assert p[0].rendering_kw == {} # single parametric expression, custom ranges, labels and rendering kws p = plot3d_parametric_surface(x + y, cos(x + y), sin(x + y), (x, -2, 2), (y, -4, 4), "test", {"cmap": "Reds"}) assert isinstance(p[0], ParametricSurfaceSeries) assert p[0].expr == (x + y, cos(x + y), sin(x + y)) assert p[0].ranges[0] == (x, -2, 2) assert p[0].ranges[1] == (y, -4, 4) assert p[0].get_label(False) == "test" assert p[0].rendering_kw == {"cmap": "Reds"} # multiple parametric expressions p = plot3d_parametric_surface( (x + y, cos(x + y), sin(x + y)), (x - y, cos(x - y), sin(x - y), "test")) assert p[0].expr == (x + y, cos(x + y), sin(x + y)) assert p[0].ranges[0] == (x, -10, 10) or (y, -10, 10) assert p[0].ranges[1] == (x, -10, 10) or (y, -10, 10) assert p[0].get_label(False) == "(x + y, cos(x + y), sin(x + y))" assert p[0].rendering_kw == {} assert p[1].expr == (x - y, cos(x - y), sin(x - y)) assert p[1].ranges[0] == (x, -10, 10) or (y, -10, 10) assert p[1].ranges[1] == (x, -10, 10) or (y, -10, 10) assert p[1].get_label(False) == "test" assert p[1].rendering_kw == {} # multiple parametric expressions, custom ranges and labels p = plot3d_parametric_surface( (x + y, cos(x + y), sin(x + y), (x, -2, 2), "test"), (x - y, cos(x - y), sin(x - y), (x, -3, 3), (y, -4, 4), "test2", {"cmap": "Reds"})) assert p[0].expr == (x + y, cos(x + y), sin(x + y)) assert p[0].ranges[0] == (x, -2, 2) assert p[0].ranges[1] == (y, 
-10, 10) assert p[0].get_label(False) == "test" assert p[0].rendering_kw == {} assert p[1].expr == (x - y, cos(x - y), sin(x - y)) assert p[1].ranges[0] == (x, -3, 3) assert p[1].ranges[1] == (y, -4, 4) assert p[1].get_label(False) == "test2" assert p[1].rendering_kw == {"cmap": "Reds"} # lambda functions instead of symbolic expressions for a single 3D # parametric surface p = plot3d_parametric_surface( lambda u, v: u, lambda u, v: v, lambda u, v: u + v, ("u", 0, 2), ("v", -3, 4)) assert all(callable(t) for t in p[0].expr) assert p[0].ranges[0][1:] == (-0, 2) assert p[0].ranges[1][1:] == (-3, 4) assert p[0].get_label(False) == "" assert p[0].rendering_kw == {} # lambda functions instead of symbolic expressions for multiple 3D # parametric surfaces p = plot3d_parametric_surface( (lambda u, v: u, lambda u, v: v, lambda u, v: u + v, ("u", 0, 2), ("v", -3, 4)), (lambda u, v: v, lambda u, v: u, lambda u, v: u - v, ("u", -2, 3), ("v", -4, 5), "test")) assert all(callable(t) for t in p[0].expr) assert p[0].ranges[0][1:] == (0, 2) assert p[0].ranges[1][1:] == (-3, 4) assert p[0].get_label(False) == "" assert p[0].rendering_kw == {} assert all(callable(t) for t in p[1].expr) assert p[1].ranges[0][1:] == (-2, 3) assert p[1].ranges[1][1:] == (-4, 5) assert p[1].get_label(False) == "test" assert p[1].rendering_kw == {}
DummyBackendOk
python
PyCQA__pylint
tests/functional/r/regression/regression_3535_double_enum_inherit.py
{ "start": 144, "end": 203 }
class ____(A): x = enum.auto() print(B.__members__['x'])
B
python
gevent__gevent
src/greentest/3.10/test_smtpd.py
{ "start": 318, "end": 944 }
class ____(smtpd.SMTPServer): def __init__(self, *args, **kwargs): smtpd.SMTPServer.__init__(self, *args, **kwargs) self.messages = [] if self._decode_data: self.return_status = 'return status' else: self.return_status = b'return status' def process_message(self, peer, mailfrom, rcpttos, data, **kw): self.messages.append((peer, mailfrom, rcpttos, data)) if data == self.return_status: return '250 Okish' if 'mail_options' in kw and 'SMTPUTF8' in kw['mail_options']: return '250 SMTPUTF8 message okish'
DummyServer
python
mitmproxy__pdoc
test/testdata/mermaid_demo.py
{ "start": 556, "end": 645 }
class ____(Pet): """🐕""" def bark(self, loud: bool = True): """*woof*"""
Dog
python
great-expectations__great_expectations
contrib/time_series_expectations/time_series_expectations/generator/weekly_time_series_generator.py
{ "start": 255, "end": 3409 }
class ____(DailyTimeSeriesGenerator): """Generate a weekly time series with trend, seasonality, and outliers.""" def generate_df( self, size: Optional[int] = 52 * 3, day_of_week: Optional[int] = 0, start_date: Optional[str] = "2018-01-01", trend_params: Optional[List[TrendParams]] = None, weekday_dummy_params: Optional[List[float]] = None, annual_seasonality_params: Optional[List[Tuple[float, float]]] = None, holiday_alpha: float = 3.5, outlier_alpha: float = 2.5, noise_scale: float = 1.0, ) -> pd.DataFrame: """Generate a time series as a pandas dataframe. Keyword Args: size: The number of days in the time series. day_of_week: The day of the week to generate the time series for start_date: The start date of the time series. trend_params: A list of trend parameters corresponding to cutpoints in the time series. weekday_dummy_params: A list of weekday dummy parameters. Should be a list of length 7, with each day corresponding to the average difference in the time series on that day. annual_seasonality_params: A list of annual seasonality parameters used to create a cyclic component in the time series. holiday_alpha: The alpha parameter for the pareto distribution used to generate holiday effects. outlier_alpha: The alpha parameter for the pareto distribution used to generate outlier effects. noise_scale: The scale parameter for the standard deviation of the normal distribution used to generate noise. Returns: A pandas dataframe with a date column and a time series column. Notes: * Holiday and outlier effects are generated using a pareto distribution. The alpha parameter controls the shape of the distribution. A higher alpha value will result in more extreme holiday and outlier effects. * Holidays don't correspond to actual holidays. Instead, they are generated by randomly selecting days in the time series. * Annual seasonality is generated by Fourier series. The number of fourier terms is determined by the length of the annual_seasonality_params list. 
The first element of each tuple in the list is the amplitude of the sine term, and the second element is the amplitude of the cosine term. """ # Start with a daily time series that includes all dates in the target range df = pd.DataFrame( { "ds": pd.date_range(start_date, periods=(size + 1) * 7, freq="D"), "y": self._generate_daily_time_series( (size + 1) * 7, trend_params, weekday_dummy_params, annual_seasonality_params, holiday_alpha, outlier_alpha, noise_scale, ), } ) # Limit to the target day of week df_sub = df[df.ds.map(lambda x: x.day_of_week == day_of_week)] return df_sub[:size]
WeeklyTimeSeriesGenerator
python
openai__openai-python
src/openai/resources/beta/threads/threads.py
{ "start": 46832, "end": 91849 }
class ____(AsyncAPIResource): @cached_property def runs(self) -> AsyncRuns: return AsyncRuns(self._client) @cached_property def messages(self) -> AsyncMessages: return AsyncMessages(self._client) @cached_property def with_raw_response(self) -> AsyncThreadsWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers """ return AsyncThreadsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncThreadsWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. For more information, see https://www.github.com/openai/openai-python#with_streaming_response """ return AsyncThreadsWithStreamingResponse(self) @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API") async def create( self, *, messages: Iterable[thread_create_params.Message] | Omit = omit, metadata: Optional[Metadata] | Omit = omit, tool_resources: Optional[thread_create_params.ToolResources] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Thread: """ Create a thread. Args: messages: A list of [messages](https://platform.openai.com/docs/api-reference/messages) to start the thread with. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. 
Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. tool_resources: A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} return await self._post( "/threads", body=await async_maybe_transform( { "messages": messages, "metadata": metadata, "tool_resources": tool_resources, }, thread_create_params.ThreadCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Thread, ) @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API") async def retrieve( self, thread_id: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Thread: """ Retrieves a thread. 
Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} return await self._get( f"/threads/{thread_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Thread, ) @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API") async def update( self, thread_id: str, *, metadata: Optional[Metadata] | Omit = omit, tool_resources: Optional[thread_update_params.ToolResources] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Thread: """ Modifies a thread. Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. tool_resources: A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. 
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} return await self._post( f"/threads/{thread_id}", body=await async_maybe_transform( { "metadata": metadata, "tool_resources": tool_resources, }, thread_update_params.ThreadUpdateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Thread, ) @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API") async def delete( self, thread_id: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> ThreadDeleted: """ Delete a thread. 
Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} return await self._delete( f"/threads/{thread_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ThreadDeleted, ) @overload @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API") async def create_and_run( self, *, assistant_id: str, instructions: Optional[str] | Omit = omit, max_completion_tokens: Optional[int] | Omit = omit, max_prompt_tokens: Optional[int] | Omit = omit, metadata: Optional[Metadata] | Omit = omit, model: Union[str, ChatModel, None] | Omit = omit, parallel_tool_calls: bool | Omit = omit, response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, stream: Optional[Literal[False]] | Omit = omit, temperature: Optional[float] | Omit = omit, thread: thread_create_and_run_params.Thread | Omit = omit, tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, tool_resources: Optional[thread_create_and_run_params.ToolResources] | Omit = omit, tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, top_p: Optional[float] | Omit = omit, truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run: """ Create a thread and run it in one request. Args: assistant_id: The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run. instructions: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. max_completion_tokens: The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. parallel_tool_calls: Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. 
response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. stream: If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. thread: Options to create a new thread. If no thread is provided when running a request, an empty thread will be created. tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. `auto` is the default value and means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user. 
Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. tool_resources: A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. tools: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ ... 
@overload @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API") async def create_and_run( self, *, assistant_id: str, stream: Literal[True], instructions: Optional[str] | Omit = omit, max_completion_tokens: Optional[int] | Omit = omit, max_prompt_tokens: Optional[int] | Omit = omit, metadata: Optional[Metadata] | Omit = omit, model: Union[str, ChatModel, None] | Omit = omit, parallel_tool_calls: bool | Omit = omit, response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, temperature: Optional[float] | Omit = omit, thread: thread_create_and_run_params.Thread | Omit = omit, tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, tool_resources: Optional[thread_create_and_run_params.ToolResources] | Omit = omit, tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, top_p: Optional[float] | Omit = omit, truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> AsyncStream[AssistantStreamEvent]: """ Create a thread and run it in one request. Args: assistant_id: The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run. stream: If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. instructions: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. 
max_completion_tokens: The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. parallel_tool_calls: Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. 
Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. thread: Options to create a new thread. If no thread is provided when running a request, an empty thread will be created. tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. `auto` is the default value and means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user. Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. tool_resources: A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. tools: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. 
top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ ... @overload @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API") async def create_and_run( self, *, assistant_id: str, stream: bool, instructions: Optional[str] | Omit = omit, max_completion_tokens: Optional[int] | Omit = omit, max_prompt_tokens: Optional[int] | Omit = omit, metadata: Optional[Metadata] | Omit = omit, model: Union[str, ChatModel, None] | Omit = omit, parallel_tool_calls: bool | Omit = omit, response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, temperature: Optional[float] | Omit = omit, thread: thread_create_and_run_params.Thread | Omit = omit, tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, tool_resources: Optional[thread_create_and_run_params.ToolResources] | Omit = omit, tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, top_p: Optional[float] | Omit = omit, truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run | AsyncStream[AssistantStreamEvent]: """ Create a thread and run it in one request. Args: assistant_id: The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run. stream: If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. instructions: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. max_completion_tokens: The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard. Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. 
If not, the model associated with the assistant will be used. parallel_tool_calls: Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. thread: Options to create a new thread. If no thread is provided when running a request, an empty thread will be created. tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tools and instead generates a message. `auto` is the default value and means the model can pick between generating a message or calling one or more tools. 
`required` means the model must call one or more tools before responding to the user. Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. tool_resources: A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. tools: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ ... 
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API") @required_args(["assistant_id"], ["assistant_id", "stream"]) @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API") async def create_and_run( self, *, assistant_id: str, instructions: Optional[str] | Omit = omit, max_completion_tokens: Optional[int] | Omit = omit, max_prompt_tokens: Optional[int] | Omit = omit, metadata: Optional[Metadata] | Omit = omit, model: Union[str, ChatModel, None] | Omit = omit, parallel_tool_calls: bool | Omit = omit, response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, stream: Optional[Literal[False]] | Literal[True] | Omit = omit, temperature: Optional[float] | Omit = omit, thread: thread_create_and_run_params.Thread | Omit = omit, tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, tool_resources: Optional[thread_create_and_run_params.ToolResources] | Omit = omit, tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, top_p: Optional[float] | Omit = omit, truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = not_given, ) -> Run | AsyncStream[AssistantStreamEvent]: extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})} return await self._post( "/threads/runs", body=await async_maybe_transform( { "assistant_id": assistant_id, "instructions": instructions, "max_completion_tokens": max_completion_tokens, "max_prompt_tokens": max_prompt_tokens, "metadata": metadata, "model": model, "parallel_tool_calls": parallel_tool_calls, "response_format": response_format, "stream": stream, "temperature": temperature, "thread": thread, "tool_choice": tool_choice, "tool_resources": tool_resources, "tools": tools, "top_p": top_p, "truncation_strategy": truncation_strategy, }, thread_create_and_run_params.ThreadCreateAndRunParamsStreaming if stream else thread_create_and_run_params.ThreadCreateAndRunParamsNonStreaming, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Run, stream=stream or False, stream_cls=AsyncStream[AssistantStreamEvent], ) async def create_and_run_poll( self, *, assistant_id: str, instructions: Optional[str] | Omit = omit, max_completion_tokens: Optional[int] | Omit = omit, max_prompt_tokens: Optional[int] | Omit = omit, metadata: Optional[Metadata] | Omit = omit, model: Union[str, ChatModel, None] | Omit = omit, parallel_tool_calls: bool | Omit = omit, response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, temperature: Optional[float] | Omit = omit, thread: thread_create_and_run_params.Thread | Omit = omit, tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, tool_resources: Optional[thread_create_and_run_params.ToolResources] | Omit = omit, tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, top_p: Optional[float] | Omit = omit, truncation_strategy: 
Optional[thread_create_and_run_params.TruncationStrategy] | Omit = omit, poll_interval_ms: int | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Run: """ A helper to create a thread, start a run and then poll for a terminal state. More information on Run lifecycles can be found here: https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps """ run = await self.create_and_run( # pyright: ignore[reportDeprecated] assistant_id=assistant_id, instructions=instructions, max_completion_tokens=max_completion_tokens, max_prompt_tokens=max_prompt_tokens, metadata=metadata, model=model, parallel_tool_calls=parallel_tool_calls, response_format=response_format, temperature=temperature, stream=False, thread=thread, tool_resources=tool_resources, tool_choice=tool_choice, truncation_strategy=truncation_strategy, top_p=top_p, tools=tools, extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, ) return await self.runs.poll( # pyright: ignore[reportDeprecated] run.id, run.thread_id, extra_headers, extra_query, extra_body, timeout, poll_interval_ms ) @overload def create_and_run_stream( self, *, assistant_id: str, instructions: Optional[str] | Omit = omit, max_completion_tokens: Optional[int] | Omit = omit, max_prompt_tokens: Optional[int] | Omit = omit, metadata: Optional[Metadata] | Omit = omit, model: Union[str, ChatModel, None] | Omit = omit, parallel_tool_calls: bool | Omit = omit, response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, temperature: Optional[float] | Omit = omit, thread: thread_create_and_run_params.Thread | Omit = omit, tool_choice: 
Optional[AssistantToolChoiceOptionParam] | Omit = omit, tool_resources: Optional[thread_create_and_run_params.ToolResources] | Omit = omit, tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, top_p: Optional[float] | Omit = omit, truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandler]: """Create a thread and stream the run back""" ... @overload def create_and_run_stream( self, *, assistant_id: str, instructions: Optional[str] | Omit = omit, max_completion_tokens: Optional[int] | Omit = omit, max_prompt_tokens: Optional[int] | Omit = omit, metadata: Optional[Metadata] | Omit = omit, model: Union[str, ChatModel, None] | Omit = omit, parallel_tool_calls: bool | Omit = omit, response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, temperature: Optional[float] | Omit = omit, thread: thread_create_and_run_params.Thread | Omit = omit, tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, tool_resources: Optional[thread_create_and_run_params.ToolResources] | Omit = omit, tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, top_p: Optional[float] | Omit = omit, truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | Omit = omit, event_handler: AsyncAssistantEventHandlerT, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]: """Create a thread and stream the run back""" ... def create_and_run_stream( self, *, assistant_id: str, instructions: Optional[str] | Omit = omit, max_completion_tokens: Optional[int] | Omit = omit, max_prompt_tokens: Optional[int] | Omit = omit, metadata: Optional[Metadata] | Omit = omit, model: Union[str, ChatModel, None] | Omit = omit, parallel_tool_calls: bool | Omit = omit, response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit, temperature: Optional[float] | Omit = omit, thread: thread_create_and_run_params.Thread | Omit = omit, tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit, tool_resources: Optional[thread_create_and_run_params.ToolResources] | Omit = omit, tools: Optional[Iterable[AssistantToolParam]] | Omit = omit, top_p: Optional[float] | Omit = omit, truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | Omit = omit, event_handler: AsyncAssistantEventHandlerT | None = None, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ( AsyncAssistantStreamManager[AsyncAssistantEventHandler] | AsyncAssistantStreamManager[AsyncAssistantEventHandlerT] ): """Create a thread and stream the run back""" extra_headers = { "OpenAI-Beta": "assistants=v2", "X-Stainless-Stream-Helper": "threads.create_and_run_stream", "X-Stainless-Custom-Event-Handler": "true" if event_handler else "false", **(extra_headers or {}), } request = self._post( "/threads/runs", body=maybe_transform( { "assistant_id": assistant_id, "instructions": instructions, "max_completion_tokens": max_completion_tokens, "max_prompt_tokens": max_prompt_tokens, "metadata": metadata, "model": model, "parallel_tool_calls": parallel_tool_calls, "response_format": response_format, "temperature": temperature, "tool_choice": tool_choice, "stream": True, "thread": thread, "tools": tools, "tool_resources": tool_resources, "truncation_strategy": truncation_strategy, "top_p": top_p, }, thread_create_and_run_params.ThreadCreateAndRunParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Run, stream=True, stream_cls=AsyncStream[AssistantStreamEvent], ) return AsyncAssistantStreamManager(request, event_handler=event_handler or AsyncAssistantEventHandler())
AsyncThreads
python
Pylons__pyramid
src/pyramid/interfaces.py
{ "start": 47380, "end": 50253 }
class ____(Interface): """An introspectable object used for configuration introspection. In addition to the methods below, objects which implement this interface must also implement all the methods of Python's ``collections.MutableMapping`` (the "dictionary interface"), and must be hashable.""" title = Attribute('Text title describing this introspectable') type_name = Attribute('Text type name describing this introspectable') order = Attribute( 'integer order in which registered with introspector ' '(managed by introspector, usually)' ) category_name = Attribute('introspection category name') discriminator = Attribute( 'introspectable discriminator (within category) (must be hashable)' ) discriminator_hash = Attribute('an integer hash of the discriminator') action_info = Attribute( 'An IActionInfo object representing the caller ' 'that invoked the creation of this introspectable ' '(usually a sentinel until updated during ' 'self.register)' ) def relate(category_name, discriminator): """Indicate an intent to relate this IIntrospectable with another IIntrospectable (the one associated with the ``category_name`` and ``discriminator``) during action execution. """ def unrelate(category_name, discriminator): """Indicate an intent to break the relationship between this IIntrospectable with another IIntrospectable (the one associated with the ``category_name`` and ``discriminator``) during action execution. """ def register(introspector, action_info): """Register this IIntrospectable with an introspector. This method is invoked during action execution. Adds the introspectable and its relations to the introspector. ``introspector`` should be an object implementing IIntrospector. ``action_info`` should be a object implementing the interface :class:`pyramid.interfaces.IActionInfo` representing the call that registered this introspectable. Pseudocode for an implementation of this method: .. 
code-block:: python def register(self, introspector, action_info): self.action_info = action_info introspector.add(self) for methodname, category_name, discriminator in self._relations: method = getattr(introspector, methodname) method((i.category_name, i.discriminator), (category_name, discriminator)) """ # noqa: E501 def __hash__(): """Introspectables must be hashable. The typical implementation of an introsepectable's __hash__ is:: return hash((self.category_name,) + (self.discriminator,)) """
IIntrospectable
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pyupgrade/UP046_0.py
{ "start": 2033, "end": 2449 }
class ____: # -> generic_method[T: float](t: T) def generic_method(t: T) -> T: return t # This one is strange in particular because of the mix of old- and new-style # generics, but according to the PEP, this is okay "if the class, function, or # type alias does not use the new syntax." `more_generic` doesn't use the new # syntax, so it can use T from the module and U from the class scope.
NotGeneric
python
Textualize__textual
tests/command_palette/test_click_away.py
{ "start": 303, "end": 792 }
class ____(App[None]): COMMANDS = {SimpleSource} def on_mount(self) -> None: self.action_command_palette() async def test_clicking_outside_command_palette_closes_it() -> None: """Clicking 'outside' the command palette should make it go away.""" async with CommandPaletteApp().run_test() as pilot: assert isinstance(pilot.app.screen, CommandPalette) await pilot.click() assert not isinstance(pilot.app.screen, CommandPalette)
CommandPaletteApp
python
paramiko__paramiko
demos/forward.py
{ "start": 1507, "end": 7254 }
class ____(SocketServer.BaseRequestHandler): def handle(self): try: chan = self.ssh_transport.open_channel( "direct-tcpip", (self.chain_host, self.chain_port), self.request.getpeername(), ) except Exception as e: verbose( "Incoming request to %s:%d failed: %s" % (self.chain_host, self.chain_port, repr(e)) ) return if chan is None: verbose( "Incoming request to %s:%d was rejected by the SSH server." % (self.chain_host, self.chain_port) ) return verbose( "Connected! Tunnel open %r -> %r -> %r" % ( self.request.getpeername(), chan.getpeername(), (self.chain_host, self.chain_port), ) ) while True: r, w, x = select.select([self.request, chan], [], []) if self.request in r: data = self.request.recv(1024) if len(data) == 0: break chan.send(data) if chan in r: data = chan.recv(1024) if len(data) == 0: break self.request.send(data) peername = self.request.getpeername() chan.close() self.request.close() verbose("Tunnel closed from %r" % (peername,)) def forward_tunnel(local_port, remote_host, remote_port, transport): # this is a little convoluted, but lets me configure things for the Handler # object. (SocketServer doesn't give Handlers any way to access the outer # server normally.) class SubHander(Handler): chain_host = remote_host chain_port = remote_port ssh_transport = transport ForwardServer(("", local_port), SubHander).serve_forever() def verbose(s): if g_verbose: print(s) HELP = """\ Set up a forward tunnel across an SSH server, using paramiko. A local port (given with -p) is forwarded across an SSH session to an address:port from the SSH server. This is similar to the openssh -L option. 
""" def get_host_port(spec, default_port): "parse 'hostname:22' into a host and port, with the port optional" args = (spec.split(":", 1) + [default_port])[:2] args[1] = int(args[1]) return args[0], args[1] def parse_options(): global g_verbose parser = OptionParser( usage="usage: %prog [options] <ssh-server>[:<server-port>]", version="%prog 1.0", description=HELP, ) parser.add_option( "-q", "--quiet", action="store_false", dest="verbose", default=True, help="squelch all informational output", ) parser.add_option( "-p", "--local-port", action="store", type="int", dest="port", default=DEFAULT_PORT, help="local port to forward (default: %d)" % DEFAULT_PORT, ) parser.add_option( "-u", "--user", action="store", type="string", dest="user", default=getpass.getuser(), help="username for SSH authentication (default: %s)" % getpass.getuser(), ) parser.add_option( "-K", "--key", action="store", type="string", dest="keyfile", default=None, help="private key file to use for SSH authentication", ) parser.add_option( "", "--no-key", action="store_false", dest="look_for_keys", default=True, help="don't look for or use a private key file", ) parser.add_option( "-P", "--password", action="store_true", dest="readpass", default=False, help="read password (for key or password auth) from stdin", ) parser.add_option( "-r", "--remote", action="store", type="string", dest="remote", default=None, metavar="host:port", help="remote host and port to forward to", ) options, args = parser.parse_args() if len(args) != 1: parser.error("Incorrect number of arguments.") if options.remote is None: parser.error("Remote address required (-r).") g_verbose = options.verbose server_host, server_port = get_host_port(args[0], SSH_PORT) remote_host, remote_port = get_host_port(options.remote, SSH_PORT) return options, (server_host, server_port), (remote_host, remote_port) def main(): options, server, remote = parse_options() password = None if options.readpass: password = getpass.getpass("Enter SSH password: 
") client = paramiko.SSHClient() client.load_system_host_keys() client.set_missing_host_key_policy(paramiko.WarningPolicy()) verbose("Connecting to ssh host %s:%d ..." % (server[0], server[1])) try: client.connect( server[0], server[1], username=options.user, key_filename=options.keyfile, look_for_keys=options.look_for_keys, password=password, ) except Exception as e: print("*** Failed to connect to %s:%d: %r" % (server[0], server[1], e)) sys.exit(1) verbose( "Now forwarding port %d to %s:%d ..." % (options.port, remote[0], remote[1]) ) try: forward_tunnel( options.port, remote[0], remote[1], client.get_transport() ) except KeyboardInterrupt: print("C-c: Port forwarding stopped.") sys.exit(0) if __name__ == "__main__": main()
Handler
python
tensorflow__tensorflow
tensorflow/lite/python/tflite_convert_test.py
{ "start": 1900, "end": 4057 }
class ____(test_util.TensorFlowTestCase): def _getFilepath(self, filename): return os.path.join(self.get_temp_dir(), filename) def _run(self, flags_str, should_succeed, expected_ops_in_converted_model=None, expected_output_shapes=None): output_file = os.path.join(self.get_temp_dir(), 'model.tflite') tflite_bin = resource_loader.get_path_to_datafile('tflite_convert.par') cmdline = '{0} --output_file={1} {2}'.format(tflite_bin, output_file, flags_str) exitcode = os.system(cmdline) if exitcode == 0: with gfile.Open(output_file, 'rb') as model_file: content = model_file.read() self.assertEqual(content is not None, should_succeed) if expected_ops_in_converted_model: op_set = tflite_test_util.get_ops_list(content) for opname in expected_ops_in_converted_model: self.assertIn(opname, op_set) if expected_output_shapes: output_shapes = tflite_test_util.get_output_shapes(content) self.assertEqual(output_shapes, expected_output_shapes) os.remove(output_file) else: self.assertFalse(should_succeed) def _getKerasModelFile(self): x = np.array([[1.], [2.]]) y = np.array([[2.], [4.]]) model = keras.models.Sequential([ keras.layers.Dropout(0.2, input_shape=(1,)), keras.layers.Dense(1), ]) model.compile(optimizer='sgd', loss='mean_squared_error') model.fit(x, y, epochs=1) keras_file = self._getFilepath('model.h5') keras.models.save_model(model, keras_file) return keras_file def _getKerasFunctionalModelFile(self): """Returns a functional Keras model with output shapes [[1, 1], [1, 2]].""" input_tensor = keras.layers.Input(shape=(1,)) output1 = keras.layers.Dense(1, name='b')(input_tensor) output2 = keras.layers.Dense(2, name='a')(input_tensor) model = keras.models.Model(inputs=input_tensor, outputs=[output1, output2]) keras_file = self._getFilepath('functional_model.h5') keras.models.save_model(model, keras_file) return keras_file
TestModels
python
huggingface__transformers
src/transformers/models/xlm_roberta/modeling_xlm_roberta.py
{ "start": 10662, "end": 12072 }
class ____(nn.Module): def __init__(self, config, is_causal=False, layer_idx=None, is_cross_attention=False): super().__init__() self.is_cross_attention = is_cross_attention attention_class = XLMRobertaCrossAttention if is_cross_attention else XLMRobertaSelfAttention self.self = attention_class(config, is_causal=is_causal, layer_idx=layer_idx) self.output = XLMRobertaSelfOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Cache] = None, cache_position: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor]: attention_mask = attention_mask if not self.is_cross_attention else encoder_attention_mask attention_output, attn_weights = self.self( hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, past_key_values=past_key_values, cache_position=cache_position, **kwargs, ) attention_output = self.output(attention_output, hidden_states) return attention_output, attn_weights
XLMRobertaAttention
python
pytorch__pytorch
torchgen/model.py
{ "start": 48311, "end": 49651 }
class ____: name: str supported_dtypes: OrderedSet[ScalarType] # key is stored here because it affects the semantics of name, # so its helpful to have them together for further processing ufunc_key: UfuncKey @staticmethod def parse(value: str, ufunc_key: UfuncKey) -> UfuncInnerLoop: name, supported_dtypes_str = value.split(" ", 1) assert supported_dtypes_str[0] == "(" assert supported_dtypes_str[-1] == ")" supported_dtypes: OrderedSet[ScalarType] = OrderedSet() for k in supported_dtypes_str[1:-1].split(", "): supported_dtypes |= ScalarType.parse_set(k) return UfuncInnerLoop( name=name, supported_dtypes=supported_dtypes, ufunc_key=ufunc_key ) # BackendIndex represents a backend. # The BackendIndex encodes per-operator information that is potentially different # for each backend. The most obvious example is the name of the kernel # (the 'dispatch' entry in native_functions.yaml). # However, there can be other examples of different backends having different information. # External backends can choose to opt their kernels to be structured independently from in-tree backends, # which means that this information isn't inherently tied to a NativeFunction- it's different per backend. @dataclass(frozen=True)
UfuncInnerLoop
python
django__django
tests/utils_tests/test_os_utils.py
{ "start": 161, "end": 846 }
class ____(unittest.TestCase): def test_base_path_ends_with_sep(self): drive, path = os.path.splitdrive(safe_join("/abc/", "abc")) self.assertEqual(path, "{0}abc{0}abc".format(os.path.sep)) def test_root_path(self): drive, path = os.path.splitdrive(safe_join("/", "path")) self.assertEqual( path, "{}path".format(os.path.sep), ) drive, path = os.path.splitdrive(safe_join("/", "")) self.assertEqual( path, os.path.sep, ) def test_parent_path(self): with self.assertRaises(SuspiciousFileOperation): safe_join("/abc/", "../def")
SafeJoinTests
python
pydantic__pydantic
tests/benchmarks/basemodel_eq_performance.py
{ "start": 6120, "end": 6554 }
class ____(Generic[K, V]): """Wrapper redirecting `__getitem__` to `get` and a sentinel value This makes is safe to use in `operator.itemgetter` when some keys may be missing """ wrapped: dict[K, V] def __getitem__(self, key: K, /) -> V | _SentinelType: return self.wrapped.get(key, _SENTINEL) def __contains__(self, key: K, /) -> bool: return self.wrapped.__contains__(key)
_SafeGetItemProxy
python
tensorflow__tensorflow
tensorflow/python/feature_column/feature_column_test.py
{ "start": 189237, "end": 191336 }
class ____(test.TestCase): # All transform tests are distributed in column test. # Here we only test multi column case and naming def transform_multi_column(self): bucketized_price = fc._bucketized_column( fc._numeric_column('price'), boundaries=[0, 2, 4, 6]) hashed_sparse = fc._categorical_column_with_hash_bucket('wire', 10) with ops.Graph().as_default(): features = { 'price': [[-1.], [5.]], 'wire': sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) } transformed = _transform_features(features, [bucketized_price, hashed_sparse]) with _initialized_session(): self.assertIn(bucketized_price.name, transformed[bucketized_price].name) self.assertAllEqual([[0], [3]], transformed[bucketized_price]) self.assertIn(hashed_sparse.name, transformed[hashed_sparse].name) self.assertAllEqual([6, 4, 1], transformed[hashed_sparse].values) def test_column_order(self): """When the column is both dense and sparse, uses sparse tensors.""" class _LoggerColumn(_FeatureColumn): def __init__(self, name): self._name = name @property def name(self): return self._name def _transform_feature(self, inputs): del inputs self.call_order = call_logger['count'] call_logger['count'] += 1 return 'Anything' @property def _parse_example_spec(self): pass with ops.Graph().as_default(): column1 = _LoggerColumn('1') column2 = _LoggerColumn('2') call_logger = {'count': 0} _transform_features({}, [column1, column2]) self.assertEqual(0, column1.call_order) self.assertEqual(1, column2.call_order) call_logger = {'count': 0} _transform_features({}, [column2, column1]) self.assertEqual(0, column1.call_order) self.assertEqual(1, column2.call_order)
TransformFeaturesTest
python
kamyu104__LeetCode-Solutions
Python/2-keys-keyboard.py
{ "start": 35, "end": 424 }
class ____(object): def minSteps(self, n): """ :type n: int :rtype: int """ result = 0 p = 2 # the answer is the sum of prime factors while p**2 <= n: while n % p == 0: result += p n //= p p += 1 if n > 1: result += n return result
Solution
python
fluentpython__example-code-2e
24-class-metaprog/slots/slots_timing.py
{ "start": 711, "end": 850 }
class ____(metaclass=Correct2): pass o = Klass2() try: o.z = 3 except AttributeError as e: print('Raised as expected:', e)
Klass2
python
getsentry__sentry
tests/sentry/preprod/api/endpoints/test_project_preprod_artifact_install_details.py
{ "start": 352, "end": 7020 }
class ____(TestCase): def setUp(self) -> None: super().setUp() self.file = self.create_file( name="test_installable.ipa", type="application/octet-stream", ) self.login_as(user=self.user) def _get_url(self, artifact_id=None): artifact_id = artifact_id or self.preprod_artifact.id return f"/api/0/projects/{self.organization.slug}/{self.project.slug}/preprodartifacts/{artifact_id}/install-details/" def _create_ios_artifact(self, **kwargs): """Helper to create an iOS artifact with default valid extras""" defaults = { "project": self.project, "file_id": self.file.id, "state": PreprodArtifact.ArtifactState.PROCESSED, "artifact_type": PreprodArtifact.ArtifactType.XCARCHIVE, "installable_app_file_id": self.file.id, "build_version": "1.2.3", "extras": { "is_code_signature_valid": True, "profile_name": "Test Profile", "codesigning_type": "development", }, } defaults.update(kwargs) return PreprodArtifact.objects.create(**defaults) def _create_android_artifact(self, **kwargs): """Helper to create an Android artifact with default valid extras""" defaults = { "project": self.project, "file_id": self.file.id, "state": PreprodArtifact.ArtifactState.PROCESSED, "artifact_type": PreprodArtifact.ArtifactType.AAB, "installable_app_file_id": self.file.id, "build_version": "1.2.3", } defaults.update(kwargs) return PreprodArtifact.objects.create(**defaults) @patch("sentry.analytics.record") def test_ios_artifact_success(self, mock_analytics: MagicMock) -> None: """Test successful iOS artifact install details request""" self.preprod_artifact = self._create_ios_artifact() url = self._get_url() response = self.client.get(url) assert response.status_code == 200 data = response.json() # Verify response structure assert data["is_code_signature_valid"] is True assert data["profile_name"] == "Test Profile" assert data["codesigning_type"] == "development" assert "install_url" in data # Verify iOS-specific URL parameter assert "?response_format=plist" in data["install_url"] # Verify analytics was 
called assert_any_analytics_event( mock_analytics, PreprodArtifactApiInstallDetailsEvent( organization_id=self.project.organization_id, project_id=self.project.id, user_id=self.user.id, artifact_id=str(self.preprod_artifact.id), ), ) # Verify InstallablePreprodArtifact was created installable = InstallablePreprodArtifact.objects.get(preprod_artifact=self.preprod_artifact) assert installable.download_count == 0 assert installable.expiration_date is not None assert installable.expiration_date > timezone.now() @patch("sentry.analytics.record") def test_android_artifact_success(self, mock_analytics: MagicMock) -> None: """Test successful Android artifact install details request""" self.preprod_artifact = self._create_android_artifact() url = self._get_url() response = self.client.get(url) assert response.status_code == 200 data = response.json() # Verify response structure assert "install_url" in data # Verify Android-specific URL (no plist parameter) assert "?response_format=plist" not in data["install_url"] # Verify analytics was called assert_any_analytics_event( mock_analytics, PreprodArtifactApiInstallDetailsEvent( organization_id=self.project.organization_id, project_id=self.project.id, user_id=self.user.id, artifact_id=str(self.preprod_artifact.id), ), ) def test_artifact_not_found(self) -> None: """Test when artifact doesn't exist""" url = self._get_url(artifact_id=99999) response = self.client.get(url) assert response.status_code == 404 def test_invalid_code_signature(self) -> None: """Test when code signature is invalid""" self.preprod_artifact = self._create_ios_artifact(extras={"is_code_signature_valid": False}) url = self._get_url() response = self.client.get(url) assert response.status_code == 200 data = response.json() assert data["is_code_signature_valid"] is False assert "install_url" not in data def test_missing_extras(self) -> None: """Test when extras field is missing""" self.preprod_artifact = self._create_ios_artifact(extras=None) url = 
self._get_url() response = self.client.get(url) assert response.status_code == 200 data = response.json() assert data["is_code_signature_valid"] is False assert "install_url" not in data def test_missing_code_signature_valid_field(self) -> None: """Test when is_code_signature_valid field is missing from extras""" self.preprod_artifact = self._create_ios_artifact(extras={"profile_name": "Test Profile"}) url = self._get_url() response = self.client.get(url) assert response.status_code == 200 data = response.json() assert data["is_code_signature_valid"] is False assert "install_url" not in data def test_no_installable_file(self) -> None: """Test when installable_app_file_id is None""" self.preprod_artifact = self._create_ios_artifact(installable_app_file_id=None) url = self._get_url() response = self.client.get(url) assert response.status_code == 404 data = response.json() assert data["error"] == "Installable file not available" def test_unauthorized_access(self) -> None: """Test that unauthorized users cannot access the endpoint""" self.preprod_artifact = self._create_ios_artifact() # Create a different user and log them in other_user = self.create_user() self.login_as(user=other_user) url = self._get_url() response = self.client.get(url) # Should be denied access since user doesn't have access to the project assert response.status_code == 403
ProjectPreprodInstallDetailsEndpointTest
python
tensorflow__tensorflow
tensorflow/compiler/mlir/tfr/python/tfr_gen.py
{ "start": 7860, "end": 11880 }
class ____(object): """A Dict to cache the OpDef for the Python function name.""" def __init__(self): self._op_defs = {} def lookup(self, f_name, func_def=None, optional=False): if f_name in self._op_defs: return self._op_defs[f_name] if isinstance(func_def, types.FunctionType): if not hasattr(func_def, '_tfr_op_name'): # skip a non-composition function if optional: return (None, None) else: raise KeyError('OpDef does not exist: ' + f_name) op_name = getattr(func_def, '_tfr_op_name') elif not func_def: op_name = f_name else: # TODO(fengliuai): create one utility method to match different APIs. compose_dec = [] for dec in func_def.decorator_list: if isinstance(dec, ast.Call): if isinstance(dec.func, ast.Attribute) and dec.func.attr == 'Composite': compose_dec.append(dec) if isinstance(dec.func, ast.Name) and dec.func.id == 'Composite': compose_dec.append(dec) if not compose_dec: # skip a non-composition function if optional: return (None, None) else: raise KeyError('OpDef does not exist: ' + f_name) elif len(compose_dec) > 1: raise KeyError('More than one TF ops decomposes for.') else: op_name = compose_dec[0].args[0].value op_def = op_def_registry.get(op_name) if not op_def: raise ValueError('Not a registered op: ' + op_name) derived_attrs = _collect_derived_attrs_from_proto(op_def) self._op_defs[f_name] = (op_def, derived_attrs) return (op_def, derived_attrs) def mlir_external_funcs(self): tfr_funcs = set() for _, (op_def, derived_attrs) in sorted(self._op_defs.items()): tfr_func = '\ntfr.func @tf__{}_('.format(_camel_to_snake(op_def.name)) # tensor inputs inputs = [ _get_type_info_from_proto(arg_def) for arg_def in op_def.input_arg ] # attribute inputs. The attribute with default values are moved backwards. 
non_derived_attrs = [ attr for attr in op_def.attr if attr.name not in derived_attrs ] attrs_no_default = [ attr for attr in non_derived_attrs if not attr.HasField('default_value') ] attrs_with_default = [ attr for attr in non_derived_attrs if attr.HasField('default_value') ] attr_names = {'f32_', 'i32_', 'i64_', 'i1_'} # reserved for attr_def in attrs_no_default + attrs_with_default: inputs.append(_get_type_info_from_proto(None, attr_def)) attr_names.add(attr_def.name) # tensor outputs outputs = [ _get_type_info_from_proto(arg_def) for arg_def in op_def.output_arg ] inputs = ','.join(inputs) outputs = ','.join(outputs) attrs = ','.join(sorted(derived_attrs.union(attr_names))) tfr_funcs.add('{}{}) -> ({}) attributes {{{}}}'.format( tfr_func, inputs, outputs, attrs)) return sorted(list(tfr_funcs)) _PY_TYPE_TO_TFR = { bool: TFRTypes.I1, int: TFRTypes.I64, float: TFRTypes.F32, } _TF_DTYPE_TO_TFR = { 'bool': TFRTypes.I1, 'int64': TFRTypes.I64, 'int32': TFRTypes.I32, 'int16': TFRTypes.I16, 'int8': TFRTypes.I8, 'float32': TFRTypes.F32, } _AG_FIXED_RETURN_TYPE = { 'for_stmt': type(None), 'if_stmt': type(None), 'Undefined': TFRTypes.AG_UNDEFINED_VAL, } QN = qual_names.QN # TODO(mdan): Fix this with an importable module. AG_MODULE = api._TRANSPILER.get_extra_locals()['ag__'] # pylint:disable=protected-access # When an item is callable, the signature is (*operand_types) -> result_type(s) TFR_BUILTINS = { '_tfr_quant_act_range': (TFRTypes.TENSOR, TFRTypes.TENSOR), '_tfr_quant_rescale': TFRTypes.TENSOR, '_tfr_quant_raw_data': lambda input_type: input_type, '_tfr_quant_qparam': (TFRTypes.TENSOR, TFRTypes.TENSOR), '_tfr_quant_scale_factor': TFRTypes.TENSOR, }
OpDefCache
python
spack__spack
var/spack/test_repos/spack_repo/builtin_mock/packages/deprecated_versions/package.py
{ "start": 216, "end": 639 }
class ____(Package): """Package with the most recent version deprecated""" homepage = "http://www.example.com" url = "http://www.example.com/c-1.0.tar.gz" version( "1.1.0", sha256="abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890", deprecated=True, ) version("1.0.0", sha256="abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890")
DeprecatedVersions
python
apache__airflow
task-sdk/src/airflow/sdk/api/datamodels/activities.py
{ "start": 930, "end": 1078 }
class ____(BaseModel): ti: TaskInstance path: os.PathLike[str] token: str """The identity token for this workload"""
ExecuteTaskActivity
python
django__django
django/db/models/fields/related.py
{ "start": 54116, "end": 83729 }
class ____(RelatedField): """ Provide a many-to-many relation by using an intermediary model that holds two ForeignKey fields pointed at the two sides of the relation. Unless a ``through`` model was provided, ManyToManyField will use the create_many_to_many_intermediary_model factory to automatically generate the intermediary model. """ # Field flags many_to_many = True many_to_one = False one_to_many = False one_to_one = False rel_class = ManyToManyRel description = _("Many-to-many relationship") def __init__( self, to, related_name=None, related_query_name=None, limit_choices_to=None, symmetrical=None, through=None, through_fields=None, db_constraint=True, db_table=None, swappable=True, **kwargs, ): try: to._meta except AttributeError: if not isinstance(to, str): raise TypeError( "%s(%r) is invalid. First parameter to ManyToManyField " "must be either a model, a model name, or the string %r" % ( self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT, ) ) if symmetrical is None: symmetrical = to == RECURSIVE_RELATIONSHIP_CONSTANT if through is not None and db_table is not None: raise ValueError( "Cannot specify a db_table if an intermediary model is used." 
) kwargs["rel"] = self.rel_class( self, to, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, symmetrical=symmetrical, through=through, through_fields=through_fields, db_constraint=db_constraint, ) self.has_null_arg = "null" in kwargs super().__init__( related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, **kwargs, ) self.db_table = db_table self.swappable = swappable def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_unique(**kwargs), *self._check_relationship_model(**kwargs), *self._check_ignored_options(**kwargs), *self._check_table_uniqueness(**kwargs), ] def _check_unique(self, **kwargs): if self.unique: return [ checks.Error( "ManyToManyFields cannot be unique.", obj=self, id="fields.E330", ) ] return [] def _check_ignored_options(self, **kwargs): warnings = [] if self.has_null_arg: warnings.append( checks.Warning( "null has no effect on ManyToManyField.", obj=self, id="fields.W340", ) ) if self._validators: warnings.append( checks.Warning( "ManyToManyField does not support validators.", obj=self, id="fields.W341", ) ) if self.remote_field.symmetrical and self._related_name: warnings.append( checks.Warning( "related_name has no effect on ManyToManyField " 'with a symmetrical relationship, e.g. 
to "self".', obj=self, id="fields.W345", ) ) if self.db_comment: warnings.append( checks.Warning( "db_comment has no effect on ManyToManyField.", obj=self, id="fields.W346", ) ) return warnings def _check_relationship_model(self, from_model=None, **kwargs): from django.db.models.fields.composite import CompositePrimaryKey if hasattr(self.remote_field.through, "_meta"): qualified_model_name = "%s.%s" % ( self.remote_field.through._meta.app_label, self.remote_field.through.__name__, ) else: qualified_model_name = self.remote_field.through errors = [] if self.remote_field.through not in self.opts.apps.get_models( include_auto_created=True ): # The relationship model is not installed. errors.append( checks.Error( "Field specifies a many-to-many relation through model " "'%s', which has not been installed." % qualified_model_name, obj=self, id="fields.E331", ) ) else: assert from_model is not None, ( "ManyToManyField with intermediate " "tables cannot be checked if you don't pass the model " "where the field is attached to." 
) # Set some useful local variables to_model = resolve_relation(from_model, self.remote_field.model) from_model_name = from_model._meta.object_name if isinstance(to_model, str): to_model_name = to_model else: to_model_name = to_model._meta.object_name if self.remote_field.through_fields is None and not isinstance( to_model, str ): model_name = None if isinstance(to_model._meta.pk, CompositePrimaryKey): model_name = self.remote_field.model._meta.object_name elif isinstance(from_model._meta.pk, CompositePrimaryKey): model_name = from_model_name if model_name: errors.append( checks.Error( f"Field defines a relation involving model {model_name!r} " "which has a CompositePrimaryKey and such relations are " "not supported.", obj=self, id="fields.E347", ) ) relationship_model_name = self.remote_field.through._meta.object_name self_referential = from_model == to_model # Count foreign keys in intermediate model if self_referential: seen_self = sum( from_model == getattr(field.remote_field, "model", None) for field in self.remote_field.through._meta.fields ) if seen_self > 2 and not self.remote_field.through_fields: errors.append( checks.Error( "The model is used as an intermediate model by " "'%s', but it has more than two foreign keys " "to '%s', which is ambiguous. You must specify " "which two foreign keys Django should use via the " "through_fields keyword argument." % (self, from_model_name), hint=( "Use through_fields to specify which two foreign keys " "Django should use." 
), obj=self.remote_field.through, id="fields.E333", ) ) else: # Count foreign keys in relationship model seen_from = sum( from_model == getattr(field.remote_field, "model", None) for field in self.remote_field.through._meta.fields ) seen_to = sum( to_model == getattr(field.remote_field, "model", None) for field in self.remote_field.through._meta.fields ) if seen_from > 1 and not self.remote_field.through_fields: errors.append( checks.Error( ( "The model is used as an intermediate model by " "'%s', but it has more than one foreign key " "from '%s', which is ambiguous. You must specify " "which foreign key Django should use via the " "through_fields keyword argument." ) % (self, from_model_name), hint=( "If you want to create a recursive relationship, " 'use ManyToManyField("%s", through="%s").' ) % ( RECURSIVE_RELATIONSHIP_CONSTANT, relationship_model_name, ), obj=self, id="fields.E334", ) ) if seen_to > 1 and not self.remote_field.through_fields: errors.append( checks.Error( "The model is used as an intermediate model by " "'%s', but it has more than one foreign key " "to '%s', which is ambiguous. You must specify " "which foreign key Django should use via the " "through_fields keyword argument." % (self, to_model_name), hint=( "If you want to create a recursive relationship, " 'use ManyToManyField("%s", through="%s").' ) % ( RECURSIVE_RELATIONSHIP_CONSTANT, relationship_model_name, ), obj=self, id="fields.E335", ) ) if seen_from == 0 or seen_to == 0: errors.append( checks.Error( "The model is used as an intermediate model by " "'%s', but it does not have a foreign key to '%s' or '%s'." % (self, from_model_name, to_model_name), obj=self.remote_field.through, id="fields.E336", ) ) # Validate `through_fields`. if self.remote_field.through_fields is not None: # Validate that we're given an iterable of at least two items # and that none of them is "falsy". 
if not ( len(self.remote_field.through_fields) >= 2 and self.remote_field.through_fields[0] and self.remote_field.through_fields[1] ): errors.append( checks.Error( "Field specifies 'through_fields' but does not provide " "the names of the two link fields that should be used " "for the relation through model '%s'." % qualified_model_name, hint=( "Make sure you specify 'through_fields' as " "through_fields=('field1', 'field2')" ), obj=self, id="fields.E337", ) ) # Validate the given through fields -- they should be actual # fields on the through model, and also be foreign keys to the # expected models. else: assert from_model is not None, ( "ManyToManyField with intermediate " "tables cannot be checked if you don't pass the model " "where the field is attached to." ) source, through, target = ( from_model, self.remote_field.through, self.remote_field.model, ) source_field_name, target_field_name = self.remote_field.through_fields[ :2 ] for field_name, related_model in ( (source_field_name, source), (target_field_name, target), ): possible_field_names = [] for f in through._meta.fields: if ( hasattr(f, "remote_field") and getattr(f.remote_field, "model", None) == related_model ): possible_field_names.append(f.name) if possible_field_names: hint = ( "Did you mean one of the following foreign keys to '%s': " "%s?" % ( related_model._meta.object_name, ", ".join(possible_field_names), ) ) else: hint = None try: field = through._meta.get_field(field_name) except exceptions.FieldDoesNotExist: errors.append( checks.Error( "The intermediary model '%s' has no field '%s'." % (qualified_model_name, field_name), hint=hint, obj=self, id="fields.E338", ) ) else: if not ( hasattr(field, "remote_field") and getattr(field.remote_field, "model", None) == related_model ): related_object_name = ( related_model if isinstance(related_model, str) else related_model._meta.object_name ) errors.append( checks.Error( "'%s.%s' is not a foreign key to '%s'." 
% ( through._meta.object_name, field_name, related_object_name, ), hint=hint, obj=self, id="fields.E339", ) ) return errors def _check_table_uniqueness(self, **kwargs): if ( isinstance(self.remote_field.through, str) or not self.remote_field.through._meta.managed ): return [] registered_tables = { model._meta.db_table: model for model in self.opts.apps.get_models(include_auto_created=True) if model != self.remote_field.through and model._meta.managed } m2m_db_table = self.m2m_db_table() model = registered_tables.get(m2m_db_table) # The second condition allows multiple m2m relations on a model if # some point to a through model that proxies another through model. if ( model and model._meta.concrete_model != self.remote_field.through._meta.concrete_model ): if model._meta.auto_created: def _get_field_name(model): for field in model._meta.auto_created._meta.many_to_many: if field.remote_field.through is model: return field.name opts = model._meta.auto_created._meta clashing_obj = "%s.%s" % (opts.label, _get_field_name(model)) else: clashing_obj = model._meta.label if settings.DATABASE_ROUTERS: error_class, error_id = checks.Warning, "fields.W344" error_hint = ( "You have configured settings.DATABASE_ROUTERS. Verify " "that the table of %r is correctly routed to a separate " "database." % clashing_obj ) else: error_class, error_id = checks.Error, "fields.E340" error_hint = None return [ error_class( "The field's intermediary table '%s' clashes with the " "table name of '%s'." % (m2m_db_table, clashing_obj), obj=self, hint=error_hint, id=error_id, ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() # Handle the simpler arguments. if self.db_table is not None: kwargs["db_table"] = self.db_table if self.remote_field.db_constraint is not True: kwargs["db_constraint"] = self.remote_field.db_constraint # Lowercase model names as they should be treated as case-insensitive. if isinstance(self.remote_field.model, str): if "." 
in self.remote_field.model: app_label, model_name = self.remote_field.model.split(".") kwargs["to"] = "%s.%s" % (app_label, model_name.lower()) else: kwargs["to"] = self.remote_field.model.lower() else: kwargs["to"] = self.remote_field.model._meta.label_lower if getattr(self.remote_field, "through", None) is not None: if isinstance(self.remote_field.through, str): kwargs["through"] = self.remote_field.through elif not self.remote_field.through._meta.auto_created: kwargs["through"] = self.remote_field.through._meta.label if through_fields := getattr(self.remote_field, "through_fields", None): kwargs["through_fields"] = through_fields # If swappable is True, then see if we're actually pointing to the # target of a swap. swappable_setting = self.swappable_setting if swappable_setting is not None: # If it's already a settings reference, error. if hasattr(kwargs["to"], "setting_name"): if kwargs["to"].setting_name != swappable_setting: raise ValueError( "Cannot deconstruct a ManyToManyField pointing to a " "model that is swapped in place of more than one model " "(%s and %s)" % (kwargs["to"].setting_name, swappable_setting) ) kwargs["to"] = SettingsReference( kwargs["to"], swappable_setting, ) return name, path, args, kwargs def get_attname_column(self): attname, _ = super().get_attname_column() return attname, None def _get_path_info(self, direct=False, filtered_relation=None): """Called by both direct and indirect m2m traversal.""" int_model = self.remote_field.through linkfield1 = int_model._meta.get_field(self.m2m_field_name()) linkfield2 = int_model._meta.get_field(self.m2m_reverse_field_name()) if direct: join1infos = linkfield1.reverse_path_infos if filtered_relation: join2infos = linkfield2.get_path_info(filtered_relation) else: join2infos = linkfield2.path_infos else: join1infos = linkfield2.reverse_path_infos if filtered_relation: join2infos = linkfield1.get_path_info(filtered_relation) else: join2infos = linkfield1.path_infos # Get join infos between the last 
model of join 1 and the first model # of join 2. Assume the only reason these may differ is due to model # inheritance. join1_final = join1infos[-1].to_opts join2_initial = join2infos[0].from_opts if join1_final is join2_initial: intermediate_infos = [] elif issubclass(join1_final.model, join2_initial.model): intermediate_infos = join1_final.get_path_to_parent(join2_initial.model) else: intermediate_infos = join2_initial.get_path_from_parent(join1_final.model) return [*join1infos, *intermediate_infos, *join2infos] def get_path_info(self, filtered_relation=None): return self._get_path_info(direct=True, filtered_relation=filtered_relation) @cached_property def path_infos(self): return self.get_path_info() def get_reverse_path_info(self, filtered_relation=None): return self._get_path_info(direct=False, filtered_relation=filtered_relation) @cached_property def reverse_path_infos(self): return self.get_reverse_path_info() def _get_m2m_db_table(self, opts): """ Function that can be curried to provide the m2m table name for this relation. """ if self.remote_field.through is not None: return self.remote_field.through._meta.db_table elif self.db_table: return self.db_table else: m2m_table_name = "%s_%s" % (utils.strip_quotes(opts.db_table), self.name) return utils.truncate_name(m2m_table_name, connection.ops.max_name_length()) def _get_m2m_attr(self, related, attr): """ Function that can be curried to provide the source accessor or DB column name for the m2m table. 
""" cache_attr = "_m2m_%s_cache" % attr if hasattr(self, cache_attr): return getattr(self, cache_attr) if self.remote_field.through_fields is not None: link_field_name = self.remote_field.through_fields[0] else: link_field_name = None for f in self.remote_field.through._meta.fields: if ( f.is_relation and f.remote_field.model == related.related_model and (link_field_name is None or link_field_name == f.name) ): setattr(self, cache_attr, getattr(f, attr)) return getattr(self, cache_attr) def _get_m2m_reverse_attr(self, related, attr): """ Function that can be curried to provide the related accessor or DB column name for the m2m table. """ cache_attr = "_m2m_reverse_%s_cache" % attr if hasattr(self, cache_attr): return getattr(self, cache_attr) found = False if self.remote_field.through_fields is not None: link_field_name = self.remote_field.through_fields[1] else: link_field_name = None for f in self.remote_field.through._meta.fields: if f.is_relation and f.remote_field.model == related.model: if link_field_name is None and related.related_model == related.model: # If this is an m2m-intermediate to self, # the first foreign key you find will be # the source column. Keep searching for # the second foreign key. if found: setattr(self, cache_attr, getattr(f, attr)) break else: found = True elif link_field_name is None or link_field_name == f.name: setattr(self, cache_attr, getattr(f, attr)) break return getattr(self, cache_attr) def contribute_to_class(self, cls, name, **kwargs): # To support multiple relations to self, it's useful to have a non-None # related name on symmetrical relations for internal reasons. The # concept doesn't make a lot of sense externally ("you want me to # specify *what* on my non-reversible relation?!"), so we set it up # automatically. The funky name reduces the chance of an accidental # clash. 
if self.remote_field.symmetrical and ( self.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT or self.remote_field.model == cls._meta.object_name ): self.remote_field.related_name = "%s_rel_+" % name elif self.remote_field.hidden: # If the backwards relation is disabled, replace the original # related_name with one generated from the m2m field name. Django # still uses backwards relations internally and we need to avoid # clashes between multiple m2m fields with related_name == '+'. self.remote_field.related_name = "_%s_%s_%s_+" % ( cls._meta.app_label, cls.__name__.lower(), name, ) super().contribute_to_class(cls, name, **kwargs) # The intermediate m2m model is not auto created if: # 1) There is a manually specified intermediate, or # 2) The class owning the m2m field is abstract. # 3) The class owning the m2m field has been swapped out. if not cls._meta.abstract: if self.remote_field.through: def resolve_through_model(_, model, field): field.remote_field.through = model lazy_related_operation( resolve_through_model, cls, self.remote_field.through, field=self ) elif not cls._meta.swapped: self.remote_field.through = create_many_to_many_intermediary_model( self, cls ) # Add the descriptor for the m2m relation. setattr(cls, self.name, ManyToManyDescriptor(self.remote_field, reverse=False)) # Set up the accessor for the m2m table name for the relation. self.m2m_db_table = partial(self._get_m2m_db_table, cls._meta) def contribute_to_related_class(self, cls, related): # Internal M2Ms (i.e., those with a related name ending with '+') # and swapped models don't get a related descriptor. if not self.remote_field.hidden and not related.related_model._meta.swapped: setattr( cls, related.accessor_name, ManyToManyDescriptor(self.remote_field, reverse=True), ) # Set up the accessors for the column names on the m2m table. 
self.m2m_column_name = partial(self._get_m2m_attr, related, "column") self.m2m_reverse_name = partial(self._get_m2m_reverse_attr, related, "column") self.m2m_field_name = partial(self._get_m2m_attr, related, "name") self.m2m_reverse_field_name = partial( self._get_m2m_reverse_attr, related, "name" ) get_m2m_rel = partial(self._get_m2m_attr, related, "remote_field") self.m2m_target_field_name = lambda: get_m2m_rel().field_name get_m2m_reverse_rel = partial( self._get_m2m_reverse_attr, related, "remote_field" ) self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name def set_attributes_from_rel(self): pass def value_from_object(self, obj): return list(getattr(obj, self.attname).all()) if obj._is_pk_set() else [] def save_form_data(self, instance, data): getattr(instance, self.attname).set(data) def formfield(self, *, using=None, **kwargs): defaults = { "form_class": forms.ModelMultipleChoiceField, "queryset": self.remote_field.model._default_manager.using(using), **kwargs, } # If initial is passed in, it's a list of related objects, but the # MultipleChoiceField takes a list of IDs. if defaults.get("initial") is not None: initial = defaults["initial"] if callable(initial): initial = initial() defaults["initial"] = [i.pk for i in initial] return super().formfield(**defaults) def db_check(self, connection): return None def db_type(self, connection): # A ManyToManyField is not represented by a single column, # so return None. return None def db_parameters(self, connection): return {"type": None, "check": None}
ManyToManyField
python
getsentry__sentry
src/sentry/replays/usecases/ingest/__init__.py
{ "start": 2322, "end": 3430 }
class ____(msgspec.Struct, gc=False, tag_field="type", tag=5): data: CustomEventData | None = None RRWebEvent = ( DomContentLoadedEvent | LoadedEvent | FullSnapshotEvent | IncrementalSnapshotEvent | MetaEvent | CustomEvent | PluginEvent ) def parse_recording_data(payload: bytes) -> list[dict]: try: # We're parsing with msgspec (if we can) and then transforming to the type that # JSON.loads returns. return [ {"type": 5, "data": {"tag": e.data.tag, "payload": e.data.payload}} for e in msgspec.json.decode(payload, type=list[RRWebEvent]) if isinstance(e, CustomEvent) and e.data is not None ] except Exception: # We're emitting a metric instead of logging in case this thing really fails hard in # prod. We don't want a huge volume of logs slowing throughput. If there's a # significant volume of this metric we'll test against a broader cohort of data. metrics.incr("replays.recording_consumer.msgspec_decode_error") return json.loads(payload)
CustomEvent
python
openai__openai-python
src/openai/types/beta/realtime/transcription_session_update.py
{ "start": 428, "end": 801 }
class ____(BaseModel): anchor: Optional[Literal["created_at"]] = None """The anchor point for the ephemeral token expiration. Only `created_at` is currently supported. """ seconds: Optional[int] = None """The number of seconds from the anchor point to the expiration. Select a value between `10` and `7200`. """
SessionClientSecretExpiresAt
python
spyder-ide__spyder
spyder/plugins/pylint/confpage.py
{ "start": 594, "end": 2566 }
class ____(PluginConfigPage): def setup_page(self): settings_group = QGroupBox(_("Settings")) save_box = self.create_checkbox(_("Save file before analyzing it"), 'save_before', default=True) hist_group = QGroupBox(_("History")) hist_label1 = QLabel(_("The following option will be applied at next " "startup.")) hist_label1.setWordWrap(True) hist_spin = self.create_spinbox( _("History: "), _(" results"), "max_entries", self.get_option('max_entries'), min_=MIN_HISTORY_ENTRIES, max_=MAX_HISTORY_ENTRIES, step=1, ) results_group = QGroupBox(_("Results")) results_label1 = QLabel(_("Results are stored here:")) results_label1.setWordWrap(True) # Warning: do not try to regroup the following QLabel contents with # widgets above -- this string was isolated here in a single QLabel # on purpose: to fix spyder-ide/spyder#863. results_label2 = QLabel(PylintWidget.DATAPATH) results_label2.setTextInteractionFlags(Qt.TextSelectableByMouse) results_label2.setWordWrap(True) settings_layout = QVBoxLayout() settings_layout.addWidget(save_box) settings_group.setLayout(settings_layout) hist_layout = QVBoxLayout() hist_layout.addWidget(hist_label1) hist_layout.addWidget(hist_spin) hist_group.setLayout(hist_layout) results_layout = QVBoxLayout() results_layout.addWidget(results_label1) results_layout.addWidget(results_label2) results_group.setLayout(results_layout) vlayout = QVBoxLayout() vlayout.addWidget(settings_group) vlayout.addWidget(hist_group) vlayout.addWidget(results_group) vlayout.addStretch(1) self.setLayout(vlayout)
PylintConfigPage
python
weaviate__weaviate-python-client
weaviate/users/base.py
{ "start": 553, "end": 3866 }
class ____(Generic[ConnectionType]): def __init__(self, connection: ConnectionType): self._connection = connection def _get_roles_of_user( self, user_id: str, user_type: USER_TYPE, include_permissions: bool, ) -> executor.Result[Union[Dict[str, Role], Dict[str, RoleBase]]]: path = f"/authz/users/{escape_string(user_id)}/roles/{user_type}" def resp(res: Response) -> Union[Dict[str, Role], Dict[str, RoleBase]]: roles = res.json() if include_permissions: return {role["name"]: Role._from_weaviate_role(role) for role in roles} return {role["name"]: RoleBase(role["name"]) for role in roles} return executor.execute( response_callback=resp, method=self._connection.get, path=path, params={"includeFullRoles": include_permissions}, error_msg=f"Could not get roles of user {user_id}", status_codes=_ExpectedStatusCodes(ok_in=[200], error="Get roles of user"), ) def _get_roles_of_user_deprecated( self, user_id: str, ) -> executor.Result[Union[Dict[str, Role], Dict[str, RoleBase]]]: path = f"/authz/users/{escape_string(user_id)}/roles" def resp(res: Response) -> Union[Dict[str, Role], Dict[str, RoleBase]]: return {role["name"]: Role._from_weaviate_role(role) for role in res.json()} return executor.execute( response_callback=resp, method=self._connection.get, path=path, error_msg=f"Could not get roles of user {user_id}", status_codes=_ExpectedStatusCodes(ok_in=[200], error="Get roles of user"), ) def _assign_roles_to_user( self, roles: List[str], user_id: str, user_type: Optional[USER_TYPE], ) -> executor.Result[None]: path = f"/authz/users/{escape_string(user_id)}/assign" payload: Dict[str, Any] = {"roles": roles} if user_type is not None: payload["userType"] = user_type def resp(res: Response) -> None: pass return executor.execute( response_callback=resp, method=self._connection.post, path=path, weaviate_object=payload, error_msg=f"Could not assign roles {roles} to user {user_id}", status_codes=_ExpectedStatusCodes(ok_in=[200], error="Assign user to roles"), ) def 
_revoke_roles_from_user( self, roles: Union[str, List[str]], user_id: str, user_type: Optional[USER_TYPE], ) -> executor.Result[None]: path = f"/authz/users/{escape_string(user_id)}/revoke" payload: Dict[str, Any] = {"roles": roles} if user_type is not None: payload["userType"] = user_type def resp(res: Response) -> None: pass return executor.execute( response_callback=resp, method=self._connection.post, path=path, weaviate_object={"roles": roles}, error_msg=f"Could not revoke roles {roles} from user {user_id}", status_codes=_ExpectedStatusCodes(ok_in=[200], error="Revoke roles from user"), )
_BaseExecutor
python
kubernetes-client__python
kubernetes/client/models/v1_rolling_update_stateful_set_strategy.py
{ "start": 383, "end": 6231 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'max_unavailable': 'object', 'partition': 'int' } attribute_map = { 'max_unavailable': 'maxUnavailable', 'partition': 'partition' } def __init__(self, max_unavailable=None, partition=None, local_vars_configuration=None): # noqa: E501 """V1RollingUpdateStatefulSetStrategy - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._max_unavailable = None self._partition = None self.discriminator = None if max_unavailable is not None: self.max_unavailable = max_unavailable if partition is not None: self.partition = partition @property def max_unavailable(self): """Gets the max_unavailable of this V1RollingUpdateStatefulSetStrategy. # noqa: E501 The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0. Defaults to 1. This field is alpha-level and is only honored by servers that enable the MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it will be counted towards MaxUnavailable. # noqa: E501 :return: The max_unavailable of this V1RollingUpdateStatefulSetStrategy. # noqa: E501 :rtype: object """ return self._max_unavailable @max_unavailable.setter def max_unavailable(self, max_unavailable): """Sets the max_unavailable of this V1RollingUpdateStatefulSetStrategy. 
The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0. Defaults to 1. This field is alpha-level and is only honored by servers that enable the MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it will be counted towards MaxUnavailable. # noqa: E501 :param max_unavailable: The max_unavailable of this V1RollingUpdateStatefulSetStrategy. # noqa: E501 :type: object """ self._max_unavailable = max_unavailable @property def partition(self): """Gets the partition of this V1RollingUpdateStatefulSetStrategy. # noqa: E501 Partition indicates the ordinal at which the StatefulSet should be partitioned for updates. During a rolling update, all pods from ordinal Replicas-1 to Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. This is helpful in being able to do a canary based deployment. The default value is 0. # noqa: E501 :return: The partition of this V1RollingUpdateStatefulSetStrategy. # noqa: E501 :rtype: int """ return self._partition @partition.setter def partition(self, partition): """Sets the partition of this V1RollingUpdateStatefulSetStrategy. Partition indicates the ordinal at which the StatefulSet should be partitioned for updates. During a rolling update, all pods from ordinal Replicas-1 to Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. This is helpful in being able to do a canary based deployment. The default value is 0. # noqa: E501 :param partition: The partition of this V1RollingUpdateStatefulSetStrategy. 
# noqa: E501 :type: int """ self._partition = partition def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1RollingUpdateStatefulSetStrategy): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1RollingUpdateStatefulSetStrategy): return True return self.to_dict() != other.to_dict()
V1RollingUpdateStatefulSetStrategy
python
virgili0__Virgilio
Tools/regex-bin/regexPrinter.py
{ "start": 1983, "end": 2420 }
class ____(TreeNode): def __init__(self, token, value, quantifier, value_range, next_node): TreeNode.__init__(self, token, value, next_node) self.quantifier = quantifier self.value_range = value_range def print(self): printer = self.quantifier.get_printer(self.value_range) for value in printer(): for sub in self.next_node.print(): yield value+sub
ChooseNode
python
tensorflow__tensorflow
tensorflow/python/debug/lib/debug_events_writer_test.py
{ "start": 25237, "end": 28784 }
class ____(dumping_callback_test_lib.DumpingCallbackTestBase): """Test for DebugDataReader for multiple file sets under a dump root.""" def testReadingTwoFileSetsWithTheSameDumpRootSucceeds(self): # To simulate a multi-host data dump, we first generate file sets in two # different directories, with the same tfdbg_run_id, and then combine them. tfdbg_run_id = "foo" for i in range(2): writer = debug_events_writer.DebugEventsWriter( os.path.join(self.dump_root, str(i)), tfdbg_run_id, circular_buffer_size=-1) if i == 0: debugged_graph = debug_event_pb2.DebuggedGraph( graph_id="graph1", graph_name="graph1") writer.WriteDebuggedGraph(debugged_graph) op_name = "Op_0" graph_op_creation = debug_event_pb2.GraphOpCreation( op_type="FooOp", op_name=op_name, graph_id="graph1") writer.WriteGraphOpCreation(graph_op_creation) op_name = "Op_1" graph_op_creation = debug_event_pb2.GraphOpCreation( op_type="FooOp", op_name=op_name, graph_id="graph1") writer.WriteGraphOpCreation(graph_op_creation) for _ in range(10): trace = debug_event_pb2.GraphExecutionTrace( op_name="Op_%d" % i, tfdbg_context_id="graph1") writer.WriteGraphExecutionTrace(trace) writer.FlushNonExecutionFiles() writer.FlushExecutionFiles() # Move all files from the subdirectory /1 to subdirectory /0. dump_root_0 = os.path.join(self.dump_root, "0") src_paths = glob.glob(os.path.join(self.dump_root, "1", "*")) for src_path in src_paths: dst_path = os.path.join( dump_root_0, # Rename the file set to avoid file name collision. re.sub(r"(tfdbg_events\.\d+)", r"\g<1>1", os.path.basename(src_path))) os.rename(src_path, dst_path) with debug_events_reader.DebugDataReader(dump_root_0) as reader: reader.update() # Verify the content of the .graph_execution_traces file. 
trace_digests = reader.graph_execution_traces(digest=True) self.assertLen(trace_digests, 20) for _ in range(10): trace = reader.read_graph_execution_trace(trace_digests[i]) self.assertEqual(trace.op_name, "Op_0") for _ in range(10): trace = reader.read_graph_execution_trace(trace_digests[i + 10]) self.assertEqual(trace.op_name, "Op_1") def testReadingTwoFileSetsWithTheDifferentRootsLeadsToError(self): # To simulate a multi-host data dump, we first generate file sets in two # different directories, with different tfdbg_run_ids, and then combine # them. for i in range(2): writer = debug_events_writer.DebugEventsWriter( os.path.join(self.dump_root, str(i)), "run_id_%d" % i, circular_buffer_size=-1) writer.FlushNonExecutionFiles() writer.FlushExecutionFiles() # Move all files from the subdirectory /1 to subdirectory /0. dump_root_0 = os.path.join(self.dump_root, "0") src_paths = glob.glob(os.path.join(self.dump_root, "1", "*")) for src_path in src_paths: dst_path = os.path.join( dump_root_0, # Rename the file set to avoid file name collision. re.sub(r"(tfdbg_events\.\d+)", r"\g<1>1", os.path.basename(src_path))) os.rename(src_path, dst_path) with self.assertRaisesRegex(ValueError, r"Found multiple \(2\) tfdbg2 runs"): debug_events_reader.DebugDataReader(dump_root_0)
MultiSetReaderTest
python
matplotlib__matplotlib
lib/matplotlib/widgets.py
{ "start": 34474, "end": 45531 }
class ____(AxesWidget): r""" A GUI neutral set of check buttons. For the check buttons to remain responsive you must keep a reference to this object. Connect to the CheckButtons with the `.on_clicked` method. Attributes ---------- ax : `~matplotlib.axes.Axes` The parent Axes for the widget. labels : list of `~matplotlib.text.Text` The text label objects of the check buttons. """ def __init__(self, ax, labels, actives=None, *, useblit=True, label_props=None, frame_props=None, check_props=None): """ Add check buttons to `~.axes.Axes` instance *ax*. Parameters ---------- ax : `~matplotlib.axes.Axes` The parent Axes for the widget. labels : list of str The labels of the check buttons. actives : list of bool, optional The initial check states of the buttons. The list must have the same length as *labels*. If not given, all buttons are unchecked. useblit : bool, default: True Use blitting for faster drawing if supported by the backend. See the tutorial :ref:`blitting` for details. .. versionadded:: 3.7 label_props : dict of lists, optional Dictionary of `.Text` properties to be used for the labels. Each dictionary value should be a list of at least a single element. If the list is of length M, its values are cycled such that the Nth label gets the (N mod M) property. .. versionadded:: 3.7 frame_props : dict, optional Dictionary of scatter `.Collection` properties to be used for the check button frame. Defaults (label font size / 2)**2 size, black edgecolor, no facecolor, and 1.0 linewidth. .. versionadded:: 3.7 check_props : dict, optional Dictionary of scatter `.Collection` properties to be used for the check button check. Defaults to (label font size / 2)**2 size, black color, and 1.0 linewidth. .. 
versionadded:: 3.7 """ super().__init__(ax) _api.check_isinstance((dict, None), label_props=label_props, frame_props=frame_props, check_props=check_props) ax.set_xticks([]) ax.set_yticks([]) ax.set_navigate(False) if actives is None: actives = [False] * len(labels) self._useblit = useblit and self.canvas.supports_blit self._background = None ys = np.linspace(1, 0, len(labels)+2)[1:-1] label_props = _expand_text_props(label_props) self.labels = [ ax.text(0.25, y, label, transform=ax.transAxes, horizontalalignment="left", verticalalignment="center", **props) for y, label, props in zip(ys, labels, label_props)] text_size = np.array([text.get_fontsize() for text in self.labels]) / 2 frame_props = { 's': text_size**2, 'linewidth': 1, **cbook.normalize_kwargs(frame_props, collections.PathCollection), 'marker': 's', 'transform': ax.transAxes, } frame_props.setdefault('facecolor', frame_props.get('color', 'none')) frame_props.setdefault('edgecolor', frame_props.pop('color', 'black')) self._frames = ax.scatter([0.15] * len(ys), ys, **frame_props) check_props = { 'linewidth': 1, 's': text_size**2, **cbook.normalize_kwargs(check_props, collections.PathCollection), 'marker': 'x', 'transform': ax.transAxes, 'animated': self._useblit, } check_props.setdefault('facecolor', check_props.pop('color', 'black')) self._checks = ax.scatter([0.15] * len(ys), ys, **check_props) # The user may have passed custom colours in check_props, so we need to # create the checks (above), and modify the visibility after getting # whatever the user set. 
self._init_status(actives) self.connect_event('button_press_event', self._clicked) if self._useblit: self.connect_event('draw_event', self._clear) self._observers = cbook.CallbackRegistry(signals=["clicked"]) def _clear(self, event): """Internal event handler to clear the buttons.""" if self.ignore(event) or self.canvas.is_saving(): return self._background = self.canvas.copy_from_bbox(self.ax.bbox) self.ax.draw_artist(self._checks) def _clicked(self, event): if self.ignore(event) or event.button != 1 or not self.ax.contains(event)[0]: return idxs = [ # Indices of frames and of texts that contain the event. *self._frames.contains(event)[1]["ind"], *[i for i, text in enumerate(self.labels) if text.contains(event)[0]]] if idxs: coords = self._frames.get_offset_transform().transform( self._frames.get_offsets()) self.set_active( # Closest index, only looking in idxs. idxs[(((event.x, event.y) - coords[idxs]) ** 2).sum(-1).argmin()]) def set_label_props(self, props): """ Set properties of the `.Text` labels. .. versionadded:: 3.7 Parameters ---------- props : dict Dictionary of `.Text` properties to be used for the labels. Same format as label_props argument of :class:`CheckButtons`. """ _api.check_isinstance(dict, props=props) props = _expand_text_props(props) for text, prop in zip(self.labels, props): text.update(prop) def set_frame_props(self, props): """ Set properties of the check button frames. .. versionadded:: 3.7 Parameters ---------- props : dict Dictionary of `.Collection` properties to be used for the check button frames. """ _api.check_isinstance(dict, props=props) if 's' in props: # Keep API consistent with constructor. props['sizes'] = np.broadcast_to(props.pop('s'), len(self.labels)) self._frames.update(props) def set_check_props(self, props): """ Set properties of the check button checks. .. versionadded:: 3.7 Parameters ---------- props : dict Dictionary of `.Collection` properties to be used for the check button check. 
""" _api.check_isinstance(dict, props=props) if 's' in props: # Keep API consistent with constructor. props['sizes'] = np.broadcast_to(props.pop('s'), len(self.labels)) actives = self.get_status() self._checks.update(props) # If new colours are supplied, then we must re-apply the status. self._init_status(actives) def set_active(self, index, state=None): """ Modify the state of a check button by index. Callbacks will be triggered if :attr:`!eventson` is True. Parameters ---------- index : int Index of the check button to toggle. state : bool, optional If a boolean value, set the state explicitly. If no value is provided, the state is toggled. Raises ------ ValueError If *index* is invalid. TypeError If *state* is not boolean. """ if index not in range(len(self.labels)): raise ValueError(f'Invalid CheckButton index: {index}') _api.check_isinstance((bool, None), state=state) invisible = colors.to_rgba('none') facecolors = self._checks.get_facecolor() if state is None: state = colors.same_color(facecolors[index], invisible) facecolors[index] = self._active_check_colors[index] if state else invisible self._checks.set_facecolor(facecolors) if self.drawon: if self._useblit: if self._background is not None: self.canvas.restore_region(self._background) self.ax.draw_artist(self._checks) self.canvas.blit(self.ax.bbox) else: self.canvas.draw() if self.eventson: self._observers.process('clicked', self.labels[index].get_text()) def _init_status(self, actives): """ Initialize properties to match active status. The user may have passed custom colours in *check_props* to the constructor, or to `.set_check_props`, so we need to modify the visibility after getting whatever the user set. 
""" self._active_check_colors = self._checks.get_facecolor() if len(self._active_check_colors) == 1: self._active_check_colors = np.repeat(self._active_check_colors, len(actives), axis=0) self._checks.set_facecolor( [ec if active else "none" for ec, active in zip(self._active_check_colors, actives)]) def clear(self): """Uncheck all checkboxes.""" self._checks.set_facecolor(['none'] * len(self._active_check_colors)) if hasattr(self, '_lines'): for l1, l2 in self._lines: l1.set_visible(False) l2.set_visible(False) if self.drawon: self.canvas.draw() if self.eventson: # Call with no label, as all checkboxes are being cleared. self._observers.process('clicked', None) def get_status(self): """ Return a list of the status (True/False) of all of the check buttons. """ return [not colors.same_color(color, colors.to_rgba("none")) for color in self._checks.get_facecolors()] def get_checked_labels(self): """Return a list of labels currently checked by user.""" return [l.get_text() for l, box_checked in zip(self.labels, self.get_status()) if box_checked] def on_clicked(self, func): """ Connect the callback function *func* to button click events. Parameters ---------- func : callable When the button is clicked, call *func* with button label. When all buttons are cleared, call *func* with None. The callback func must have the signature:: def func(label: str | None) -> Any Return values may exist, but are ignored. Returns ------- A connection id, which can be used to disconnect the callback. """ return self._observers.connect('clicked', lambda text: func(text)) def disconnect(self, cid): """Remove the observer with connection id *cid*.""" self._observers.disconnect(cid)
CheckButtons
python
Textualize__textual
docs/examples/tutorial/stopwatch02.py
{ "start": 167, "end": 240 }
class ____(Digits): """A widget to display elapsed time."""
TimeDisplay
python
gevent__gevent
src/greentest/3.10/test_socket.py
{ "start": 171014, "end": 171238 }
class ____(SendrecvmsgConnectedBase, ConnectedStreamTestMixin, UnixStreamBase): pass @requireAttrs(socket.socket, "sendmsg") @requireAttrs(socket, "AF_UNIX")
SendrecvmsgUnixStreamTestBase
python
huggingface__transformers
tests/test_feature_extraction_common.py
{ "start": 697, "end": 2215 }
class ____: test_cast_dtype = None def test_feat_extract_to_json_string(self): feat_extract = self.feature_extraction_class(**self.feat_extract_dict) obj = json.loads(feat_extract.to_json_string()) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key], value) def test_feat_extract_to_json_file(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "feat_extract.json") feat_extract_first.to_json_file(json_file_path) feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict()) def test_feat_extract_from_and_save_pretrained(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict()) def test_init_without_params(self): feat_extract = self.feature_extraction_class() self.assertIsNotNone(feat_extract)
FeatureExtractionSavingTestMixin
python
getsentry__sentry
tests/sentry/integrations/aws_lambda/test_utils.py
{ "start": 7837, "end": 9701 }
class ____(TestCase): """Test the get_node_options_for_layer function for different layer scenarios.""" def test_v7_layer_name(self) -> None: """Test SentryNodeServerlessSDKv7 returns v7 SDK options.""" result = get_node_options_for_layer("SentryNodeServerlessSDKv7", None) assert result == "-r @sentry/serverless/dist/awslambda-auto" def test_sentry_node_serverless_sdk_version_236_boundary(self) -> None: """Test SentryNodeServerlessSDK at version boundary 236 returns v8 SDK options.""" result = get_node_options_for_layer("SentryNodeServerlessSDK", 236) assert result == "-r @sentry/aws-serverless/awslambda-auto" def test_v8_layer_name(self) -> None: """Test SentryNodeServerlessSDKv8 returns v8 SDK options with -r.""" result = get_node_options_for_layer("SentryNodeServerlessSDKv8", None) assert result == "-r @sentry/aws-serverless/awslambda-auto" def test_v10_layer_name(self) -> None: """Test SentryNodeServerlessSDKv10 at version boundary 13 returns v8 SDK options with -r.""" result = get_node_options_for_layer("SentryNodeServerlessSDKv10", 13) assert result == "-r @sentry/aws-serverless/awslambda-auto" def test_v10_layer_version_14_boundary(self) -> None: """Test SentryNodeServerlessSDKv10 at version boundary 14 returns v10+ SDK options with --import.""" result = get_node_options_for_layer("SentryNodeServerlessSDKv10", 14) assert result == "--import @sentry/aws-serverless/awslambda-auto" def test_v11_layer_name(self) -> None: """Test SentryNodeServerlessSDKv11 returns v10+ SDK options with --import.""" result = get_node_options_for_layer("SentryNodeServerlessSDKv11", None) assert result == "--import @sentry/aws-serverless/awslambda-auto"
GetNodeOptionsForLayerTest
python
getsentry__sentry
src/sentry/monitors/processing_errors/errors.py
{ "start": 3845, "end": 4014 }
class ____(TypedDict): """ This monitor can't accept checkins and is over quota """ type: Literal[ProcessingErrorType.MONITOR_OVER_QUOTA]
MonitorOverQuota
python
mwaskom__seaborn
tests/test_rcmod.py
{ "start": 2310, "end": 5374 }
class ____(RCParamFixtures): styles = ["white", "dark", "whitegrid", "darkgrid", "ticks"] def test_default_return(self): current = rcmod.axes_style() self.assert_rc_params(current) def test_key_usage(self): _style_keys = set(rcmod._style_keys) for style in self.styles: assert not set(rcmod.axes_style(style)) ^ _style_keys def test_bad_style(self): with pytest.raises(ValueError): rcmod.axes_style("i_am_not_a_style") def test_rc_override(self): rc = {"axes.facecolor": "blue", "foo.notaparam": "bar"} out = rcmod.axes_style("darkgrid", rc) assert out["axes.facecolor"] == "blue" assert "foo.notaparam" not in out def test_set_style(self): for style in self.styles: style_dict = rcmod.axes_style(style) rcmod.set_style(style) self.assert_rc_params(style_dict) def test_style_context_manager(self): rcmod.set_style("darkgrid") orig_params = rcmod.axes_style() context_params = rcmod.axes_style("whitegrid") with rcmod.axes_style("whitegrid"): self.assert_rc_params(context_params) self.assert_rc_params(orig_params) @rcmod.axes_style("whitegrid") def func(): self.assert_rc_params(context_params) func() self.assert_rc_params(orig_params) def test_style_context_independence(self): assert set(rcmod._style_keys) ^ set(rcmod._context_keys) def test_set_rc(self): rcmod.set_theme(rc={"lines.linewidth": 4}) assert mpl.rcParams["lines.linewidth"] == 4 rcmod.set_theme() def test_set_with_palette(self): rcmod.reset_orig() rcmod.set_theme(palette="deep") assert utils.get_color_cycle() == palettes.color_palette("deep", 10) rcmod.reset_orig() rcmod.set_theme(palette="deep", color_codes=False) assert utils.get_color_cycle() == palettes.color_palette("deep", 10) rcmod.reset_orig() pal = palettes.color_palette("deep") rcmod.set_theme(palette=pal) assert utils.get_color_cycle() == palettes.color_palette("deep", 10) rcmod.reset_orig() rcmod.set_theme(palette=pal, color_codes=False) assert utils.get_color_cycle() == palettes.color_palette("deep", 10) rcmod.reset_orig() rcmod.set_theme() def 
test_reset_defaults(self): rcmod.reset_defaults() self.assert_rc_params(mpl.rcParamsDefault) rcmod.set_theme() def test_reset_orig(self): rcmod.reset_orig() self.assert_rc_params(mpl.rcParamsOrig) rcmod.set_theme() def test_set_is_alias(self): rcmod.set_theme(context="paper", style="white") params1 = mpl.rcParams.copy() rcmod.reset_orig() rcmod.set_theme(context="paper", style="white") params2 = mpl.rcParams.copy() self.assert_rc_params_equal(params1, params2) rcmod.set_theme()
TestAxesStyle
python
eth-brownie__brownie
brownie/exceptions.py
{ "start": 2186, "end": 5310 }
class ____(Exception): """ Raised when a call to a contract causes an EVM exception. Attributes ---------- message : str The full error message received from the RPC client. revert_msg : str The returned error string, if any. revert_type : str The error type. pc : int The program counter where the error was raised. txid : str The transaction ID that raised the error. """ def __init__(self, exc: ValueError) -> None: self.txid: HexStr = "" # type: ignore [assignment] self.source: str = "" self.revert_type: str = "" self.pc: Optional[int] = None self.revert_msg: Optional[str] = None self.dev_revert_msg: Optional[str] = None try: exc = exc.args[0] except Exception: pass if not (isinstance(exc, dict) and "message" in exc): raise ValueError(str(exc)) from None if "data" not in exc: raise ValueError(exc["message"]) from None exc_message: str = exc["message"] self.message: Final[str] = exc_message.rstrip(".") exc_data = exc["data"] if isinstance(exc_data, str) and exc_data.startswith("0x"): self.revert_type = "revert" self.revert_msg = decode_typed_error(exc_data) # type: ignore [arg-type] return try: txid = next(key for key in exc_data if key.startswith("0x")) except StopIteration: raise ValueError(exc["message"]) from None else: data: Dict[str, Any] = exc_data[txid] self.revert_type = data["error"] self.txid = txid self.source = "" self.pc = data.get("program_counter") if self.pc and self.revert_type == "revert": self.pc -= 1 reason = data.get("reason") if isinstance(reason, str) and reason.startswith("0x"): self.revert_msg = decode_typed_error(reason) # type: ignore [arg-type] else: self.revert_msg = reason if self.pc is None: self.dev_revert_msg = None else: self.dev_revert_msg = brownie.project.build._get_dev_revert(self.pc) if self.revert_msg is None and self.revert_type in ("revert", "invalid opcode"): self.revert_msg = self.dev_revert_msg elif self.revert_msg == "Failed assertion": self.revert_msg = self.dev_revert_msg or self.revert_msg def __str__(self) -> str: 
if not hasattr(self, "revert_type"): return str(self.message) msg = self.revert_type if self.revert_msg: msg = f"{msg}: {self.revert_msg}" if self.source: msg = f"{msg}\n{self.source}" return str(msg) def _with_attr(self, **kwargs) -> "VirtualMachineError": for key, value in kwargs.items(): setattr(self, key, value) if self.revert_msg == "Failed assertion": self.revert_msg = self.dev_revert_msg or self.revert_msg return self @final
VirtualMachineError
python
scrapy__scrapy
tests/test_downloader_handler_twisted_http11.py
{ "start": 626, "end": 782 }
class ____: @property def download_handler_cls(self) -> type[DownloadHandlerProtocol]: return HTTP11DownloadHandler
HTTP11DownloadHandlerMixin
python
walkccc__LeetCode
solutions/1400. Construct K Palindrome Strings/1400.py
{ "start": 0, "end": 355 }
class ____: def canConstruct(self, s: str, k: int) -> bool: # If |s| < k, we cannot construct k strings from the s. # If the number of letters that have odd counts > k, the minimum number of # palindromic strings we can construct is > k. return sum(freq & 1 for freq in collections.Counter(s).values()) <= k <= len(s)
Solution