language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
run-llama__llama_index
llama-index-integrations/vector_stores/llama-index-vector-stores-firestore/llama_index/vector_stores/firestore/base.py
{ "start": 2708, "end": 8515 }
class ____(BasePydanticVectorStore): """Firestore Vector Store.""" stores_text: bool = True flat_metadata: bool = True collection_name: str batch_size: Optional[int] = DEFAULT_BATCH_SIZE embedding_key: str = "embedding" text_key: str = "text" metadata_key: str = "metadata" distance_strategy: DistanceMeasure = DistanceMeasure.COSINE _client: Client def __init__( self, client: Optional[Client] = None, **kwargs: Any, ) -> None: """Initialize params.""" super().__init__(**kwargs) object.__setattr__(self, "_client", client_with_user_agent(client)) @classmethod def class_name(cls) -> str: return "FirestoreVectorStore" @property def client(self) -> Any: return self._client def add( self, nodes: List[BaseNode], ) -> List[str]: """Add nodes to vector store.""" ids = [] entries = [] for node in nodes: node_id = node.node_id metadata = node_to_metadata_dict( node, remove_text=not self.stores_text, flat_metadata=self.flat_metadata, ) entry = { self.embedding_key: node.get_embedding(), self.metadata_key: metadata, } ids.append(node_id) entries.append(entry) self._upsert_batch(entries, ids) return ids def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: """Delete nodes using with ref_doc_id.""" docs = ( self._client.collection(self.collection_name) .where("metadata.ref_doc_id", "==", ref_doc_id) .stream() ) self._delete_batch([doc.id for doc in docs]) def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult: """Query vector store.""" if query.query_embedding is None: raise ValueError("Query embedding is required.") filters = _to_firestore_filter(query.filters) if query.filters else None results = self._similarity_search( query.query_embedding, query.similarity_top_k, filters=filters, **kwargs ) top_k_ids = [] top_k_nodes = [] top_k_similarities = [] LOGGER.debug(f"Found {len(results)} results.") for result in results: # Convert the Firestore document to dict result_dict = result.to_dict() or {} metadata = result_dict.get(self.metadata_key) or 
{} fir_vec: Optional[Vector] = result_dict.get(self.embedding_key) if fir_vec is None: raise ValueError( "Embedding is missing in Firestore document.", result.id ) embedding = list(fir_vec.to_map_value()["value"]) # Convert metadata to node, and add text if available node = metadata_dict_to_node(metadata, text=result_dict.get(self.text_key)) # Keep track of the top k ids and nodes top_k_ids.append(result.id) top_k_nodes.append(node) top_k_similarities.append( similarity( query.query_embedding, embedding, self._distance_to_similarity_mode(self.distance_strategy), ) ) return VectorStoreQueryResult( nodes=top_k_nodes, ids=top_k_ids, similarities=top_k_similarities ) def _distance_to_similarity_mode(self, distance: DistanceMeasure) -> SimilarityMode: """Convert Firestore's distance measure to similarity mode.""" return { DistanceMeasure.COSINE: SimilarityMode.DEFAULT, DistanceMeasure.EUCLIDEAN: SimilarityMode.EUCLIDEAN, DistanceMeasure.DOT_PRODUCT: SimilarityMode.DOT_PRODUCT, }.get(distance, SimilarityMode.DEFAULT) def _delete_batch(self, ids: List[str]) -> None: """Delete batch of vectors from Firestore.""" db_batch = self._client.batch() for batch in more_itertools.chunked(ids, DEFAULT_BATCH_SIZE): for doc_id in batch: doc = self._client.collection(self.collection_name).document(doc_id) db_batch.delete(doc) db_batch.commit() def _upsert_batch(self, entries: List[dict], ids: Optional[List[str]]) -> None: """Upsert batch of vectors to Firestore.""" if ids and len(ids) != len(entries): raise ValueError("Length of ids and entries should be the same.") db_batch = self._client.batch() for batch in more_itertools.chunked(entries, DEFAULT_BATCH_SIZE): for i, entry in enumerate(batch): # Convert the embedding array to a Firestore Vector entry[self.embedding_key] = Vector(entry[self.embedding_key]) doc = self._client.collection(self.collection_name).document( ids[i] if ids else None ) db_batch.set(doc, entry, merge=True) db_batch.commit() def _similarity_search( self, query: 
List[float], k: int, filters: Union[BaseFilter, BaseCompositeFilter, None] = None, ) -> List[DocumentSnapshot]: wfilters = None collection = self._client.collection(self.collection_name) if filters: wfilters = collection.where(filter=filters) results = (wfilters or collection).find_nearest( vector_field=self.embedding_key, query_vector=Vector(query), distance_measure=self.distance_strategy, limit=k, ) return results.get()
FirestoreVectorStore
python
pytorch__pytorch
test/distributed/_composable/fsdp/test_fully_shard_comm.py
{ "start": 2434, "end": 11777 }
class ____(FSDPTestMultiThread): @property def world_size(self) -> int: return 128 @property def device(self) -> torch.device: return torch.device(device_type.type, 0) def _get_param_sizes(self) -> list[torch.Size]: # For world size 128, the fp32 all-gather and reduce-scatter testing # requires ~0.22 GB return [ torch.Size([17, 257]), torch.Size([17]), torch.Size([64, 312]), torch.Size([64]), torch.Size([64, 64]), torch.Size([512, 64]), torch.Size([256]), torch.Size([64, 297]), ] def _init_params(self, param_sizes: list[torch.Size]) -> list[nn.Parameter]: torch.manual_seed(42) orig_params = [ nn.Parameter(torch.randn(size, device=self.device)) for size in param_sizes ] # Since seed is per process, not per thread, we broadcast to ensure the # same original parameters across ranks for orig_param in orig_params: dist.broadcast(orig_param, src=0) return orig_params def _init_fsdp_param_group( self, params: list[nn.Parameter], reshard_after_forward: Union[bool, int] ): module = nn.ParameterList([param.detach().clone() for param in params]) mesh_info = FSDPMeshInfo(_init_default_fully_shard_mesh(), shard_mesh_dim=0) post_forward_mesh_info = _get_post_forward_mesh_info( reshard_after_forward, mesh_info ) fsdp_param_group = FSDPParamGroup( list(module.parameters()), (module,), mesh_info, post_forward_mesh_info, self.device, None, # shard_placement_fn MixedPrecisionPolicy(), OffloadPolicy(), ) fsdp_param_group.lazy_init() return fsdp_param_group @skip_if_lt_x_gpu(1) def test_all_gather_fp32(self): param_sizes = self._get_param_sizes() default_stream = device_module.current_stream() stream1, stream2 = ( device_module.Stream(), device_module.Stream(), ) for async_op, streams, reshard_after_forward in itertools.product( (False, True), ((default_stream, default_stream), (stream1, stream2)), (True, 8), ): all_gather_copy_in_stream, all_gather_stream = streams # Save test time by only testing reshard after forward as an int # for non-async and non-default streams (like in 
pre-backward) if type(reshard_after_forward) is int and ( async_op or all_gather_stream is default_stream ): continue self._test_all_gather( param_sizes, reshard_after_forward=reshard_after_forward, async_op=async_op, all_gather_copy_in_stream=all_gather_copy_in_stream, all_gather_stream=all_gather_stream, ) def _test_all_gather( self, param_sizes: list[torch.Size], reshard_after_forward: Union[bool, int], async_op: bool, all_gather_copy_in_stream, all_gather_stream, ): def all_gather(fsdp_param_group: FSDPParamGroup, group: dist.ProcessGroup): all_gather_comm = DefaultAllGather() all_gather_result = foreach_all_gather( fsdp_param_group.fsdp_params, group, async_op=async_op, all_gather_copy_in_stream=all_gather_copy_in_stream, all_gather_stream=all_gather_stream, device=self.device, all_gather_comm=all_gather_comm, ) foreach_all_gather_copy_out(all_gather_result, fsdp_params, group) # Transition to unsharded state to register unsharded parameters for fsdp_param in fsdp_param_group.fsdp_params: fsdp_param.init_unsharded_param() fsdp_param_group._to_unsharded() def check_all_gathered_params( orig_params: list[nn.Parameter], module: nn.Module ): for orig_param, param in zip(orig_params, module.parameters()): self.assertIsInstance(param, torch.Tensor) self.assertIsInstance(param, nn.Parameter) self.assertEqual(param, orig_param.to(param.dtype)) # Set up the reference parameters and construct the FSDP group orig_params = self._init_params(param_sizes) fsdp_param_group = self._init_fsdp_param_group( orig_params, reshard_after_forward ) fsdp_params = fsdp_param_group.fsdp_params module = fsdp_param_group.modules[0] # Sanity check that the parameter sharding is as expected for orig_param, param in zip(orig_params, module.parameters()): self.assertTrue(isinstance(param, DTensor)) self.assertEqual(param.full_tensor(), orig_param) # Run the foreach all-gather (including copy-in and copy-out) all_gather(fsdp_param_group, fsdp_param_group.mesh_info.shard_process_group) # Check 
all-gather correctness check_all_gathered_params(orig_params, module) # For reshard after after forward as an int, further test emulating the # pre-backward all-gather if type(reshard_after_forward) is not int: return fsdp_param_group._to_sharded_post_forward() all_gather( fsdp_param_group, fsdp_param_group.post_forward_mesh_info.shard_process_group, ) check_all_gathered_params(orig_params, module) @skip_if_lt_x_gpu(1) def test_reduce_scatter_fp32(self): param_sizes = self._get_param_sizes() default_stream = device_module.current_stream() stream = device_module.Stream() for reduce_scatter_stream in (default_stream, stream): self._test_reduce_scatter( param_sizes, reduce_scatter_stream=reduce_scatter_stream, reduce_scatter_dtype=torch.float32, ) @skip_if_lt_x_gpu(1) def test_reduce_scatter_fp16(self): param_sizes = self._get_param_sizes() default_stream = torch.get_device_module(device_type).current_stream() stream = device_module.Stream() for reduce_scatter_stream in (default_stream, stream): self._test_reduce_scatter( param_sizes, reduce_scatter_stream=reduce_scatter_stream, reduce_scatter_dtype=torch.float16, ) def _test_reduce_scatter( self, param_sizes: list[torch.Size], reduce_scatter_stream, reduce_scatter_dtype: torch.dtype, ): # Set up the reference parameters and construct the FSDP group orig_params = self._init_params(param_sizes) fsdp_param_group = self._init_fsdp_param_group(orig_params, True) fsdp_params = fsdp_param_group.fsdp_params fsdp_param_group.comm_ctx.lazy_init(self.device) # Run one unshard to initialize metadata fsdp_param_group.unshard() fsdp_param_group.wait_for_unshard() fsdp_param_group.reshard() # Run the foreach reduce-scatter (including copy-in and view-out) torch.manual_seed(42) unsharded_grads = [torch.ones_like(param) * self.rank for param in orig_params] group = fsdp_param_group.mesh_info.shard_process_group self.assertEqual(group.size(), self.world_size) all_reduce_stream = device_module.Stream() comm = DefaultReduceScatter() ( 
_, _, post_reduce_event, _, _, _, ) = foreach_reduce( fsdp_params, unsharded_grads, group, reduce_scatter_stream, comm, orig_dtype=orig_params[0].dtype, reduce_dtype=reduce_scatter_dtype, device=self.device, gradient_divide_factor=None, all_reduce_group=None, all_reduce_stream=all_reduce_stream, all_reduce_hook=None, all_reduce_grads=True, partial_reduce_output=None, ) torch.get_device_module(device_type).current_stream().wait_event( post_reduce_event ) # Check reduce-scatter correctness ( predivide_factor, postdivide_factor, _, all_reduce_op, ) = _get_gradient_divide_factors(group, None, reduce_scatter_dtype) reduced_grads = [grad.detach().clone() for grad in unsharded_grads] for grad in reduced_grads: _div_if_needed(grad, predivide_factor) dist.all_reduce( grad, group=group, op=all_reduce_op, ) _div_if_needed(grad, postdivide_factor) for fsdp_param, reduced_grad in zip(fsdp_params, reduced_grads): sharded_grad = fsdp_param.sharded_param.grad self.assertIsInstance(sharded_grad, DTensor) self.assertEqual(sharded_grad.full_tensor(), reduced_grad)
TestFullyShardCollectiveOps
python
sympy__sympy
sympy/stats/rv.py
{ "start": 10350, "end": 16619 }
class ____(ProductPSpace): """ A probability space resulting from the merger of two independent probability spaces. Often created using the function, pspace. """ def __new__(cls, *spaces): rs_space_dict = {} for space in spaces: for value in space.values: rs_space_dict[value] = space symbols = FiniteSet(*[val.symbol for val in rs_space_dict.keys()]) # Overlapping symbols from sympy.stats.joint_rv import MarginalDistribution from sympy.stats.compound_rv import CompoundDistribution if len(symbols) < sum(len(space.symbols) for space in spaces if not isinstance(space.distribution, ( CompoundDistribution, MarginalDistribution))): raise ValueError("Overlapping Random Variables") if all(space.is_Finite for space in spaces): from sympy.stats.frv import ProductFinitePSpace cls = ProductFinitePSpace obj = Basic.__new__(cls, *FiniteSet(*spaces)) return obj @property def pdf(self): p = Mul(*[space.pdf for space in self.spaces]) return p.subs({rv: rv.symbol for rv in self.values}) @property def rs_space_dict(self): d = {} for space in self.spaces: for value in space.values: d[value] = space return d @property def symbols(self): return FiniteSet(*[val.symbol for val in self.rs_space_dict.keys()]) @property def spaces(self): return FiniteSet(*self.args) @property def values(self): return sumsets(space.values for space in self.spaces) def compute_expectation(self, expr, rvs=None, evaluate=False, **kwargs): rvs = rvs or self.values rvs = frozenset(rvs) for space in self.spaces: expr = space.compute_expectation(expr, rvs & space.values, evaluate=False, **kwargs) if evaluate and hasattr(expr, 'doit'): return expr.doit(**kwargs) return expr @property def domain(self): return ProductDomain(*[space.domain for space in self.spaces]) @property def density(self): raise NotImplementedError("Density not available for ProductSpaces") def sample(self, size=(), library='scipy', seed=None): return {k: v for space in self.spaces for k, v in space.sample(size=size, library=library, 
seed=seed).items()} def probability(self, condition, **kwargs): cond_inv = False if isinstance(condition, Ne): condition = Eq(condition.args[0], condition.args[1]) cond_inv = True elif isinstance(condition, And): # they are independent return Mul(*[self.probability(arg) for arg in condition.args]) elif isinstance(condition, Or): # they are independent return Add(*[self.probability(arg) for arg in condition.args]) expr = condition.lhs - condition.rhs rvs = random_symbols(expr) dens = self.compute_density(expr) if any(pspace(rv).is_Continuous for rv in rvs): from sympy.stats.crv import SingleContinuousPSpace from sympy.stats.crv_types import ContinuousDistributionHandmade if expr in self.values: # Marginalize all other random symbols out of the density randomsymbols = tuple(set(self.values) - frozenset([expr])) symbols = tuple(rs.symbol for rs in randomsymbols) pdf = self.domain.integrate(self.pdf, symbols, **kwargs) return Lambda(expr.symbol, pdf) dens = ContinuousDistributionHandmade(dens) z = Dummy('z', real=True) space = SingleContinuousPSpace(z, dens) result = space.probability(condition.__class__(space.value, 0)) else: from sympy.stats.drv import SingleDiscretePSpace from sympy.stats.drv_types import DiscreteDistributionHandmade dens = DiscreteDistributionHandmade(dens) z = Dummy('z', integer=True) space = SingleDiscretePSpace(z, dens) result = space.probability(condition.__class__(space.value, 0)) return result if not cond_inv else S.One - result def compute_density(self, expr, **kwargs): rvs = random_symbols(expr) if any(pspace(rv).is_Continuous for rv in rvs): z = Dummy('z', real=True) expr = self.compute_expectation(DiracDelta(expr - z), **kwargs) else: z = Dummy('z', integer=True) expr = self.compute_expectation(KroneckerDelta(expr, z), **kwargs) return Lambda(z, expr) def compute_cdf(self, expr, **kwargs): raise ValueError("CDF not well defined on multivariate expressions") def conditional_space(self, condition, normalize=True, **kwargs): rvs = 
random_symbols(condition) condition = condition.xreplace({rv: rv.symbol for rv in self.values}) pspaces = [pspace(rv) for rv in rvs] if any(ps.is_Continuous for ps in pspaces): from sympy.stats.crv import (ConditionalContinuousDomain, ContinuousPSpace) space = ContinuousPSpace domain = ConditionalContinuousDomain(self.domain, condition) elif any(ps.is_Discrete for ps in pspaces): from sympy.stats.drv import (ConditionalDiscreteDomain, DiscretePSpace) space = DiscretePSpace domain = ConditionalDiscreteDomain(self.domain, condition) elif all(ps.is_Finite for ps in pspaces): from sympy.stats.frv import FinitePSpace return FinitePSpace.conditional_space(self, condition) if normalize: replacement = {rv: Dummy(str(rv)) for rv in self.symbols} norm = domain.compute_expectation(self.pdf, **kwargs) pdf = self.pdf / norm.xreplace(replacement) # XXX: Converting symbols from set to tuple. The order matters to # Lambda though so we shouldn't be starting with a set here... density = Lambda(tuple(domain.symbols), pdf) return space(domain, density)
IndependentProductPSpace
python
kamyu104__LeetCode-Solutions
Python/maximum-value-after-insertion.py
{ "start": 29, "end": 412 }
class ____(object): def maxValue(self, n, x): """ :type n: str :type x: int :rtype: str """ check = (lambda i: str(x) > n[i]) if n[0] != '-' else (lambda i: str(x) < n[i]) for i in xrange(len(n)): if check(i): break else: i = len(n) return n[:i] + str(x) + n[i:]
Solution
python
lepture__authlib
authlib/oauth2/rfc6749/models.py
{ "start": 5684, "end": 7773 }
class ____: def check_client(self, client): """A method to check if this token is issued to the given client. For instance, ``client_id`` is saved on token table:: def check_client(self, client): return self.client_id == client.client_id :return: bool """ raise NotImplementedError() def get_scope(self): """A method to get scope of the authorization code. For instance, the column is called ``scope``:: def get_scope(self): return self.scope :return: scope string """ raise NotImplementedError() def get_expires_in(self): """A method to get the ``expires_in`` value of the token. e.g. the column is called ``expires_in``:: def get_expires_in(self): return self.expires_in :return: timestamp int """ raise NotImplementedError() def is_expired(self): """A method to define if this token is expired. For instance, there is a column ``expired_at`` in the table:: def is_expired(self): return self.expired_at < now :return: boolean """ raise NotImplementedError() def is_revoked(self): """A method to define if this token is revoked. For instance, there is a boolean column ``revoked`` in the table:: def is_revoked(self): return self.revoked :return: boolean """ raise NotImplementedError() def get_user(self): """A method to get the user object associated with this token: .. code-block:: def get_user(self): return User.get(self.user_id) """ raise NotImplementedError() def get_client(self) -> ClientMixin: """A method to get the client object associated with this token: .. code-block:: def get_client(self): return Client.get(self.client_id) """ raise NotImplementedError()
TokenMixin
python
django__django
tests/servers/tests.py
{ "start": 4972, "end": 5773 }
class ____(LiveServerBase): server_thread_class = FailingLiveServerThread @classmethod def check_allowed_hosts(cls, expected): if settings.ALLOWED_HOSTS != expected: raise RuntimeError(f"{settings.ALLOWED_HOSTS} != {expected}") @classmethod def setUpClass(cls): cls.check_allowed_hosts(["testserver"]) try: super().setUpClass() except RuntimeError: # LiveServerTestCase's change to ALLOWED_HOSTS should be reverted. cls.doClassCleanups() cls.check_allowed_hosts(["testserver"]) else: raise RuntimeError("Server did not fail.") cls.set_up_called = True def test_set_up_class(self): self.assertIs(self.set_up_called, True)
LiveServerTestCaseSetupTest
python
mlflow__mlflow
mlflow/server/auth/entities.py
{ "start": 3856, "end": 4753 }
class ____: def __init__( self, name, user_id, permission, ): self._name = name self._user_id = user_id self._permission = permission @property def name(self): return self._name @property def user_id(self): return self._user_id @property def permission(self): return self._permission @permission.setter def permission(self, permission): self._permission = permission def to_json(self): return { "name": self.name, "user_id": self.user_id, "permission": self.permission, } @classmethod def from_json(cls, dictionary): return cls( name=dictionary["name"], user_id=dictionary["user_id"], permission=dictionary["permission"], )
RegisteredModelPermission
python
spack__spack
var/spack/test_repos/spack_repo/builtin_mock/packages/licenses_1/package.py
{ "start": 217, "end": 566 }
class ____(Package): """Package with a licenses field.""" homepage = "https://www.example.com" url = "https://www.example.com/license" license("MIT", when="+foo") license("Apache-2.0", when="~foo") version("1.0", md5="0123456789abcdef0123456789abcdef") variant("foo", default=True, description="toggle license")
Licenses1
python
getsentry__sentry
tests/sentry_plugins/twilio/test_plugin.py
{ "start": 4068, "end": 5759 }
class ____(PluginTestCase): @cached_property def plugin(self) -> TwilioPlugin: return TwilioPlugin() def test_is_configured(self) -> None: for o in ("account_sid", "auth_token", "sms_from", "sms_to"): assert self.plugin.is_configured(self.project) is False self.plugin.set_option(o, "foo", self.project) assert self.plugin.is_configured(self.project) is True @responses.activate def test_simple_notification(self) -> None: responses.add("POST", "https://api.twilio.com/2010-04-01/Accounts/abcdef/Messages.json") self.plugin.set_option("account_sid", "abcdef", self.project) self.plugin.set_option("auth_token", "abcd", self.project) self.plugin.set_option("sms_from", "4158675309", self.project) self.plugin.set_option("sms_to", "4154444444", self.project) event = self.store_event( data={ "message": "Hello world", "level": "warning", "platform": "python", "culprit": "foo.bar", }, project_id=self.project.id, ) rule = Rule.objects.create(project=self.project, label="my rule") notification = Notification(event=event, rule=rule) with self.options({"system.url-prefix": "http://example.com"}): self.plugin.notify(notification) request = responses.calls[0].request payload = parse_qs(request.body) assert payload == { "To": ["+14154444444"], "From": ["+14158675309"], "Body": ["Sentry [%s] WARNING: Hello world" % self.project.slug.title()], }
TwilioPluginTest
python
kubernetes-client__python
kubernetes/client/models/v1beta1_variable.py
{ "start": 383, "end": 5269 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'expression': 'str', 'name': 'str' } attribute_map = { 'expression': 'expression', 'name': 'name' } def __init__(self, expression=None, name=None, local_vars_configuration=None): # noqa: E501 """V1beta1Variable - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._expression = None self._name = None self.discriminator = None self.expression = expression self.name = name @property def expression(self): """Gets the expression of this V1beta1Variable. # noqa: E501 Expression is the expression that will be evaluated as the value of the variable. The CEL expression has access to the same identifiers as the CEL expressions in Validation. # noqa: E501 :return: The expression of this V1beta1Variable. # noqa: E501 :rtype: str """ return self._expression @expression.setter def expression(self, expression): """Sets the expression of this V1beta1Variable. Expression is the expression that will be evaluated as the value of the variable. The CEL expression has access to the same identifiers as the CEL expressions in Validation. # noqa: E501 :param expression: The expression of this V1beta1Variable. # noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and expression is None: # noqa: E501 raise ValueError("Invalid value for `expression`, must not be `None`") # noqa: E501 self._expression = expression @property def name(self): """Gets the name of this V1beta1Variable. # noqa: E501 Name is the name of the variable. 
The name must be a valid CEL identifier and unique among all variables. The variable can be accessed in other expressions through `variables` For example, if name is \"foo\", the variable will be available as `variables.foo` # noqa: E501 :return: The name of this V1beta1Variable. # noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this V1beta1Variable. Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. The variable can be accessed in other expressions through `variables` For example, if name is \"foo\", the variable will be available as `variables.foo` # noqa: E501 :param name: The name of this V1beta1Variable. # noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501 raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501 self._name = name def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1beta1Variable): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1beta1Variable): return True return self.to_dict() != other.to_dict()
V1beta1Variable
python
pdm-project__pdm
src/pdm/cli/commands/venv/activate.py
{ "start": 312, "end": 2796 }
class ____(BaseCommand): """Print the command to activate the virtualenv with the given name""" arguments = (verbose_option,) def add_arguments(self, parser: argparse.ArgumentParser) -> None: parser.add_argument("env", nargs="?", help="The key of the virtualenv") def handle(self, project: Project, options: argparse.Namespace) -> None: if options.env: venv = get_venv_with_name(project, options.env) else: # Use what is saved in .pdm-python interpreter = project._saved_python if not interpreter: project.core.ui.warn( "The project doesn't have a saved python.path. Run [success]pdm use[/] to pick one." ) raise SystemExit(1) venv_like = VirtualEnv.from_interpreter(Path(interpreter)) if venv_like is None: project.core.ui.warn( f"Can't activate a non-venv Python [success]{interpreter}[/], " "you can specify one with [success]pdm venv activate <env_name>[/]", ) raise SystemExit(1) venv = venv_like project.core.ui.echo(self.get_activate_command(venv)) def get_activate_command(self, venv: VirtualEnv) -> str: # pragma: no cover try: shell, _ = shellingham.detect_shell() except shellingham.ShellDetectionFailure: shell = "" if shell == "fish": command, filename = "source", "activate.fish" elif shell in ["csh", "tcsh"]: command, filename = "source", "activate.csh" elif shell in ["powershell", "pwsh"]: command, filename = ".", "Activate.ps1" else: command, filename = "source", "activate" activate_script = venv.interpreter.with_name(filename) if activate_script.exists(): if platform.system() == "Windows": return f"{self.quote(str(activate_script), shell)}" return f"{command} {self.quote(str(activate_script), shell)}" # Conda backed virtualenvs don't have activate scripts return f"conda activate {self.quote(str(venv.root), shell)}" @staticmethod def quote(command: str, shell: str) -> str: if shell in ["powershell", "pwsh"] or platform.system() == "Windows": return "{}".format(command.replace("'", "''")) return shlex.quote(command)
ActivateCommand
python
tensorflow__tensorflow
tensorflow/python/ops/gradients_test.py
{ "start": 26707, "end": 26996 }
class ____(test_util.TensorFlowTestCase): def testStopGradient(self): with ops.Graph().as_default(): inp = constant(1.0, shape=[100, 32], name="in") out = array_ops.stop_gradient(inp) igrad = gradients.gradients(out, inp)[0] assert igrad is None
StopGradientTest
python
scipy__scipy
scipy/optimize/tests/test_quadratic_assignment.py
{ "start": 9909, "end": 13698 }
class ____(QAPCommonTests): method = "2opt" def test_deterministic(self): n = 20 rng = default_rng(51982908) A = rng.random(size=(n, n)) B = rng.random(size=(n, n)) res1 = quadratic_assignment(A, B, method=self.method, options={'rng': rng}) rng = default_rng(51982908) A = rng.random(size=(n, n)) B = rng.random(size=(n, n)) res2 = quadratic_assignment(A, B, method=self.method, options={'rng': rng}) assert_equal(res1.nit, res2.nit) def test_partial_guess(self): n = 5 rng = np.random.default_rng(4358764578823597324) A = rng.random(size=(n, n)) B = rng.random(size=(n, n)) res1 = quadratic_assignment(A, B, method=self.method, options={'rng': rng}) guess = np.array([np.arange(5), res1.col_ind]).T res2 = quadratic_assignment(A, B, method=self.method, options={'rng': rng, 'partial_guess': guess}) fix = [2, 4] match = np.array([np.arange(5)[fix], res1.col_ind[fix]]).T res3 = quadratic_assignment(A, B, method=self.method, options={'rng': rng, 'partial_guess': guess, 'partial_match': match}) assert_(res1.nit != n*(n+1)/2) assert_equal(res2.nit, n*(n+1)/2) # tests each swap exactly once assert_equal(res3.nit, (n-2)*(n-1)/2) # tests free swaps exactly once def test_specific_input_validation(self): # can't have more seed nodes than cost/dist nodes _rm = _range_matrix with pytest.raises( ValueError, match="`partial_guess` can have only as many entries as"): quadratic_assignment(np.identity(3), np.identity(3), method=self.method, options={'partial_guess': _rm(5, 2)}) # test for only two seed columns with pytest.raises( ValueError, match="`partial_guess` must have two columns"): quadratic_assignment( np.identity(3), np.identity(3), method=self.method, options={'partial_guess': _range_matrix(2, 3)} ) # test that seed has no more than two dimensions with pytest.raises( ValueError, match="`partial_guess` must have exactly two"): quadratic_assignment( np.identity(3), np.identity(3), method=self.method, options={'partial_guess': np.random.rand(3, 2, 2)} ) # seeds cannot be negative 
valued with pytest.raises( ValueError, match="`partial_guess` must contain only pos"): quadratic_assignment( np.identity(3), np.identity(3), method=self.method, options={'partial_guess': -1 * _range_matrix(2, 2)} ) # seeds can't have values greater than number of nodes with pytest.raises( ValueError, match="`partial_guess` entries must be less than number"): quadratic_assignment( np.identity(5), np.identity(5), method=self.method, options={'partial_guess': 2 * _range_matrix(4, 2)} ) # columns of seed matrix must be unique with pytest.raises( ValueError, match="`partial_guess` column entries must be unique"): quadratic_assignment( np.identity(3), np.identity(3), method=self.method, options={'partial_guess': np.ones((2, 2))} ) @pytest.mark.filterwarnings("ignore:The NumPy global RNG was seeded by calling")
Test2opt
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 180564, "end": 181175 }
class ____(sgqlc.types.Input): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ( "project_id", "item_id", "field_id", "value", "client_mutation_id", ) project_id = sgqlc.types.Field(ID, graphql_name="projectId") item_id = sgqlc.types.Field(ID, graphql_name="itemId") field_id = sgqlc.types.Field(ID, graphql_name="fieldId") value = sgqlc.types.Field(String, graphql_name="value") client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
UpdateProjectNextItemFieldInput
python
qdrant__qdrant-client
qdrant_client/http/models/models.py
{ "start": 151544, "end": 151888 }
class ____(BaseModel): wal_capacity_mb: int = Field(..., description="Size of a single WAL segment in MB") wal_segments_ahead: int = Field(..., description="Number of WAL segments to create ahead of actually used ones") wal_retain_closed: Optional[int] = Field(default=1, description="Number of closed WAL segments to keep")
WalConfig
python
TheAlgorithms__Python
ciphers/hill_cipher.py
{ "start": 1593, "end": 7301 }
class ____: key_string = string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) modulus = np.vectorize(lambda x: x % 36) to_int = np.vectorize(round) def __init__(self, encrypt_key: np.ndarray) -> None: """ encrypt_key is an NxN numpy array """ self.encrypt_key = self.modulus(encrypt_key) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key self.break_key = encrypt_key.shape[0] def replace_letters(self, letter: str) -> int: """ >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.replace_letters('T') 19 >>> hill_cipher.replace_letters('0') 26 """ return self.key_string.index(letter) def replace_digits(self, num: int) -> str: """ >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.replace_digits(19) 'T' >>> hill_cipher.replace_digits(26) '0' >>> hill_cipher.replace_digits(26.1) '0' """ return self.key_string[int(num)] def check_determinant(self) -> None: """ >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.check_determinant() """ det = round(np.linalg.det(self.encrypt_key)) if det < 0: det = det % len(self.key_string) req_l = len(self.key_string) if greatest_common_divisor(det, len(self.key_string)) != 1: msg = ( f"determinant modular {req_l} of encryption key({det}) " f"is not co prime w.r.t {req_l}.\nTry another key." 
) raise ValueError(msg) def process_text(self, text: str) -> str: """ >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.process_text('Testing Hill Cipher') 'TESTINGHILLCIPHERR' >>> hill_cipher.process_text('hello') 'HELLOO' """ chars = [char for char in text.upper() if char in self.key_string] last = chars[-1] while len(chars) % self.break_key != 0: chars.append(last) return "".join(chars) def encrypt(self, text: str) -> str: """ >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.encrypt('testing hill cipher') 'WHXYJOLM9C6XT085LL' >>> hill_cipher.encrypt('hello') '85FF00' """ text = self.process_text(text.upper()) encrypted = "" for i in range(0, len(text) - self.break_key + 1, self.break_key): batch = text[i : i + self.break_key] vec = [self.replace_letters(char) for char in batch] batch_vec = np.array([vec]).T batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[ 0 ] encrypted_batch = "".join( self.replace_digits(num) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def make_decrypt_key(self) -> np.ndarray: """ >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.make_decrypt_key() array([[ 6, 25], [ 5, 26]]) """ det = round(np.linalg.det(self.encrypt_key)) if det < 0: det = det % len(self.key_string) det_inv = None for i in range(len(self.key_string)): if (det * i) % len(self.key_string) == 1: det_inv = i break inv_key = ( det_inv * np.linalg.det(self.encrypt_key) * np.linalg.inv(self.encrypt_key) ) return self.to_int(self.modulus(inv_key)) def decrypt(self, text: str) -> str: """ >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.decrypt('WHXYJOLM9C6XT085LL') 'TESTINGHILLCIPHERR' >>> hill_cipher.decrypt('85FF00') 'HELLOO' """ decrypt_key = self.make_decrypt_key() text = self.process_text(text.upper()) decrypted = "" for i in range(0, len(text) - self.break_key + 1, self.break_key): batch = text[i : i + self.break_key] vec = 
[self.replace_letters(char) for char in batch] batch_vec = np.array([vec]).T batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0] decrypted_batch = "".join( self.replace_digits(num) for num in batch_decrypted ) decrypted += decrypted_batch return decrypted def main() -> None: n = int(input("Enter the order of the encryption key: ")) hill_matrix = [] print("Enter each row of the encryption key with space separated integers") for _ in range(n): row = [int(x) for x in input().split()] hill_matrix.append(row) hc = HillCipher(np.array(hill_matrix)) print("Would you like to encrypt or decrypt some text? (1 or 2)") option = input("\n1. Encrypt\n2. Decrypt\n") if option == "1": text_e = input("What text would you like to encrypt?: ") print("Your encrypted text is:") print(hc.encrypt(text_e)) elif option == "2": text_d = input("What text would you like to decrypt?: ") print("Your decrypted text is:") print(hc.decrypt(text_d)) if __name__ == "__main__": import doctest doctest.testmod() main()
HillCipher
python
ray-project__ray
python/ray/actor.py
{ "start": 74951, "end": 102296 }
class ____(Generic[T]): """A handle to an actor. The fields in this class are prefixed with _ray_ to hide them from the user and to avoid collision with actor method names. An ActorHandle can be created in three ways. First, by calling .remote() on an ActorClass. Second, by passing an actor handle into a task (forking the ActorHandle). Third, by directly serializing the ActorHandle (e.g., with cloudpickle). Attributes: _ray_actor_language: The actor language. _ray_actor_id: Actor ID. _ray_enable_task_events: The default value of whether task events is enabled, i.e., task events from the actor should be reported. _ray_method_is_generator: Map of method name -> if it is a generator method. _ray_method_decorators: Optional decorators for the function invocation. This can be used to change the behavior on the invocation side, whereas a regular decorator can be used to change the behavior on the execution side. _ray_method_signatures: The signatures of the actor methods. _ray_method_max_task_retries: Max number of retries on method failure. _ray_method_num_returns: The default number of return values for each method. _ray_method_retry_exceptions: The default value of boolean of whether you want to retry all user-raised exceptions, or a list of allowlist exceptions to retry. _ray_method_generator_backpressure_num_objects: Generator-only config. The max number of objects to generate before it starts pausing a generator. _ray_method_enable_task_events: The value of whether task tracing is enabled for the actor methods. This overrides the actor's default value (`_ray_enable_task_events`). _ray_method_name_to_tensor_transport: A dictionary mapping method names to their tensor transport protocol settings. The valid values are OBJECT_STORE (default), NCCL, or GLOO, and they are case-insensitive. _ray_actor_method_cpus: The number of CPUs required by actor methods. _ray_original_handle: True if this is the original actor handle for a given actor. 
If this is true, then the actor will be destroyed when this handle goes out of scope. _ray_weak_ref: True means that this handle does not count towards the distributed ref count for the actor, i.e. the actor may be GCed while this handle is still in scope. This is set to True if the handle was created by getting an actor by name or by getting the self handle. It is set to False if this is the original handle or if it was created by passing the original handle through task args and returns. _ray_is_cross_language: Whether this actor is cross language. _ray_actor_creation_function_descriptor: The function descriptor of the actor creation task. _ray_allow_out_of_order_execution: Whether the actor can execute tasks out of order. _ray_enable_tensor_transport: Whether tensor transport is enabled for this actor. """ def __init__( self, language, actor_id, max_task_retries: Optional[int], enable_task_events: bool, method_is_generator: Dict[str, bool], method_decorators, method_signatures, method_num_returns: Dict[str, Union[int, Literal["streaming"]]], method_max_task_retries: Dict[str, int], method_retry_exceptions: Dict[str, Union[bool, list, tuple]], method_generator_backpressure_num_objects: Dict[str, int], method_enable_task_events: Dict[str, bool], enable_tensor_transport: bool, method_name_to_tensor_transport: Dict[str, TensorTransportEnum], actor_method_cpus: int, actor_creation_function_descriptor, cluster_and_job, original_handle=False, weak_ref: bool = False, allow_out_of_order_execution: Optional[bool] = None, ): """Initialize an ActorHandle. Args: language: The actor language. actor_id: The ID of the actor. max_task_retries: The maximum number of times to retry a task when it fails. enable_task_events: Whether task events should be enabled for this actor. method_is_generator: Dictionary mapping method names to whether they are generator methods. method_decorators: Dictionary mapping method names to their decorators. 
method_signatures: Dictionary mapping method names to their signatures. method_num_returns: Dictionary mapping method names to their number of return values. method_max_task_retries: Dictionary mapping method names to their maximum task retries. method_retry_exceptions: Dictionary mapping method names to their retry exception settings. method_generator_backpressure_num_objects: Dictionary mapping method names to their generator backpressure settings. method_enable_task_events: Dictionary mapping method names to whether task events are enabled. enable_tensor_transport: Whether tensor transport is enabled for this actor. If True, then methods can be called with .options(tensor_transport=...) to specify a non-default tensor transport. method_name_to_tensor_transport: Dictionary mapping method names to their tensor transport settings. actor_method_cpus: The number of CPUs required by actor methods. actor_creation_function_descriptor: The function descriptor for actor creation. cluster_and_job: The cluster and job information. original_handle: Whether this is the original actor handle. weak_ref: Whether this is a weak reference to the actor. allow_out_of_order_execution: Whether the actor can execute tasks out of order. 
""" self._ray_actor_language = language self._ray_actor_id = actor_id self._ray_max_task_retries = max_task_retries self._ray_original_handle = original_handle self._ray_weak_ref = weak_ref self._ray_enable_task_events = enable_task_events self._ray_allow_out_of_order_execution = allow_out_of_order_execution self._ray_method_is_generator = method_is_generator self._ray_method_decorators = method_decorators self._ray_method_signatures = method_signatures self._ray_method_num_returns = method_num_returns self._ray_method_max_task_retries = method_max_task_retries self._ray_method_retry_exceptions = method_retry_exceptions self._ray_method_generator_backpressure_num_objects = ( method_generator_backpressure_num_objects ) self._ray_method_enable_task_events = method_enable_task_events self._ray_enable_tensor_transport = enable_tensor_transport self._ray_method_name_to_tensor_transport = method_name_to_tensor_transport self._ray_actor_method_cpus = actor_method_cpus self._ray_cluster_and_job = cluster_and_job self._ray_is_cross_language = language != Language.PYTHON self._ray_actor_creation_function_descriptor = ( actor_creation_function_descriptor ) self._ray_function_descriptor = {} # This is incremented each time `bind()` is called on an actor handle # (in Ray DAGs), therefore capturing the bind order of the actor methods. # TODO: this does not work properly if the caller has two copies of the # same actor handle, and needs to be fixed. 
self._ray_dag_bind_index = 0 if not self._ray_is_cross_language: assert isinstance( actor_creation_function_descriptor, PythonFunctionDescriptor ) module_name = actor_creation_function_descriptor.module_name class_name = actor_creation_function_descriptor.class_name for method_name in self._ray_method_signatures.keys(): function_descriptor = PythonFunctionDescriptor( module_name, method_name, class_name ) self._ray_function_descriptor[method_name] = function_descriptor # Build an _ActorMethodMetadata per method to cache expensive parsing logic. # The _ActorMethodMetadata doesn't take a reference to this ActorHandle to avoid a circular reference. # Instead, we will lazily bind this ActorHandle to the _ActorMethodMetadata when a method is invoked. self._method_shells = {} for method_name, method_signature in self._ray_method_signatures.items(): self._method_shells[method_name] = _ActorMethodMetadata( method_name=method_name, num_returns=self._ray_method_num_returns.get(method_name, None), max_task_retries=self._ray_method_max_task_retries.get( method_name, self._ray_max_task_retries ) or 0, retry_exceptions=self._ray_method_retry_exceptions.get(method_name), is_generator=self._ray_method_is_generator.get(method_name), generator_backpressure_num_objects=self._ray_method_generator_backpressure_num_objects.get( method_name ), enable_task_events=self._ray_method_enable_task_events.get( method_name, self._ray_enable_task_events ), decorator=self._ray_method_decorators.get(method_name), signature=method_signature, tensor_transport=self._ray_method_name_to_tensor_transport.get( method_name ), ) def __del__(self): # Weak references don't count towards the distributed ref count, so no # need to decrement the ref count. if self._ray_weak_ref: return try: # Mark that this actor handle has gone out of scope. Once all actor # handles are out of scope, the actor will exit. 
if ray._private.worker: worker = ray._private.worker.global_worker if worker.connected and hasattr(worker, "core_worker"): worker.core_worker.remove_actor_handle_reference(self._ray_actor_id) except AttributeError: # Suppress the attribute error which is caused by # python destruction ordering issue. # It only happen when python exits. pass def _actor_method_call( self, method_name: str, args: List[Any] = None, kwargs: Dict[str, Any] = None, name: str = "", num_returns: Optional[Union[int, Literal["streaming"]]] = None, max_task_retries: int = None, retry_exceptions: Union[bool, list, tuple] = None, concurrency_group_name: Optional[str] = None, generator_backpressure_num_objects: Optional[int] = None, enable_task_events: Optional[bool] = None, tensor_transport: Optional[TensorTransportEnum] = None, ): """Method execution stub for an actor handle. This is the function that executes when `actor.method_name.remote(*args, **kwargs)` is called. Instead of executing locally, the method is packaged as a task and scheduled to the remote actor instance. Args: method_name: The name of the actor method to execute. args: A list of arguments for the actor method. kwargs: A dictionary of keyword arguments for the actor method. name: The name to give the actor method call task. num_returns: The number of return values for the method. max_task_retries: Number of retries when method fails. retry_exceptions: Boolean of whether you want to retry all user-raised exceptions, or a list of allowlist exceptions to retry. concurrency_group_name: The name of the concurrency group to use. generator_backpressure_num_objects: The number of objects to generate before applying backpressure. enable_task_events: True if tracing is enabled, i.e., task events from the actor should be reported. tensor_transport: The tensor transport protocol to use for the actor method. Returns: object_refs: A list of object refs returned by the remote actor method. 
""" worker = ray._private.worker.global_worker args = args or [] kwargs = kwargs or {} if self._ray_is_cross_language: list_args = cross_language._format_args(worker, args, kwargs) function_descriptor = cross_language._get_function_descriptor_for_actor_method( # noqa: E501 self._ray_actor_language, self._ray_actor_creation_function_descriptor, method_name, # The signature for xlang should be "{length_of_arguments}" to handle # overloaded methods. signature=str(len(args) + len(kwargs)), ) else: function_signature = self._ray_method_signatures[method_name] if not args and not kwargs and not function_signature: list_args = [] else: list_args = signature.flatten_args(function_signature, args, kwargs) function_descriptor = self._ray_function_descriptor[method_name] if worker.mode == ray.LOCAL_MODE: assert ( not self._ray_is_cross_language ), "Cross language remote actor method cannot be executed locally." if num_returns == "dynamic": num_returns = -1 elif num_returns == "streaming": # TODO(sang): This is a temporary private API. # Remove it when we migrate to the streaming generator. num_returns = ray._raylet.STREAMING_GENERATOR_RETURN retry_exception_allowlist = None if retry_exceptions is None: retry_exceptions = False elif isinstance(retry_exceptions, (list, tuple)): retry_exception_allowlist = tuple(retry_exceptions) retry_exceptions = True assert isinstance( retry_exceptions, bool ), "retry_exceptions can either be \ boolean or list/tuple of exception types." 
if generator_backpressure_num_objects is None: generator_backpressure_num_objects = -1 object_refs = worker.core_worker.submit_actor_task( self._ray_actor_language, self._ray_actor_id, function_descriptor, list_args, name, num_returns, max_task_retries, retry_exceptions, retry_exception_allowlist, self._ray_actor_method_cpus, concurrency_group_name if concurrency_group_name is not None else b"", generator_backpressure_num_objects, enable_task_events, tensor_transport.value, ) if num_returns == STREAMING_GENERATOR_RETURN: # Streaming generator will return a single ref # that is for the generator task. assert len(object_refs) == 1 generator_ref = object_refs[0] return ObjectRefGenerator(generator_ref, worker) if len(object_refs) == 1: object_refs = object_refs[0] elif len(object_refs) == 0: object_refs = None return object_refs def __getattr__(self, item: str) -> Any: """Handle dynamic attribute access for actor methods. This method is called when accessing attributes that don't exist as direct instance attributes. It's the core mechanism for actor method invocation. For Python actors (99% of cases): - We use strict validation: only methods in _method_shells are allowed - This prevents typos and provides clear error messages - Returns a bound ActorMethod created from the cached _ActorMethodMetadata For cross-language actors: - We can't validate method names client-side (the target language defines them) - We allow arbitrary method calls to pass through - Some Python-specific methods like `__ray_terminate__` are blocked with warnings Args: item: The attribute/method name being accessed Returns: ActorMethod: A bound method ready for .remote() calls Raises: AttributeError: For Python actors when accessing non-existent methods """ # If this name matches a remote method, bind and return it. 
if item in self._method_shells: return self._method_shells[item].bind(self) if not self._ray_is_cross_language: raise AttributeError( f"'{type(self).__name__}' object has " f"no attribute '{item}'" ) if item in ["__ray_terminate__"]: class FakeActorMethod(object): def __call__(self, *args, **kwargs): raise TypeError( "Actor methods cannot be called directly. Instead " "of running 'object.{}()', try 'object.{}.remote()'.".format( item, item ) ) def remote(self, *args, **kwargs): logger.warning( f"Actor method {item} is not supported by cross language." ) return FakeActorMethod() return ActorMethod( self, # actor item, # method_name ray_constants.DEFAULT_ACTOR_METHOD_NUM_RETURN_VALS, 0, # max_task_retries False, # retry_exceptions False, # is_generator self._ray_method_generator_backpressure_num_objects.get(item, -1), self._ray_enable_task_events, # enable_task_events # Currently, cross-lang actor method not support decorator decorator=None, signature=None, ) # Make tab completion work. def __dir__(self): return self._ray_method_signatures.keys() def __repr__(self): return ( "Actor(" f"{self._ray_actor_creation_function_descriptor.class_name}, " f"{self._actor_id.hex()})" ) def __hash__(self): return hash(self._actor_id) def __eq__(self, __value): return hash(self) == hash(__value) @property def _actor_id(self): return self._ray_actor_id def _get_local_state(self): """Get the local actor state. NOTE: this method only returns accurate actor state after a first actor method call is made against this actor handle due to https://github.com/ray-project/ray/pull/24600. Returns: ActorTableData.ActorState or None if the state is unknown. """ worker = ray._private.worker.global_worker worker.check_connected() return worker.core_worker.get_local_actor_state(self._ray_actor_id) def _serialization_helper(self): """This is defined in order to make pickling work. Returns: A dictionary of the information needed to reconstruct the object. 
""" worker = ray._private.worker.global_worker worker.check_connected() if hasattr(worker, "core_worker"): # Non-local mode state = worker.core_worker.serialize_actor_handle(self._ray_actor_id) else: # Local mode state = ( { "actor_language": self._ray_actor_language, "actor_id": self._ray_actor_id, "max_task_retries": self._ray_max_task_retries, "enable_task_events": self._enable_task_events, "method_is_generator": self._ray_method_is_generator, "method_decorators": self._ray_method_decorators, "method_signatures": self._ray_method_signatures, "method_num_returns": self._ray_method_num_returns, "method_max_task_retries": self._ray_method_max_task_retries, "method_retry_exceptions": self._ray_method_retry_exceptions, "method_generator_backpressure_num_objects": ( self._ray_method_generator_backpressure_num_objects ), "method_enable_task_events": self._ray_method_enable_task_events, "enable_tensor_transport": self._ray_enable_tensor_transport, "method_name_to_tensor_transport": self._ray_method_name_to_tensor_transport, "actor_method_cpus": self._ray_actor_method_cpus, "actor_creation_function_descriptor": self._ray_actor_creation_function_descriptor, # noqa: E501 }, None, ) return (*state, self._ray_weak_ref) @classmethod def _deserialization_helper(cls, state, weak_ref: bool, outer_object_ref=None): """This is defined in order to make pickling work. Args: state: The serialized state of the actor handle. outer_object_ref: The ObjectRef that the serialized actor handle was contained in, if any. This is used for counting references to the actor handle. weak_ref: Whether this was serialized from an actor handle with a weak ref to the actor. 
""" worker = ray._private.worker.global_worker worker.check_connected() if hasattr(worker, "core_worker"): # Non-local mode return worker.core_worker.deserialize_and_register_actor_handle( state, outer_object_ref, weak_ref, ) else: # Local mode assert worker.current_cluster_and_job == state["current_cluster_and_job"] return cls( # TODO(swang): Accessing the worker's current task ID is not # thread-safe. state["actor_language"], state["actor_id"], state["max_task_retries"], state["enable_task_events"], state["method_is_generator"], state["method_decorators"], state["method_signatures"], state["method_num_returns"], state["method_max_task_retries"], state["method_retry_exceptions"], state["method_generator_backpressure_num_objects"], state["method_enable_task_events"], state["enable_tensor_transport"], state["method_name_to_tensor_transport"], state["actor_method_cpus"], state["actor_creation_function_descriptor"], state["current_cluster_and_job"], ) def __reduce__(self): """This code path is used by pickling but not by Ray forking.""" (serialized, _, weak_ref) = self._serialization_helper() # There is no outer object ref when the actor handle is # deserialized out-of-band using pickle. return ActorHandle._deserialization_helper, (serialized, weak_ref, None) def _modify_class(cls): # cls has been modified. if hasattr(cls, "__ray_actor_class__"): return cls # Give an error if cls is an old-style class. if not issubclass(cls, object): raise TypeError( "The @ray.remote decorator cannot be applied to old-style " "classes. In Python 2, you must declare the class with " "'class ClassName(object):' instead of 'class ClassName:'." ) # Modify the class to have additional default methods. 
class Class(cls): __ray_actor_class__ = cls # The original actor class def __ray_ready__(self): return True def __ray_call__(self, fn, *args, **kwargs): return fn(self, *args, **kwargs) def __ray_terminate__(self): worker = ray._private.worker.global_worker if worker.mode != ray.LOCAL_MODE: ray.actor.exit_actor() Class.__module__ = cls.__module__ Class.__name__ = cls.__name__ if not is_function_or_method(getattr(Class, "__init__", None)): # Add __init__ if it does not exist. # Actor creation will be executed with __init__ together. # Assign an __init__ function will avoid many checks later on. def __init__(self): pass Class.__init__ = __init__ return Class def _make_actor(cls, actor_options): Class = _modify_class(cls) _inject_tracing_into_class(Class) if "max_restarts" in actor_options: if actor_options["max_restarts"] != -1: # -1 represents infinite restart # Make sure we don't pass too big of an int to C++, causing # an overflow. actor_options["max_restarts"] = min( actor_options["max_restarts"], ray_constants.MAX_INT64_VALUE ) return ActorClass._ray_from_modified_class( Class, ActorClassID.from_random(), actor_options, ) @PublicAPI def exit_actor(): """Intentionally exit the current actor. This API can be used only inside an actor. Use ray.kill API if you'd like to kill an actor using actor handle. When this API is called, an exception is raised and the actor will exit immediately. For asyncio actors, there may be a short delay before the actor exits if the API is called from a background task. Any queued methods will fail. Any ``atexit`` handlers installed in the actor will be run. Raises: TypeError: An exception is raised if this is a driver or this worker is not an actor. 
""" worker = ray._private.worker.global_worker if worker.mode == ray.WORKER_MODE and not worker.actor_id.is_nil(): worker.core_worker.set_current_actor_should_exit() # In asyncio actor mode, we can't raise SystemExit because it will just # quit the asycnio event loop thread, not the main thread. Instead, we # raise a custom error to the main thread to tell it to exit. if worker.core_worker.current_actor_is_asyncio(): raise AsyncioActorExit() # Set a flag to indicate this is an intentional actor exit. This # reduces log verbosity. raise_sys_exit_with_custom_error_message("exit_actor() is called.") else: raise TypeError( "exit_actor API is called on a non-actor worker, " f"{worker.mode}. Call this API inside an actor methods" "if you'd like to exit the actor gracefully." )
ActorHandle
python
ray-project__ray
python/ray/exceptions.py
{ "start": 28702, "end": 28953 }
class ____(RayError): """Raised when the pending actor calls exceeds `max_pending_calls` option. This exception could happen probably because the caller calls the callee too frequently. """ pass @PublicAPI
PendingCallsLimitExceeded
python
facebook__pyre-check
tools/typeshed_patcher/patch_specs.py
{ "start": 2255, "end": 2566 }
class ____: ACTION_NAME: ClassVar[str] = "delete" name: str @staticmethod def from_json(input_dictionary: Mapping[str, object]) -> "DeleteAction": name = _ensure_string_value(input_dictionary, "name") return DeleteAction(name=name) @dataclasses.dataclass(frozen=True)
DeleteAction
python
pypa__hatch
backend/src/hatchling/builders/config.py
{ "start": 640, "end": 35662 }
class ____: def __init__( self, builder: BuilderInterface, root: str, plugin_name: str, build_config: dict[str, Any], target_config: dict[str, Any], ) -> None: self.__builder = builder self.__root = root self.__plugin_name = plugin_name self.__build_config = build_config self.__target_config = target_config # This is used when the only file selection is based on forced inclusion or build-time artifacts. This # instructs to `exclude` every encountered path without doing pattern matching that matches everything. self.__exclude_all: bool = False # Modified at build time self.build_artifact_spec: pathspec.GitIgnoreSpec | None = None self.build_force_include: dict[str, str] = {} self.build_reserved_paths: set[str] = set() @property def builder(self) -> BuilderInterface: return self.__builder @property def root(self) -> str: return self.__root @property def plugin_name(self) -> str: return self.__plugin_name @property def build_config(self) -> dict[str, Any]: return self.__build_config @property def target_config(self) -> dict[str, Any]: return self.__target_config def include_path(self, relative_path: str, *, explicit: bool = False, is_package: bool = True) -> bool: return ( self.path_is_build_artifact(relative_path) or self.path_is_artifact(relative_path) or ( not (self.only_packages and not is_package) and not self.path_is_excluded(relative_path) and (explicit or self.path_is_included(relative_path)) ) ) def path_is_included(self, relative_path: str) -> bool: if self.include_spec is None: return True return self.include_spec.match_file(relative_path) def path_is_excluded(self, relative_path: str) -> bool: if self.__exclude_all: return True if self.exclude_spec is None: return False return self.exclude_spec.match_file(relative_path) def path_is_artifact(self, relative_path: str) -> bool: if self.artifact_spec is None: return False return self.artifact_spec.match_file(relative_path) def path_is_build_artifact(self, relative_path: str) -> bool: if 
self.build_artifact_spec is None: return False return self.build_artifact_spec.match_file(relative_path) def path_is_reserved(self, relative_path: str) -> bool: return relative_path in self.build_reserved_paths def directory_is_excluded(self, name: str, relative_path: str) -> bool: if name in EXCLUDED_DIRECTORIES: return True relative_directory = os.path.join(relative_path, name) return ( self.path_is_reserved(relative_directory) # The trailing slash is necessary so e.g. `bar/` matches `foo/bar` or (self.skip_excluded_dirs and self.path_is_excluded(f"{relative_directory}/")) ) @cached_property def include_spec(self) -> pathspec.GitIgnoreSpec | None: if "include" in self.target_config: include_config = self.target_config include_location = f"tool.hatch.build.targets.{self.plugin_name}.include" else: include_config = self.build_config include_location = "tool.hatch.build.include" all_include_patterns = [] include_patterns = include_config.get("include", self.default_include()) if not isinstance(include_patterns, list): message = f"Field `{include_location}` must be an array of strings" raise TypeError(message) for i, include_pattern in enumerate(include_patterns, 1): if not isinstance(include_pattern, str): message = f"Pattern #{i} in field `{include_location}` must be a string" raise TypeError(message) if not include_pattern: message = f"Pattern #{i} in field `{include_location}` cannot be an empty string" raise ValueError(message) all_include_patterns.append(include_pattern) # Matching only at the root requires a forward slash, back slashes do not work. As such, # normalize to forward slashes for consistency. 
all_include_patterns.extend(f"/{relative_path.replace(os.sep, '/')}/" for relative_path in self.packages) if all_include_patterns: return pathspec.GitIgnoreSpec.from_lines(all_include_patterns) return None @cached_property def exclude_spec(self) -> pathspec.GitIgnoreSpec | None: if "exclude" in self.target_config: exclude_config = self.target_config exclude_location = f"tool.hatch.build.targets.{self.plugin_name}.exclude" else: exclude_config = self.build_config exclude_location = "tool.hatch.build.exclude" all_exclude_patterns = self.default_global_exclude() if not self.ignore_vcs: all_exclude_patterns.extend(self.load_vcs_exclusion_patterns()) exclude_patterns = exclude_config.get("exclude", self.default_exclude()) if not isinstance(exclude_patterns, list): message = f"Field `{exclude_location}` must be an array of strings" raise TypeError(message) for i, exclude_pattern in enumerate(exclude_patterns, 1): if not isinstance(exclude_pattern, str): message = f"Pattern #{i} in field `{exclude_location}` must be a string" raise TypeError(message) if not exclude_pattern: message = f"Pattern #{i} in field `{exclude_location}` cannot be an empty string" raise ValueError(message) all_exclude_patterns.append(exclude_pattern) if all_exclude_patterns: return pathspec.GitIgnoreSpec.from_lines(all_exclude_patterns) return None @property def artifact_spec(self) -> pathspec.GitIgnoreSpec | None: if "artifacts" in self.target_config: artifact_config = self.target_config artifact_location = f"tool.hatch.build.targets.{self.plugin_name}.artifacts" else: artifact_config = self.build_config artifact_location = "tool.hatch.build.artifacts" all_artifact_patterns = [] artifact_patterns = artifact_config.get("artifacts", []) if not isinstance(artifact_patterns, list): message = f"Field `{artifact_location}` must be an array of strings" raise TypeError(message) for i, artifact_pattern in enumerate(artifact_patterns, 1): if not isinstance(artifact_pattern, str): message = f"Pattern #{i} in 
field `{artifact_location}` must be a string" raise TypeError(message) if not artifact_pattern: message = f"Pattern #{i} in field `{artifact_location}` cannot be an empty string" raise ValueError(message) all_artifact_patterns.append(artifact_pattern) if all_artifact_patterns: return pathspec.GitIgnoreSpec.from_lines(all_artifact_patterns) return None @cached_property def hook_config(self) -> dict[str, Any]: hook_config: dict[str, dict[str, Any]] = {} global_hook_config = self.build_config.get("hooks", {}) if not isinstance(global_hook_config, dict): message = "Field `tool.hatch.build.hooks` must be a table" raise TypeError(message) for hook_name, config in global_hook_config.items(): if not isinstance(config, dict): message = f"Field `tool.hatch.build.hooks.{hook_name}` must be a table" raise TypeError(message) hook_config.setdefault(hook_name, config) target_hook_config = self.target_config.get("hooks", {}) if not isinstance(target_hook_config, dict): message = f"Field `tool.hatch.build.targets.{self.plugin_name}.hooks` must be a table" raise TypeError(message) for hook_name, config in target_hook_config.items(): if not isinstance(config, dict): message = f"Field `tool.hatch.build.targets.{self.plugin_name}.hooks.{hook_name}` must be a table" raise TypeError(message) hook_config[hook_name] = config if not env_var_enabled(BuildEnvVars.NO_HOOKS): all_hooks_enabled = env_var_enabled(BuildEnvVars.HOOKS_ENABLE) final_hook_config = { hook_name: config for hook_name, config in hook_config.items() if ( all_hooks_enabled or config.get("enable-by-default", True) or env_var_enabled(f"{BuildEnvVars.HOOK_ENABLE_PREFIX}{hook_name.upper()}") ) } else: final_hook_config = {} return final_hook_config @cached_property def directory(self) -> str: if "directory" in self.target_config: directory = self.target_config["directory"] if not isinstance(directory, str): message = f"Field `tool.hatch.build.targets.{self.plugin_name}.directory` must be a string" raise TypeError(message) else: 
directory = self.build_config.get("directory", DEFAULT_BUILD_DIRECTORY) if not isinstance(directory, str): message = "Field `tool.hatch.build.directory` must be a string" raise TypeError(message) return self.normalize_build_directory(directory) @cached_property def skip_excluded_dirs(self) -> bool: if "skip-excluded-dirs" in self.target_config: skip_excluded_dirs = self.target_config["skip-excluded-dirs"] if not isinstance(skip_excluded_dirs, bool): message = f"Field `tool.hatch.build.targets.{self.plugin_name}.skip-excluded-dirs` must be a boolean" raise TypeError(message) else: skip_excluded_dirs = self.build_config.get("skip-excluded-dirs", False) if not isinstance(skip_excluded_dirs, bool): message = "Field `tool.hatch.build.skip-excluded-dirs` must be a boolean" raise TypeError(message) return skip_excluded_dirs @cached_property def ignore_vcs(self) -> bool: if "ignore-vcs" in self.target_config: ignore_vcs = self.target_config["ignore-vcs"] if not isinstance(ignore_vcs, bool): message = f"Field `tool.hatch.build.targets.{self.plugin_name}.ignore-vcs` must be a boolean" raise TypeError(message) else: ignore_vcs = self.build_config.get("ignore-vcs", False) if not isinstance(ignore_vcs, bool): message = "Field `tool.hatch.build.ignore-vcs` must be a boolean" raise TypeError(message) return ignore_vcs @cached_property def require_runtime_dependencies(self) -> bool: if "require-runtime-dependencies" in self.target_config: require_runtime_dependencies = self.target_config["require-runtime-dependencies"] if not isinstance(require_runtime_dependencies, bool): message = ( f"Field `tool.hatch.build.targets.{self.plugin_name}.require-runtime-dependencies` " f"must be a boolean" ) raise TypeError(message) else: require_runtime_dependencies = self.build_config.get("require-runtime-dependencies", False) if not isinstance(require_runtime_dependencies, bool): message = "Field `tool.hatch.build.require-runtime-dependencies` must be a boolean" raise TypeError(message) return 
require_runtime_dependencies @cached_property def require_runtime_features(self) -> list[str]: if "require-runtime-features" in self.target_config: features_config = self.target_config features_location = f"tool.hatch.build.targets.{self.plugin_name}.require-runtime-features" else: features_config = self.build_config features_location = "tool.hatch.build.require-runtime-features" require_runtime_features = features_config.get("require-runtime-features", []) if not isinstance(require_runtime_features, list): message = f"Field `{features_location}` must be an array" raise TypeError(message) all_features: dict[str, None] = {} for i, raw_feature in enumerate(require_runtime_features, 1): if not isinstance(raw_feature, str): message = f"Feature #{i} of field `{features_location}` must be a string" raise TypeError(message) if not raw_feature: message = f"Feature #{i} of field `{features_location}` cannot be an empty string" raise ValueError(message) feature = normalize_project_name(raw_feature) if feature not in self.builder.metadata.core.optional_dependencies: message = ( f"Feature `{feature}` of field `{features_location}` is not defined in " f"field `project.optional-dependencies`" ) raise ValueError(message) all_features[feature] = None return list(all_features) @cached_property def only_packages(self) -> bool: """ Whether or not the target should ignore non-artifact files that do not reside within a Python package. 
""" if "only-packages" in self.target_config: only_packages = self.target_config["only-packages"] if not isinstance(only_packages, bool): message = f"Field `tool.hatch.build.targets.{self.plugin_name}.only-packages` must be a boolean" raise TypeError(message) else: only_packages = self.build_config.get("only-packages", False) if not isinstance(only_packages, bool): message = "Field `tool.hatch.build.only-packages` must be a boolean" raise TypeError(message) return only_packages @cached_property def reproducible(self) -> bool: """ Whether or not the target should be built in a reproducible manner, defaulting to true. """ if "reproducible" in self.target_config: reproducible = self.target_config["reproducible"] if not isinstance(reproducible, bool): message = f"Field `tool.hatch.build.targets.{self.plugin_name}.reproducible` must be a boolean" raise TypeError(message) else: reproducible = self.build_config.get("reproducible", True) if not isinstance(reproducible, bool): message = "Field `tool.hatch.build.reproducible` must be a boolean" raise TypeError(message) return reproducible @cached_property def dev_mode_dirs(self) -> list[str]: """ Directories which must be added to Python's search path in [dev mode](../config/environment/overview.md#dev-mode). 
""" if "dev-mode-dirs" in self.target_config: dev_mode_dirs_config = self.target_config dev_mode_dirs_location = f"tool.hatch.build.targets.{self.plugin_name}.dev-mode-dirs" else: dev_mode_dirs_config = self.build_config dev_mode_dirs_location = "tool.hatch.build.dev-mode-dirs" all_dev_mode_dirs = [] dev_mode_dirs = dev_mode_dirs_config.get("dev-mode-dirs", []) if not isinstance(dev_mode_dirs, list): message = f"Field `{dev_mode_dirs_location}` must be an array of strings" raise TypeError(message) for i, dev_mode_dir in enumerate(dev_mode_dirs, 1): if not isinstance(dev_mode_dir, str): message = f"Directory #{i} in field `{dev_mode_dirs_location}` must be a string" raise TypeError(message) if not dev_mode_dir: message = f"Directory #{i} in field `{dev_mode_dirs_location}` cannot be an empty string" raise ValueError(message) all_dev_mode_dirs.append(dev_mode_dir) return all_dev_mode_dirs @cached_property def dev_mode_exact(self) -> bool: if "dev-mode-exact" in self.target_config: dev_mode_exact = self.target_config["dev-mode-exact"] if not isinstance(dev_mode_exact, bool): message = f"Field `tool.hatch.build.targets.{self.plugin_name}.dev-mode-exact` must be a boolean" raise TypeError(message) else: dev_mode_exact = self.build_config.get("dev-mode-exact", False) if not isinstance(dev_mode_exact, bool): message = "Field `tool.hatch.build.dev-mode-exact` must be a boolean" raise TypeError(message) return dev_mode_exact @cached_property def versions(self) -> list[str]: # Used as an ordered set all_versions: dict[str, None] = {} versions = self.target_config.get("versions", []) if not isinstance(versions, list): message = f"Field `tool.hatch.build.targets.{self.plugin_name}.versions` must be an array of strings" raise TypeError(message) for i, version in enumerate(versions, 1): if not isinstance(version, str): message = ( f"Version #{i} in field `tool.hatch.build.targets.{self.plugin_name}.versions` must be a string" ) raise TypeError(message) if not version: message = 
( f"Version #{i} in field `tool.hatch.build.targets.{self.plugin_name}.versions` " f"cannot be an empty string" ) raise ValueError(message) all_versions[version] = None if not all_versions: default_versions = self.__builder.get_default_versions() for version in default_versions: all_versions[version] = None else: unknown_versions = set(all_versions) - set(self.__builder.get_version_api()) if unknown_versions: message = ( f"Unknown versions in field `tool.hatch.build.targets.{self.plugin_name}.versions`: " f"{', '.join(map(str, sorted(unknown_versions)))}" ) raise ValueError(message) return list(all_versions) @cached_property def dependencies(self) -> list[str]: # Used as an ordered set dependencies: dict[str, None] = {} target_dependencies = self.target_config.get("dependencies", []) if not isinstance(target_dependencies, list): message = f"Field `tool.hatch.build.targets.{self.plugin_name}.dependencies` must be an array" raise TypeError(message) for i, dependency in enumerate(target_dependencies, 1): if not isinstance(dependency, str): message = ( f"Dependency #{i} of field `tool.hatch.build.targets.{self.plugin_name}.dependencies` " f"must be a string" ) raise TypeError(message) dependencies[dependency] = None global_dependencies = self.build_config.get("dependencies", []) if not isinstance(global_dependencies, list): message = "Field `tool.hatch.build.dependencies` must be an array" raise TypeError(message) for i, dependency in enumerate(global_dependencies, 1): if not isinstance(dependency, str): message = f"Dependency #{i} of field `tool.hatch.build.dependencies` must be a string" raise TypeError(message) dependencies[dependency] = None require_runtime_dependencies = self.require_runtime_dependencies require_runtime_features = dict.fromkeys(self.require_runtime_features) for hook_name, config in self.hook_config.items(): hook_require_runtime_dependencies = config.get("require-runtime-dependencies", False) if not isinstance(hook_require_runtime_dependencies, 
bool): message = f"Option `require-runtime-dependencies` of build hook `{hook_name}` must be a boolean" raise TypeError(message) if hook_require_runtime_dependencies: require_runtime_dependencies = True hook_require_runtime_features = config.get("require-runtime-features", []) if not isinstance(hook_require_runtime_features, list): message = f"Option `require-runtime-features` of build hook `{hook_name}` must be an array" raise TypeError(message) for i, raw_feature in enumerate(hook_require_runtime_features, 1): if not isinstance(raw_feature, str): message = ( f"Feature #{i} of option `require-runtime-features` of build hook `{hook_name}` " f"must be a string" ) raise TypeError(message) if not raw_feature: message = ( f"Feature #{i} of option `require-runtime-features` of build hook `{hook_name}` " f"cannot be an empty string" ) raise ValueError(message) feature = normalize_project_name(raw_feature) if feature not in self.builder.metadata.core.optional_dependencies: message = ( f"Feature `{feature}` of option `require-runtime-features` of build hook `{hook_name}` " f"is not defined in field `project.optional-dependencies`" ) raise ValueError(message) require_runtime_features[feature] = None hook_dependencies = config.get("dependencies", []) if not isinstance(hook_dependencies, list): message = f"Option `dependencies` of build hook `{hook_name}` must be an array" raise TypeError(message) for i, dependency in enumerate(hook_dependencies, 1): if not isinstance(dependency, str): message = f"Dependency #{i} of option `dependencies` of build hook `{hook_name}` must be a string" raise TypeError(message) dependencies[dependency] = None if require_runtime_dependencies: for dependency in self.builder.metadata.core.dependencies: dependencies[dependency] = None if require_runtime_features: for feature in require_runtime_features: for dependency in self.builder.metadata.core.optional_dependencies[feature]: dependencies[dependency] = None for dependency in 
self.dynamic_dependencies: dependencies[dependency] = None return list(dependencies) @cached_property def dynamic_dependencies(self) -> list[str]: dependencies = [] for hook_name, config in self.hook_config.items(): build_hook_cls = self.builder.plugin_manager.build_hook.get(hook_name) if build_hook_cls is None: continue # Hook exists but dynamic dependencies are not imported lazily. # This happens for example when using the `custom` build hook. try: build_hook = build_hook_cls( self.root, config, self, self.builder.metadata, "", self.builder.PLUGIN_NAME, self.builder.app ) except ImportError: continue dependencies.extend(build_hook.dependencies()) return dependencies @cached_property def sources(self) -> dict[str, str]: if "sources" in self.target_config: sources_config = self.target_config sources_location = f"tool.hatch.build.targets.{self.plugin_name}.sources" else: sources_config = self.build_config sources_location = "tool.hatch.build.sources" sources = {} raw_sources = sources_config.get("sources", []) if isinstance(raw_sources, list): for i, source in enumerate(raw_sources, 1): if not isinstance(source, str): message = f"Source #{i} in field `{sources_location}` must be a string" raise TypeError(message) if not source: message = f"Source #{i} in field `{sources_location}` cannot be an empty string" raise ValueError(message) sources[normalize_relative_directory(source)] = "" elif isinstance(raw_sources, dict): for source, path in raw_sources.items(): if not isinstance(path, str): message = f"Path for source `{source}` in field `{sources_location}` must be a string" raise TypeError(message) normalized_path = normalize_relative_path(path) if normalized_path == ".": normalized_path = "" else: normalized_path += os.sep sources[normalize_relative_directory(source) if source else source] = normalized_path else: message = f"Field `{sources_location}` must be a mapping or array of strings" raise TypeError(message) for relative_path in self.packages: source, _package 
= os.path.split(relative_path) if source and normalize_relative_directory(relative_path) not in sources: sources[normalize_relative_directory(source)] = "" return dict(sorted(sources.items())) @cached_property def packages(self) -> list[str]: if "packages" in self.target_config: package_config = self.target_config package_location = f"tool.hatch.build.targets.{self.plugin_name}.packages" else: package_config = self.build_config package_location = "tool.hatch.build.packages" packages = package_config.get("packages", self.default_packages()) if not isinstance(packages, list): message = f"Field `{package_location}` must be an array of strings" raise TypeError(message) for i, package in enumerate(packages, 1): if not isinstance(package, str): message = f"Package #{i} in field `{package_location}` must be a string" raise TypeError(message) if not package: message = f"Package #{i} in field `{package_location}` cannot be an empty string" raise ValueError(message) return sorted(normalize_relative_path(package) for package in packages) @cached_property def force_include(self) -> dict[str, str]: if "force-include" in self.target_config: force_include_config = self.target_config force_include_location = f"tool.hatch.build.targets.{self.plugin_name}.force-include" else: force_include_config = self.build_config force_include_location = "tool.hatch.build.force-include" force_include = force_include_config.get("force-include", {}) if not isinstance(force_include, dict): message = f"Field `{force_include_location}` must be a mapping" raise TypeError(message) for i, (source, relative_path) in enumerate(force_include.items(), 1): if not source: message = f"Source #{i} in field `{force_include_location}` cannot be an empty string" raise ValueError(message) if not isinstance(relative_path, str): message = f"Path for source `{source}` in field `{force_include_location}` must be a string" raise TypeError(message) if not relative_path: message = f"Path for source `{source}` in field 
`{force_include_location}` cannot be an empty string" raise ValueError(message) return normalize_inclusion_map(force_include, self.root) @cached_property def only_include(self) -> dict[str, str]: if "only-include" in self.target_config: only_include_config = self.target_config only_include_location = f"tool.hatch.build.targets.{self.plugin_name}.only-include" else: only_include_config = self.build_config only_include_location = "tool.hatch.build.only-include" only_include = only_include_config.get("only-include", self.default_only_include()) or self.packages if not isinstance(only_include, list): message = f"Field `{only_include_location}` must be an array" raise TypeError(message) inclusion_map = {} for i, relative_path in enumerate(only_include, 1): if not isinstance(relative_path, str): message = f"Path #{i} in field `{only_include_location}` must be a string" raise TypeError(message) normalized_path = normalize_relative_path(relative_path) if not normalized_path or normalized_path.startswith(("~", "..")): message = f"Path #{i} in field `{only_include_location}` must be relative: {relative_path}" raise ValueError(message) if normalized_path in inclusion_map: message = f"Duplicate path in field `{only_include_location}`: {normalized_path}" raise ValueError(message) inclusion_map[normalized_path] = normalized_path return normalize_inclusion_map(inclusion_map, self.root) def get_distribution_path(self, relative_path: str) -> str: # src/foo/bar.py -> foo/bar.py for source, replacement in self.sources.items(): if not source: return replacement + relative_path if relative_path.startswith(source): return relative_path.replace(source, replacement, 1) return relative_path @cached_property def vcs_exclusion_files(self) -> dict[str, list[str]]: exclusion_files: dict[str, list[str]] = {"git": [], "hg": []} local_gitignore = locate_file(self.root, ".gitignore", boundary=".git") if local_gitignore is not None: exclusion_files["git"].append(local_gitignore) local_hgignore = 
locate_file(self.root, ".hgignore", boundary=".hg") if local_hgignore is not None: exclusion_files["hg"].append(local_hgignore) return exclusion_files def load_vcs_exclusion_patterns(self) -> list[str]: patterns = [] # https://git-scm.com/docs/gitignore#_pattern_format for exclusion_file in self.vcs_exclusion_files["git"]: with open(exclusion_file, encoding="utf-8") as f: patterns.extend(f.readlines()) # https://linux.die.net/man/5/hgignore for exclusion_file in self.vcs_exclusion_files["hg"]: with open(exclusion_file, encoding="utf-8") as f: glob_mode = False for line in f: exact_line = line.strip() if exact_line == "syntax: glob": glob_mode = True continue if exact_line.startswith("syntax: "): glob_mode = False continue if glob_mode: patterns.append(line) # validate project root is not excluded by vcs exclude_spec = pathspec.GitIgnoreSpec.from_lines(patterns) if exclude_spec.match_file(self.root): return [] return patterns def normalize_build_directory(self, build_directory: str) -> str: if not os.path.isabs(build_directory): build_directory = os.path.join(self.root, build_directory) return os.path.normpath(build_directory) def default_include(self) -> list: # noqa: PLR6301 return [] def default_exclude(self) -> list: # noqa: PLR6301 return [] def default_packages(self) -> list: # noqa: PLR6301 return [] def default_only_include(self) -> list: # noqa: PLR6301 return [] def default_global_exclude(self) -> list[str]: # noqa: PLR6301 patterns = ["*.py[cdo]", f"/{DEFAULT_BUILD_DIRECTORY}"] patterns.sort() return patterns def set_exclude_all(self) -> None: self.__exclude_all = True def get_force_include(self) -> dict[str, str]: force_include = self.force_include.copy() force_include.update(self.build_force_include) return force_include @contextmanager def set_build_data(self, build_data: dict[str, Any]) -> Generator: try: # Include anything the hooks indicate build_artifacts = build_data["artifacts"] if build_artifacts: self.build_artifact_spec = 
pathspec.GitIgnoreSpec.from_lines(build_artifacts) self.build_force_include.update(normalize_inclusion_map(build_data["force_include"], self.root)) for inclusion_map in (self.force_include, self.build_force_include): for source, target in inclusion_map.items(): # Ignore source # old/ -> new/ # old.ext -> new.ext if source.startswith(f"{self.root}{os.sep}"): self.build_reserved_paths.add(self.get_distribution_path(os.path.relpath(source, self.root))) # Ignore target files only # ../out.ext -> ../in.ext elif os.path.isfile(source): self.build_reserved_paths.add(self.get_distribution_path(target)) yield finally: self.build_artifact_spec = None self.build_force_include.clear() self.build_reserved_paths.clear() def env_var_enabled(env_var: str, *, default: bool = False) -> bool: if env_var in os.environ: return os.environ[env_var] in {"1", "true"} return default BuilderConfigBound = TypeVar("BuilderConfigBound", bound=BuilderConfig)
BuilderConfig
python
spack__spack
lib/spack/spack/vendor/ruamel/yaml/events.py
{ "start": 4140, "end": 4427 }
class ____(Event): __slots__ = ('explicit',) def __init__(self, start_mark=None, end_mark=None, explicit=None, comment=None): # type: (Any, Any, Any, Any) -> None Event.__init__(self, start_mark, end_mark, comment) self.explicit = explicit
DocumentEndEvent
python
prabhupant__python-ds
data_structures/graphs/dag_longest_path.py
{ "start": 122, "end": 1560 }
class ____: def __init__(self, vertices): self.graph = defaultdict(list) self.vertices = vertices def add_edge(self, u, v, w): self.graph[u].append((v, w)) def topological_sort_util(self, vertex, visited, stack): visited[vertex] = True for v, weight in self.graph[vertex]: if visited[v] == False: self.topological_sort_util(v, visited, stack) stack.insert(0, vertex) def topological_sort(self): visited = [False] * self.vertices stack = [] for v in range(self.vertices): if visited[v] == False: self.topological_sort_util(v, visited, stack) return stack def longest_path(self, s): stack = self.topological_sort() distance = [-float("inf")] * self.vertices distance[s] = 0 while stack: i = stack.pop(0) for vertex, weight in self.graph[i]: if distance[vertex] < distance[i] + weight: distance[vertex] = distance[i] + weight for i in range(self.vertices): print(f"{s} -> {i} = {distance[i]}") g = Graph(6) g.add_edge(0, 1, 5) g.add_edge(0, 2, 3) g.add_edge(1, 3, 6) g.add_edge(1, 2, 2) g.add_edge(2, 4, 4) g.add_edge(2, 5, 2) g.add_edge(2, 3, 7) g.add_edge(3, 5, 1) g.add_edge(3, 4, -1) g.add_edge(4, 5, -2) source = 0 g.longest_path(source)
Graph
python
Textualize__textual
tests/command_palette/test_declare_sources.py
{ "start": 2178, "end": 2669 }
class ____(ScreenWithNoSources): COMMANDS = {ExampleCommandSource} async def test_screen_command_sources() -> None: """Command sources declared on a screen should be in the command palette.""" async with AppWithInitialScreen(ScreenWithSources()).run_test() as pilot: assert isinstance(pilot.app.screen, CommandPalette) assert pilot.app.screen._provider_classes == { SystemCommandsProvider, ExampleCommandSource, }
ScreenWithSources
python
getsentry__sentry
src/sentry/monitors/serializers.py
{ "start": 1224, "end": 1540 }
class ____(Serializer): def serialize(self, obj, attrs, user, **kwargs) -> MonitorEnvBrokenDetectionSerializerResponse: return { "userNotifiedTimestamp": obj.user_notified_timestamp, "environmentMutedTimestamp": obj.env_muted_timestamp, }
MonitorEnvBrokenDetectionSerializer
python
ray-project__ray
doc/source/serve/doc_code/grpc_proxy/grpc_guide.py
{ "start": 5101, "end": 6908 }
class ____: def __init__( self, _image_downloader: DeploymentHandle, _data_preprocessor: DeploymentHandle, ): self._image_downloader = _image_downloader self._data_preprocessor = _data_preprocessor self.model = torch.hub.load( "pytorch/vision:v0.10.0", "resnet18", pretrained=True ) self.model.eval() self.categories = self._image_labels() def _image_labels(self) -> List[str]: categories = [] url = ( "https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt" ) labels = requests.get(url).text for label in labels.split("\n"): categories.append(label.strip()) return categories async def Predict(self, image_data: ImageData) -> ImageClass: # Download image image = await self._image_downloader.remote(image_data.url) # Preprocess image input_batch = await self._data_preprocessor.remote(image) # Predict image with torch.no_grad(): output = self.model(input_batch) probabilities = torch.nn.functional.softmax(output[0], dim=0) return self.process_model_outputs(probabilities) def process_model_outputs(self, probabilities: torch.Tensor) -> ImageClass: image_classes = [] image_probabilities = [] # Show top categories per image top5_prob, top5_catid = torch.topk(probabilities, 5) for i in range(top5_prob.size(0)): image_classes.append(self.categories[top5_catid[i]]) image_probabilities.append(top5_prob[i].item()) return ImageClass( classes=image_classes, probabilities=image_probabilities, ) @serve.deployment
ImageClassifier
python
FactoryBoy__factory_boy
factory/declarations.py
{ "start": 7312, "end": 8569 }
class ____(BaseDeclaration): """Fill this value using the values returned by an iterator. Warning: the iterator should not end ! Attributes: iterator (iterable): the iterator whose value should be used. getter (callable or None): a function to parse returned values """ def __init__(self, iterator, cycle=True, getter=None): super().__init__() self.getter = getter self.iterator = None if cycle: self.iterator_builder = lambda: utils.ResetableIterator(itertools.cycle(iterator)) else: self.iterator_builder = lambda: utils.ResetableIterator(iterator) def evaluate(self, instance, step, extra): # Begin unrolling as late as possible. # This helps with ResetableIterator(MyModel.objects.all()) if self.iterator is None: self.iterator = self.iterator_builder() logger.debug("Iterator: Fetching next value from %r", self.iterator) value = next(iter(self.iterator)) if self.getter is None: return value return self.getter(value) def reset(self): """Reset the internal iterator.""" if self.iterator is not None: self.iterator.reset()
Iterator
python
huggingface__transformers
src/transformers/models/cwm/modeling_cwm.py
{ "start": 8390, "end": 11703 }
class ____(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: CwmConfig, layer_idx: int): super().__init__() self.layer_type = config.layer_types[layer_idx] if hasattr(config, "layer_types") else None self.config = config self.layer_idx = layer_idx self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.scaling = self.head_dim**-0.5 self.attention_dropout = config.attention_dropout self.is_causal = True self.q_proj = torch.nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False) self.k_proj = torch.nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False) self.v_proj = torch.nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False) self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False) self.rotary_fn = apply_rotary_pos_emb self.sliding_window = config.sliding_window if self.layer_type == "sliding_attention" else None def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed 
for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, sliding_window=self.sliding_window, # main diff with Llama **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights @use_kernel_forward_from_hub("RMSNorm")
CwmAttention
python
run-llama__llama_index
llama-index-integrations/vector_stores/llama-index-vector-stores-weaviate/llama_index/vector_stores/weaviate/_exceptions.py
{ "start": 551, "end": 1012 }
class ____(Exception): """Exception raised when the async weaviate client was not provided via the `weaviate_client` parameter.""" def __init__( self, message="Async method called without WeaviateAsyncClient provided. Pass the async weaviate client to be used via `weaviate_client` to the constructor of WeaviateVectorStore.", ) -> None: self.message = message super().__init__(self.message)
AsyncClientNotProvidedError
python
streamlit__streamlit
lib/tests/streamlit/elements/slider_test.py
{ "start": 13375, "end": 15412 }
class ____(DeltaGeneratorTestCase): def test_slider_with_width_pixels(self): """Test that slider can be displayed with a specific width in pixels.""" st.slider("Label", min_value=0, max_value=10, width=500) element = self.get_delta_from_queue().new_element assert ( element.width_config.WhichOneof("width_spec") == WidthConfigFields.PIXEL_WIDTH.value ) assert element.width_config.pixel_width == 500 def test_slider_with_width_stretch(self): """Test that slider can be displayed with a width of 'stretch'.""" st.slider("Label", min_value=0, max_value=10, width="stretch") element = self.get_delta_from_queue().new_element assert ( element.width_config.WhichOneof("width_spec") == WidthConfigFields.USE_STRETCH.value ) assert element.width_config.use_stretch is True def test_slider_with_default_width(self): """Test that the default width is used when not specified.""" st.slider("Label", min_value=0, max_value=10) element = self.get_delta_from_queue().new_element assert ( element.width_config.WhichOneof("width_spec") == WidthConfigFields.USE_STRETCH.value ) assert element.width_config.use_stretch is True @parameterized.expand( [ ("invalid_string", "invalid"), ("negative", -1), ("zero", 0), ("float", 100.5), ] ) def test_width_config_invalid(self, name, invalid_width): """Test width config with various invalid values.""" with pytest.raises(StreamlitInvalidWidthError): st.slider("the label", width=invalid_width) def test_id_stability(): def script(): import streamlit as st st.slider("slider", key="slider") at = AppTest.from_function(script).run() s1 = at.slider[0] at = s1.set_value(5).run() s2 = at.slider[0] assert s1.id == s2.id
SliderWidthTest
python
getsentry__sentry
src/sentry/uptime/types.py
{ "start": 2578, "end": 2761 }
class ____(IntEnum): """ Used to identify what the current status of a uptime monitor is. """ NO_INCIDENT = 0 IN_INCIDENT = 1 @dataclass(frozen=True)
IncidentStatus
python
jmcnamara__XlsxWriter
xlsxwriter/test/worksheet/test_cond_format09.py
{ "start": 345, "end": 3720 }
class ____(unittest.TestCase): """ Test assembling a complete Worksheet file. """ def test_assemble_xml_file(self): """Test writing a worksheet with conditional formatting.""" self.maxDiff = None fh = StringIO() worksheet = Worksheet() worksheet._set_filehandle(fh) worksheet.select() worksheet.write("A1", 10) worksheet.write("A2", 20) worksheet.write("A3", 30) worksheet.write("A4", 40) worksheet.conditional_format( "A1:A4", { "type": "blanks", }, ) worksheet.conditional_format( "A1:A4", { "type": "no_blanks", "format": None, }, ) worksheet.conditional_format( "A1:A4", { "type": "errors", "format": None, }, ) worksheet.conditional_format( "A1:A4", { "type": "no_errors", "format": None, }, ) worksheet._assemble_xml_file() exp = _xml_to_list( """ <?xml version="1.0" encoding="UTF-8" standalone="yes"?> <worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships"> <dimension ref="A1:A4"/> <sheetViews> <sheetView tabSelected="1" workbookViewId="0"/> </sheetViews> <sheetFormatPr defaultRowHeight="15"/> <sheetData> <row r="1" spans="1:1"> <c r="A1"> <v>10</v> </c> </row> <row r="2" spans="1:1"> <c r="A2"> <v>20</v> </c> </row> <row r="3" spans="1:1"> <c r="A3"> <v>30</v> </c> </row> <row r="4" spans="1:1"> <c r="A4"> <v>40</v> </c> </row> </sheetData> <conditionalFormatting sqref="A1:A4"> <cfRule type="containsBlanks" priority="1"> <formula>LEN(TRIM(A1))=0</formula> </cfRule> <cfRule type="notContainsBlanks" priority="2"> <formula>LEN(TRIM(A1))&gt;0</formula> </cfRule> <cfRule type="containsErrors" priority="3"> <formula>ISERROR(A1)</formula> </cfRule> <cfRule type="notContainsErrors" priority="4"> <formula>NOT(ISERROR(A1))</formula> </cfRule> </conditionalFormatting> <pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/> </worksheet> """ ) got = _xml_to_list(fh.getvalue()) self.assertEqual(exp, got)
TestAssembleWorksheet
python
charliermarsh__ruff
crates/ruff_python_formatter/resources/test/fixtures/ruff/docstring.py
{ "start": 1392, "end": 1859 }
class ____: """""" "" def docstring_that_ends_with_quote_and_a_line_break1(): """ he said "the news of my death have been greatly exaggerated" """ def docstring_that_ends_with_quote_and_a_line_break2(): """he said "the news of my death have been greatly exaggerated" """ def docstring_that_ends_with_quote_and_a_line_break3(): """he said "the news of my death have been greatly exaggerated" """
IgnoreImplicitlyConcatenatedStrings
python
getsentry__sentry
tests/sentry/runner/commands/test_cleanup.py
{ "start": 5062, "end": 6974 }
class ____(TestCase): def test_run_bulk_query_deletes_by_project(self) -> None: """Test that the function runs bulk query deletes by project as expected.""" days = 30 # Creating the groups in out of order to test that the chunks are created in the correct order. self.create_group(last_seen=before_now(days=days + 4)) self.create_group() self.create_group(last_seen=before_now(days=days + 2)) self.create_group(last_seen=before_now(days=days + 3)) assert Group.objects.count() == 4 assert Group.objects.filter(last_seen__lt=before_now(days=days)).count() == 3 ids = list( Group.objects.filter(last_seen__lt=before_now(days=days)).values_list("id", flat=True) ) with ( assume_test_silo_mode(SiloMode.REGION), patch("sentry.runner.commands.cleanup.DELETES_BY_PROJECT_CHUNK_SIZE", 2), ): task_queue = SynchronousTaskQueue() models_attempted: set[str] = set() run_bulk_deletes_by_project( task_queue=task_queue, # type: ignore[arg-type] # It partially implements the queue protocol project_id=None, start_from_project_id=None, is_filtered=lambda model: False, days=days, models_attempted=models_attempted, ) assert models_attempted == {"group", "projectdebugfile"} assert len(task_queue.put_calls) == 2 # Verify we deleted all expected groups (order may vary due to non-unique last_seen) all_deleted_ids: set[int] = set() for call in task_queue.put_calls: assert call[0] == "sentry.models.group.Group" all_deleted_ids.update(call[1]) assert all_deleted_ids == set(ids) assert Group.objects.all().count() == 1
RunBulkQueryDeletesByProjectTest
python
scipy__scipy
scipy/integrate/_rules/_base.py
{ "start": 100, "end": 5977 }
class ____: """ Base class for numerical integration algorithms (cubatures). Finds an estimate for the integral of ``f`` over the region described by two arrays ``a`` and ``b`` via `estimate`, and find an estimate for the error of this approximation via `estimate_error`. If a subclass does not implement its own `estimate_error`, then it will use a default error estimate based on the difference between the estimate over the whole region and the sum of estimates over that region divided into ``2^ndim`` subregions. See Also -------- FixedRule Examples -------- In the following, a custom rule is created which uses 3D Genz-Malik cubature for the estimate of the integral, and the difference between this estimate and a less accurate estimate using 5-node Gauss-Legendre quadrature as an estimate for the error. >>> import numpy as np >>> from scipy.integrate import cubature >>> from scipy.integrate._rules import ( ... Rule, ProductNestedFixed, GenzMalikCubature, GaussLegendreQuadrature ... ) >>> def f(x, r, alphas): ... # f(x) = cos(2*pi*r + alpha @ x) ... # Need to allow r and alphas to be arbitrary shape ... npoints, ndim = x.shape[0], x.shape[-1] ... alphas_reshaped = alphas[np.newaxis, :] ... x_reshaped = x.reshape(npoints, *([1]*(len(alphas.shape) - 1)), ndim) ... return np.cos(2*np.pi*r + np.sum(alphas_reshaped * x_reshaped, axis=-1)) >>> genz = GenzMalikCubature(ndim=3) >>> gauss = GaussKronrodQuadrature(npoints=21) >>> # Gauss-Kronrod is 1D, so we find the 3D product rule: >>> gauss_3d = ProductNestedFixed([gauss, gauss, gauss]) >>> class CustomRule(Rule): ... def estimate(self, f, a, b, args=()): ... return genz.estimate(f, a, b, args) ... def estimate_error(self, f, a, b, args=()): ... return np.abs( ... genz.estimate(f, a, b, args) ... - gauss_3d.estimate(f, a, b, args) ... ) >>> rng = np.random.default_rng() >>> res = cubature( ... f=f, ... a=np.array([0, 0, 0]), ... b=np.array([1, 1, 1]), ... rule=CustomRule(), ... 
args=(rng.random((2,)), rng.random((3, 2, 3))) ... ) >>> res.estimate array([[-0.95179502, 0.12444608], [-0.96247411, 0.60866385], [-0.97360014, 0.25515587]]) """ def estimate(self, f, a, b, args=()): r""" Calculate estimate of integral of `f` in rectangular region described by corners `a` and ``b``. Parameters ---------- f : callable Function to integrate. `f` must have the signature:: f(x : ndarray, \*args) -> ndarray `f` should accept arrays ``x`` of shape:: (npoints, ndim) and output arrays of shape:: (npoints, output_dim_1, ..., output_dim_n) In this case, `estimate` will return arrays of shape:: (output_dim_1, ..., output_dim_n) a, b : ndarray Lower and upper limits of integration as rank-1 arrays specifying the left and right endpoints of the intervals being integrated over. Infinite limits are currently not supported. args : tuple, optional Additional positional args passed to ``f``, if any. Returns ------- est : ndarray Result of estimation. If `f` returns arrays of shape ``(npoints, output_dim_1, ..., output_dim_n)``, then `est` will be of shape ``(output_dim_1, ..., output_dim_n)``. """ raise NotImplementedError def estimate_error(self, f, a, b, args=()): r""" Estimate the error of the approximation for the integral of `f` in rectangular region described by corners `a` and `b`. If a subclass does not override this method, then a default error estimator is used. This estimates the error as ``|est - refined_est|`` where ``est`` is ``estimate(f, a, b)`` and ``refined_est`` is the sum of ``estimate(f, a_k, b_k)`` where ``a_k, b_k`` are the coordinates of each subregion of the region described by ``a`` and ``b``. In the 1D case, this is equivalent to comparing the integral over an entire interval ``[a, b]`` to the sum of the integrals over the left and right subintervals, ``[a, (a+b)/2]`` and ``[(a+b)/2, b]``. Parameters ---------- f : callable Function to estimate error for. 
`f` must have the signature:: f(x : ndarray, \*args) -> ndarray `f` should accept arrays `x` of shape:: (npoints, ndim) and output arrays of shape:: (npoints, output_dim_1, ..., output_dim_n) In this case, `estimate` will return arrays of shape:: (output_dim_1, ..., output_dim_n) a, b : ndarray Lower and upper limits of integration as rank-1 arrays specifying the left and right endpoints of the intervals being integrated over. Infinite limits are currently not supported. args : tuple, optional Additional positional args passed to `f`, if any. Returns ------- err_est : ndarray Result of error estimation. If `f` returns arrays of shape ``(npoints, output_dim_1, ..., output_dim_n)``, then `est` will be of shape ``(output_dim_1, ..., output_dim_n)``. """ est = self.estimate(f, a, b, args) refined_est = 0 for a_k, b_k in _split_subregion(a, b): refined_est += self.estimate(f, a_k, b_k, args) return self.xp.abs(est - refined_est)
Rule
python
walkccc__LeetCode
solutions/1081. Smallest Subsequence of Distinct Characters/1081.py
{ "start": 0, "end": 441 }
class ____: def smallestSubsequence(self, text: str) -> str: ans = [] count = collections.Counter(text) used = [False] * 26 for c in text: count[c] -= 1 if used[ord(c) - ord('a')]: continue while ans and ans[-1] > c and count[ans[-1]] > 0: used[ord(ans[-1]) - ord('a')] = False ans.pop() ans.append(c) used[ord(ans[-1]) - ord('a')] = True return ''.join(ans)
Solution
python
pytorch__pytorch
torch/distributed/_local_tensor/__init__.py
{ "start": 32601, "end": 42242 }
class ____(torch.Tensor): """ LocalTensor is a Tensor subclass that simulates a tensor distributed across multiple SPMD (Single Program, Multiple Data) ranks. Each LocalTensor instance internally holds a mapping from global rank ids to their corresponding local Tensor shards.Operations performed on a LocalTensor are applied independently to each local shard, mimicking distributed computation. Collectives and other distributed operations are handled by mapping them to the local shards as appropriate. Note: This class is primarily intended for debugging and simulating distributed tensor computations on a single process. """ # Map from global rank to the local tensor. _local_tensors: dict[int, torch.Tensor] # Precomputed for speed set of keys from the local tensor map. _ranks: frozenset[int] _size: list[torch.SymInt | int] __slots__ = ["_local_tensors", "_ranks", "_size"] @staticmethod @torch._disable_dynamo def __new__( cls, local_tensors: dict[int, torch.Tensor], requires_grad: bool = False, ) -> "LocalTensor": if any(t.requires_grad for t in local_tensors.values()): raise AssertionError( "Internal local_tensors require grad, but we will ignore those autograd graph. " "Make a custom autograd function and make sure you detach the inner tensors." ) (shape, strides, device, dtype, layout, extra_dispatch_keys) = ( _compute_local_tensor_meta(local_tensors) ) r = torch.Tensor._make_wrapper_subclass( cls, shape, strides=strides, dtype=dtype, device=device, layout=layout, # In place ops potentially change local tensor sizes (e.g. resize_). While # executing an in-place op the return value must be the same as "self" input # otherwise we can introduce errors due to tensor identity changes. Hence we # need to be able to update wrapper subclass sizes after in-place ops. This # dispatch policy allows us to do that. 
dispatch_sizes_strides_policy="sizes", requires_grad=requires_grad, _extra_dispatch_keys=extra_dispatch_keys, ) local_tensors = { r: v if not isinstance(v, AsyncCollectiveTensor) else v.wait() for r, v in local_tensors.items() } r._local_tensors = local_tensors r._ranks = frozenset(local_tensors.keys()) r._size = shape return r @torch._disable_dynamo @mark_subclass_constructor_exportable_experimental # type: ignore[misc] def __init__(self, *args: Any, **kwargs: Any): super().__init__() def __deepcopy__(self, memo: dict[Any, Any] | None) -> "LocalTensor": local_tensors_copy = { r: copy.deepcopy(t, memo) for r, t in self._local_tensors.items() } # pyrefly: ignore [bad-argument-type, bad-argument-count] return LocalTensor(local_tensors_copy, self.requires_grad) def __repr__(self) -> str: # type: ignore[override] parts = [] for k, v in self._local_tensors.items(): # pyrefly: ignore [bad-argument-type] parts.append(f" {k}: {v}") tensors_str = ",\n".join(parts) return f"LocalTensor(\n{tensors_str}\n)" def __getattr__(self, name: str) -> Any: if _is_local_tensor_attr(name): rank = _from_local_tensor_attr(name) if rank not in self._ranks: raise AttributeError(f"Local tensor has no knowledge of rank {rank}") return self._local_tensors[rank] return object.__getattribute__(self, name) def __tensor_flatten__(self) -> tuple[list[str], tuple[Any, ...]]: """ protocol to inform how to flatten a DTensor to local tensor for PT2 tracing """ local_tensor_attrs = [_to_local_tensor_attr(r) for r in self._ranks] return local_tensor_attrs, () @staticmethod def __tensor_unflatten__( inner_tensors: dict[str, Any], flatten_spec: tuple[Any, ...], outer_size: torch.Size, outer_stride: tuple[int, ...], ) -> "LocalTensor": assert flatten_spec is not None, ( "Expecting spec to be not None from `__tensor_flatten__` return value!" 
) local_tensors = { _from_local_tensor_attr(a): t for a, t in inner_tensors.items() } # pyrefly: ignore [bad-argument-type, bad-argument-count] return LocalTensor(local_tensors) @classmethod @torch._disable_dynamo def __torch_dispatch__( # type: ignore[override] cls, func: Any, types: tuple[Any, ...], args: tuple[Any, ...] = (), kwargs: dict[str, Any] | None = None, ) -> Any: if kwargs is None: kwargs = {} # This is horribly inefficient flat_args, args_spec = pytree.tree_flatten((args, kwargs)) local_tensor = None for arg in flat_args: if isinstance(arg, LocalTensor): local_tensor = arg break assert local_tensor is not None, ( "At least one of the arguments must be a LocalTensor" ) # Check for unrecognized tensor subclasses (but allow regular tensors and scalars) has_unrecognized_types = _check_for_subclass(flat_args) if has_unrecognized_types: unrecognized_types = [ type(x) for x in flat_args if _check_for_subclass_arg(x) ] not_implemented_log.debug( "LocalTensor unrecognized subclass(es): %s", unrecognized_types ) return NotImplemented with LocalTensorMode(local_tensor._ranks): return func(*args, **kwargs) def numpy(self, *, force: bool = False) -> Any: if HAS_NUMPY: return self.reconcile().numpy(force=force) else: raise RuntimeError("Numpy is not available") def contiguous( self, memory_format: torch.memory_format = torch.contiguous_format, ) -> torch.Tensor: # pyrefly: ignore [bad-argument-type] return LocalTensor( # pyrefly: ignore [bad-argument-count] { r: t.contiguous(memory_format=memory_format) for r, t in self._local_tensors.items() } ) def is_contiguous( self, memory_format: torch.memory_format = torch.contiguous_format, ) -> bool: return all( t.is_contiguous(memory_format=memory_format) for t in self._local_tensors.values() ) def tolist(self) -> list[Any]: """ Try to reconcile, if successful convert to list, otherwise if dtype is integer, convert to list of local integers. 
""" equal_obj = self._equal_local_tensors() if isinstance(equal_obj, torch.Tensor): return equal_obj.tolist() if isinstance(equal_obj, torch.Size): if not self.dtype.is_floating_point and not self.dtype.is_complex: ranks = sorted(self._ranks) local_lists = [self._local_tensors[r].tolist() for r in ranks] return _reduce_multidim_lists( local_lists, lambda values: torch.SymInt( LocalIntNode(dict(zip(ranks, values, strict=True))) ), ) raise RuntimeError("Cannot convert local tensor to list") def reconcile(self) -> torch.Tensor: """ Reconciles the LocalTensor into a single torch.Tensor by ensuring all local shards are identical and returning a detached clone of one of them. Note: This method is useful for extracting a representative tensor from a LocalTensor when all shards are expected to be the same, such as after a collective operation that synchronizes all ranks. """ # Force all local tensor shards across ranks to be the same equal_obj = self._equal_local_tensors() assert isinstance(equal_obj, torch.Tensor), ( "LocalTensor shards must be the same to reconcile" ) cl = equal_obj.clone().detach() cl.requires_grad_(self.requires_grad) return cl def _equal_local_tensors(self) -> torch.Tensor | torch.Size | None: it = iter(self._local_tensors.values()) t1 = next(it) if all(t2.equal(t1) for t2 in it): return t1 if all(t2.shape == t1.shape for t2 in it): return t1.shape return None def _sync_meta(self) -> None: with no_dispatch(): (shape, strides, device, dtype, layout, extra_dispatch_keys) = ( _compute_local_tensor_meta(self._local_tensors) ) self._size = shape _GLOBAL_LOCAL_TENSOR_MODE: list["LocalTensorMode"] = [] # When running under local runner each thread must create its own local tensor mode # so that they do not interfere with each other. 
_THREAD_LOCAL_TENSOR_MODE: threading.local = threading.local() def get_local_tensor_mode_list() -> list["LocalTensorMode"]: if not hasattr(_THREAD_LOCAL_TENSOR_MODE, "value"): _THREAD_LOCAL_TENSOR_MODE.value = [] if len(_THREAD_LOCAL_TENSOR_MODE.value) > 0: return _THREAD_LOCAL_TENSOR_MODE.value return _GLOBAL_LOCAL_TENSOR_MODE
LocalTensor
python
Textualize__textual
src/textual/_markup_playground.py
{ "start": 236, "end": 4957 }
class ____(App): TITLE = "Markup Playground" CSS = """ Screen { layout: vertical; #editor { width: 1fr; height: 1fr; border: tab $foreground 50%; padding: 1; margin: 1 0 0 0; &:focus { border: tab $primary; } } #variables { width: 1fr; height: 1fr; border: tab $foreground 50%; padding: 1; margin: 1 0 0 1; &:focus { border: tab $primary; } } #variables.-bad-json { border: tab $error; } #results-container { border: tab $success; &.-error { border: tab $error; } overflow-y: auto; } #results { padding: 1 1; width: 1fr; } #spans-container { border: tab $success; overflow-y: auto; margin: 0 0 0 1; } #spans { padding: 1 1; width: 1fr; } HorizontalGroup { height: 1fr; } } """ AUTO_FOCUS = "#editor" BINDINGS = [ ("f1", "toggle('show_variables')", "Variables"), ("f2", "toggle('show_spans')", "Spans"), ] variables: reactive[dict[str, object]] = reactive({}) show_variables = reactive(True) show_spans = reactive(False) def compose(self) -> ComposeResult: with containers.HorizontalGroup(): yield (editor := TextArea(id="editor", soft_wrap=False)) yield (variables := TextArea("", id="variables", language="json")) editor.border_title = "Markup" variables.border_title = "Variables (JSON)" with containers.HorizontalGroup(): with containers.VerticalScroll(id="results-container") as container: yield Static(id="results") container.border_title = "Output" with containers.VerticalScroll(id="spans-container") as container: yield Pretty([], id="spans") container.border_title = "Spans" yield Footer() def watch_show_variables(self, show_variables: bool) -> None: self.query_one("#variables").display = show_variables def watch_show_spans(self, show_spans: bool) -> None: self.query_one("#spans-container").display = show_spans @on(TextArea.Changed, "#editor") def on_markup_changed(self, event: TextArea.Changed) -> None: self.update_markup() def update_markup(self) -> None: results = self.query_one("#results", Static) editor = self.query_one("#editor", TextArea) spans = self.query_one("#spans", 
Pretty) try: content = Content.from_markup(editor.text, **self.variables) results.update(content) spans.update(content.spans) except Exception: from rich.traceback import Traceback results.update(Traceback()) spans.update([]) self.query_one("#results-container").add_class("-error").scroll_end( animate=False ) else: self.query_one("#results-container").remove_class("-error") def watch_variables(self, variables: dict[str, object]) -> None: self.update_markup() @on(TextArea.Changed, "#variables") def on_variables_change(self, event: TextArea.Changed) -> None: variables_text_area = self.query_one("#variables", TextArea) try: variables = json.loads(variables_text_area.text) except Exception as error: variables_text_area.add_class("-bad-json") self.variables = {} else: variables_text_area.remove_class("-bad-json") self.variables = variables @on(events.DescendantBlur, "#variables") def on_variables_blur(self) -> None: variables_text_area = self.query_one("#variables", TextArea) try: variables = json.loads(variables_text_area.text) except Exception as error: if not variables_text_area.has_class("-bad-json"): self.notify(f"Bad JSON: ${error}", title="Variables", severity="error") variables_text_area.add_class("-bad-json") else: variables_text_area.remove_class("-bad-json") variables_text_area.text = json.dumps(variables, indent=4) self.variables = variables
MarkupPlayground
python
readthedocs__readthedocs.org
readthedocs/search/api/v3/tests/test_api.py
{ "start": 2655, "end": 13365 }
class ____(SearchTestBase): def setUp(self): super().setUp() self.user = get(User) self.another_user = get(User) self.project = get( Project, slug="project", users=[self.user], privacy_level=PUBLIC ) self.another_project = get( Project, slug="another-project", users=[self.another_user], privacy_level=PUBLIC, ) self.project.versions.update(privacy_level=PUBLIC, active=True, built=True) self.version = self.project.versions.first() self.another_project.versions.update( privacy_level=PUBLIC, active=True, built=True ) self.another_version = self.another_project.versions.first() self.url = reverse("search_api_v3") self.client.force_login(self.user) for version in Version.objects.all(): self.create_index(version) def get(self, *args, **kwargs): return self.client.get(*args, **kwargs) def test_search_no_projects(self): resp = self.get(self.url, data={"q": "test"}) self.assertEqual(resp.status_code, 200) results = resp.data["results"] projects = resp.data["projects"] self.assertEqual(results, []) self.assertEqual(projects, []) self.assertEqual(resp.data["query"], "test") def test_search_project(self): resp = self.get(self.url, data={"q": "project:project test"}) self.assertEqual(resp.status_code, 200) projects = resp.data["projects"] results = resp.data["results"] self.assertEqual( projects, [{"slug": "project", "versions": [{"slug": "latest"}]}] ) self.assertEqual(len(results), 1) self.assertEqual(resp.data["query"], "test") def test_search_project_explicit_version(self): resp = self.get(self.url, data={"q": "project:project/latest test"}) self.assertEqual(resp.status_code, 200) projects = resp.data["projects"] results = resp.data["results"] self.assertEqual( projects, [{"slug": "project", "versions": [{"slug": "latest"}]}] ) self.assertEqual(len(results), 1) self.assertEqual(resp.data["query"], "test") new_version = get( Version, project=self.project, slug="v2", privacy_level=PUBLIC, built=True, active=True, ) self.create_index(new_version) resp = self.get(self.url, 
data={"q": "project:project/v2 test"}) self.assertEqual(resp.status_code, 200) projects = resp.data["projects"] results = resp.data["results"] self.assertEqual(projects, [{"slug": "project", "versions": [{"slug": "v2"}]}]) self.assertEqual(len(results), 1) self.assertEqual(resp.data["query"], "test") def test_search_project_explicit_version_unexisting(self): resp = self.get(self.url, data={"q": "project:project/v3 test"}) self.assertEqual(resp.status_code, 200) projects = resp.data["projects"] results = resp.data["results"] self.assertEqual(projects, []) self.assertEqual(results, []) self.assertEqual(resp.data["query"], "test") def test_search_project_unexisting(self): resp = self.get(self.url, data={"q": "project:foobar/latest test"}) self.assertEqual(resp.status_code, 200) projects = resp.data["projects"] results = resp.data["results"] self.assertEqual(projects, []) self.assertEqual(results, []) self.assertEqual(resp.data["query"], "test") def test_search_project_valid_and_invalid(self): resp = self.get( self.url, data={"q": "project:foobar/latest project:project/latest test"} ) self.assertEqual(resp.status_code, 200) projects = resp.data["projects"] results = resp.data["results"] self.assertEqual( projects, [{"slug": "project", "versions": [{"slug": "latest"}]}] ) self.assertEqual(len(results), 1) self.assertEqual(resp.data["query"], "test") def test_search_multiple_projects(self): resp = self.get( self.url, data={"q": "project:project project:another-project test"} ) self.assertEqual(resp.status_code, 200) projects = resp.data["projects"] results = resp.data["results"] self.assertEqual( projects, [ {"slug": "project", "versions": [{"slug": "latest"}]}, {"slug": "another-project", "versions": [{"slug": "latest"}]}, ], ) self.assertEqual(len(results), 2) self.assertEqual(resp.data["query"], "test") def test_search_user_me_anonymous_user(self): self.client.logout() resp = self.get(self.url, data={"q": "user:@me test"}) projects = resp.data["projects"] results = 
resp.data["results"] self.assertEqual(projects, []) self.assertEqual(results, []) self.assertEqual(resp.data["query"], "test") def test_search_user_me_logged_in_user(self): self.client.force_login(self.user) resp = self.get(self.url, data={"q": "user:@me test"}) projects = resp.data["projects"] results = resp.data["results"] self.assertEqual( projects, [{"slug": "project", "versions": [{"slug": "latest"}]}] ) self.assertEqual(len(results), 1) self.assertEqual(resp.data["query"], "test") self.client.force_login(self.another_user) resp = self.get(self.url, data={"q": "user:@me test"}) projects = resp.data["projects"] results = resp.data["results"] self.assertEqual( projects, [{"slug": "another-project", "versions": [{"slug": "latest"}]}] ) self.assertEqual(len(results), 1) self.assertEqual(resp.data["query"], "test") def test_search_user_invalid_value(self): self.client.force_login(self.user) resp = self.get(self.url, data={"q": "user:test test"}) projects = resp.data["projects"] results = resp.data["results"] self.assertEqual(projects, []) self.assertEqual(results, []) self.assertEqual(resp.data["query"], "test") def test_search_user_and_project(self): self.client.force_login(self.user) resp = self.get( self.url, data={"q": "user:@me project:another-project/latest test"} ) projects = resp.data["projects"] results = resp.data["results"] self.assertEqual( projects, [ {"slug": "another-project", "versions": [{"slug": "latest"}]}, {"slug": "project", "versions": [{"slug": "latest"}]}, ], ) self.assertEqual(len(results), 2) self.assertEqual(resp.data["query"], "test") def test_search_subprojects(self): subproject = get( Project, slug="subproject", users=[self.user], privacy_level=PUBLIC ) self.project.add_subproject(subproject) get(Version, slug="v2", project=self.project, active=True, built=True) get(Version, slug="v3", project=self.project, active=True, built=True) get(Version, slug="v2", project=subproject, active=True, built=True) get(Version, slug="v4", 
project=subproject, active=True, built=True) subproject.versions.update(built=True, active=True, privacy_level=PUBLIC) self.project.versions.update(built=True, active=True, privacy_level=PUBLIC) for version in itertools.chain( subproject.versions.all(), self.project.versions.all() ): self.create_index(version) # Search default version of the project and its subprojects. resp = self.get(self.url, data={"q": "subprojects:project test"}) projects = resp.data["projects"] results = resp.data["results"] self.assertEqual( projects, [ {"slug": "project", "versions": [{"slug": "latest"}]}, {"slug": "subproject", "versions": [{"slug": "latest"}]}, ], ) self.assertEqual(len(results), 2) self.assertEqual(resp.data["query"], "test") # Explicitly search on the latest version. resp = self.get(self.url, data={"q": "subprojects:project/latest test"}) projects = resp.data["projects"] results = resp.data["results"] self.assertEqual( projects, [ {"slug": "project", "versions": [{"slug": "latest"}]}, {"slug": "subproject", "versions": [{"slug": "latest"}]}, ], ) self.assertEqual(len(results), 2) self.assertEqual(resp.data["query"], "test") # Explicitly search on the v2 version. resp = self.get(self.url, data={"q": "subprojects:project/v2 test"}) projects = resp.data["projects"] results = resp.data["results"] self.assertEqual( projects, [ {"slug": "project", "versions": [{"slug": "v2"}]}, {"slug": "subproject", "versions": [{"slug": "v2"}]}, ], ) self.assertEqual(len(results), 2) self.assertEqual(resp.data["query"], "test") # Explicitly search on the v3 version. # Only the main project has this version, # we will default to the default version of its subproject. 
resp = self.get(self.url, data={"q": "subprojects:project/v3 test"}) projects = resp.data["projects"] results = resp.data["results"] self.assertEqual( projects, [ {"slug": "project", "versions": [{"slug": "v3"}]}, {"slug": "subproject", "versions": [{"slug": "latest"}]}, ], ) self.assertEqual(len(results), 2) self.assertEqual(resp.data["query"], "test") # Explicitly search on the v4 version. # The main project doesn't have this version, # we include results from its subprojects only. resp = self.get(self.url, data={"q": "subprojects:project/v4 test"}) projects = resp.data["projects"] results = resp.data["results"] self.assertEqual( projects, [{"slug": "subproject", "versions": [{"slug": "v4"}]}] ) self.assertEqual(len(results), 1) self.assertEqual(resp.data["query"], "test") @pytest.mark.proxito @override_settings(PUBLIC_DOMAIN="readthedocs.io")
SearchAPITest
python
scipy__scipy
scipy/stats/tests/test_distributions.py
{ "start": 258694, "end": 259558 }
class ____: def test_docstrings(self): # See ticket #761 if stats.rayleigh.__doc__ is not None: assert_("rayleigh" in stats.rayleigh.__doc__.lower()) if stats.bernoulli.__doc__ is not None: assert_("bernoulli" in stats.bernoulli.__doc__.lower()) def test_no_name_arg(self): # If name is not given, construction shouldn't fail. See #1508. stats.rv_continuous() stats.rv_discrete() def test_args_reduce(): a = array([1, 3, 2, 1, 2, 3, 3]) b, c = argsreduce(a > 1, a, 2) assert_array_equal(b, [3, 2, 2, 3, 3]) assert_array_equal(c, [2]) b, c = argsreduce(2 > 1, a, 2) assert_array_equal(b, a) assert_array_equal(c, [2] * np.size(a)) b, c = argsreduce(a > 0, a, 2) assert_array_equal(b, a) assert_array_equal(c, [2] * np.size(a))
TestDocstring
python
pytorch__pytorch
torch/distributed/tensor/examples/torchrec_sharding_example.py
{ "start": 1000, "end": 16727 }
class ____(torch.Tensor): local_shards: list[torch.Tensor] storage_meta: TensorStorageMetadata @staticmethod def __new__( cls, local_shards: list[torch.Tensor], offsets: list[torch.Size] ) -> "LocalShardsWrapper": assert len(local_shards) > 0 assert len(local_shards) == len(offsets) assert local_shards[0].ndim == 2 # we calculate the total tensor size by "concat" on second tensor dimension cat_tensor_shape = list(local_shards[0].shape) if len(local_shards) > 1: # column-wise sharding for shard_size in [s.shape for s in local_shards[1:]]: cat_tensor_shape[1] += shard_size[1] # according to DCP, each chunk is expected to have the same properties of the # TensorStorageMetadata that includes it. Vice versa, the wrapper's properties # should also be the same with that of its first chunk. wrapper_properties = TensorProperties.create_from_tensor(local_shards[0]) wrapper_shape = torch.Size(cat_tensor_shape) chunks_meta = [ ChunkStorageMetadata(o, s.shape) for s, o in zip(local_shards, offsets) ] r = torch.Tensor._make_wrapper_subclass( cls, wrapper_shape, ) r.shards = local_shards r.storage_meta = TensorStorageMetadata( properties=wrapper_properties, size=wrapper_shape, chunks=chunks_meta, ) return r # necessary for ops dispatching from this subclass to its local shards @classmethod def __torch_dispatch__(cls, func, types, args=(), kwargs=None): # type: ignore[override] kwargs = kwargs or {} # TODO: we shall continually extend this function to support more ops if needed if func in supported_ops: res_shards_list = [ func(shard, *args[1:], **kwargs) # pyrefly: ignore [index-error] for shard in args[0].shards ] # pyrefly: ignore [index-error] return LocalShardsWrapper(res_shards_list, args[0].shard_offsets) else: raise NotImplementedError( f"{func} is not supported for LocalShardsWrapper!" 
) @property def shards(self) -> list[torch.Tensor]: return self.local_shards @shards.setter def shards(self, local_shards: list[torch.Tensor]): self.local_shards = local_shards @cached_property def shard_sizes(self) -> list[torch.Size]: return [chunk.sizes for chunk in self.storage_meta.chunks] @cached_property def shard_offsets(self) -> list[torch.Size]: return [chunk.offsets for chunk in self.storage_meta.chunks] def run_torchrec_row_wise_even_sharding_example(rank, world_size): # row-wise even sharding example: # One table is evenly sharded by rows within the global ProcessGroup. # In our example, the table's num_embedding is 8, and the embedding dim is 16 # The global ProcessGroup has 4 ranks, so each rank will have one 2 by 16 local # shard. # device mesh is a representation of the worker ranks # create a 1-D device mesh that includes every rank device_type = get_device_type() device = torch.device(device_type) device_mesh = init_device_mesh(device_type=device_type, mesh_shape=(world_size,)) # manually create the embedding table's local shards num_embeddings = 8 embedding_dim = 16 # tensor shape local_shard_shape = torch.Size( [num_embeddings // world_size, embedding_dim] # (local_rows, local_cols) ) # tensor offset local_shard_offset = torch.Size((rank * 2, embedding_dim)) # tensor local_tensor = torch.randn(local_shard_shape, device=device) # row-wise sharding: one shard per rank # create the local shards wrapper # pyrefly: ignore [no-matching-overload] local_shards_wrapper = LocalShardsWrapper( local_shards=[local_tensor], offsets=[local_shard_offset], ) ########################################################################### # example 1: transform local_shards into DTensor # usage in TorchRec: # ShardedEmbeddingCollection stores model parallel params in # _model_parallel_name_to_sharded_tensor which is initialized in # _initialize_torch_state() and torch.Tensor params are transformed # into ShardedTensor by ShardedTensor._init_from_local_shards(). 
# # This allows state_dict() to always return ShardedTensor objects. # this is the sharding placement we use in DTensor to represent row-wise sharding # row_wise_sharding_placements means that the global tensor is sharded by first dim # over the 1-d mesh. row_wise_sharding_placements: list[Placement] = [Shard(0)] # create a DTensor from the local shard dtensor = DTensor.from_local( local_shards_wrapper, device_mesh, row_wise_sharding_placements, run_check=False ) # display the DTensor's sharding visualize_sharding(dtensor, header="Row-wise even sharding example in DTensor") ########################################################################### # example 2: transform DTensor into local_shards # usage in TorchRec: # In ShardedEmbeddingCollection's load_state_dict pre hook # _pre_load_state_dict_hook, if the source param is a ShardedTensor # then we need to transform it into its local_shards. # transform DTensor into LocalShardsWrapper dtensor_local_shards = dtensor.to_local() assert isinstance(dtensor_local_shards, LocalShardsWrapper) shard_tensor = dtensor_local_shards.shards[0] assert torch.equal(shard_tensor, local_tensor) assert dtensor_local_shards.shard_sizes[0] == local_shard_shape # unwrap shape assert dtensor_local_shards.shard_offsets[0] == local_shard_offset # unwrap offset def run_torchrec_row_wise_uneven_sharding_example(rank, world_size): # row-wise uneven sharding example: # One table is unevenly sharded by rows within the global ProcessGroup. 
# In our example, the table's num_embedding is 8, and the embedding dim is 16 # The global ProcessGroup has 4 ranks, and each rank will have the local shard # of shape: # rank 0: [1, 16] # rank 1: [3, 16] # rank 2: [1, 16] # rank 3: [3, 16] # device mesh is a representation of the worker ranks # create a 1-D device mesh that includes every rank device_type = get_device_type() device = torch.device(device_type) device_mesh = init_device_mesh(device_type=device_type, mesh_shape=(world_size,)) # manually create the embedding table's local shards num_embeddings = 8 embedding_dim = 16 emb_table_shape = torch.Size([num_embeddings, embedding_dim]) # tensor shape local_shard_shape = ( torch.Size([1, embedding_dim]) if rank % 2 == 0 else torch.Size([3, embedding_dim]) ) # tensor offset local_shard_offset = torch.Size((rank // 2 * 4 + rank % 2 * 1, embedding_dim)) # tensor local_tensor = torch.randn(local_shard_shape, device=device) # local shards # row-wise sharding: one shard per rank # create the local shards wrapper # pyrefly: ignore [no-matching-overload] local_shards_wrapper = LocalShardsWrapper( local_shards=[local_tensor], offsets=[local_shard_offset], ) ########################################################################### # example 1: transform local_shards into DTensor # create the DTensorMetadata which torchrec should provide row_wise_sharding_placements: list[Placement] = [Shard(0)] # note: for uneven sharding, we need to specify the shape and stride because # DTensor would assume even sharding and compute shape/stride based on the # assumption. Torchrec needs to pass in this information explicitly. 
# shape/stride are global tensor's shape and stride dtensor = DTensor.from_local( local_shards_wrapper, # a torch.Tensor subclass device_mesh, # DeviceMesh row_wise_sharding_placements, # List[Placement] run_check=False, shape=emb_table_shape, # this is required for uneven sharding stride=(embedding_dim, 1), ) # so far visualize_sharding() cannot print correctly for unevenly sharded DTensor # because it relies on offset computation which assumes even sharding. visualize_sharding(dtensor, header="Row-wise uneven sharding example in DTensor") # check the dtensor has the correct shape and stride on all ranks assert dtensor.shape == emb_table_shape assert dtensor.stride() == (embedding_dim, 1) ########################################################################### # example 2: transform DTensor into local_shards # note: DTensor.to_local() always returns a LocalShardsWrapper dtensor_local_shards = dtensor.to_local() assert isinstance(dtensor_local_shards, LocalShardsWrapper) shard_tensor = dtensor_local_shards.shards[0] assert torch.equal(shard_tensor, local_tensor) assert dtensor_local_shards.shard_sizes[0] == local_shard_shape # unwrap shape assert dtensor_local_shards.shard_offsets[0] == local_shard_offset # unwrap offset def run_torchrec_table_wise_sharding_example(rank, world_size): # table-wise example: # each rank in the global ProcessGroup holds one different table. # In our example, the table's num_embedding is 8, and the embedding dim is 16 # The global ProcessGroup has 4 ranks, so each rank will have one 8 by 16 complete # table as its local shard. device_type = get_device_type() device = torch.device(device_type) # note: without initializing this mesh, the following local_tensor will be put on # device cuda:0. 
init_device_mesh(device_type=device_type, mesh_shape=(world_size,)) # manually create the embedding table's local shards num_embeddings = 8 embedding_dim = 16 emb_table_shape = torch.Size([num_embeddings, embedding_dim]) # for table i, if the current rank holds the table, then the local shard is # a LocalShardsWrapper containing the tensor; otherwise the local shard is # an empty torch.Tensor table_to_shards = {} # map {table_id: local shard of table_id} table_to_local_tensor = {} # map {table_id: local tensor of table_id} # create 4 embedding tables and place them on different ranks # each rank will hold one complete table, and the dict will store # the corresponding local shard. for i in range(world_size): # tensor local_tensor = ( torch.randn(*emb_table_shape, device=device) if rank == i else torch.empty(0, device=device) ) table_to_local_tensor[i] = local_tensor # tensor offset local_shard_offset = torch.Size((0, 0)) # wrap local shards into a wrapper local_shards_wrapper = ( # pyrefly: ignore [no-matching-overload] LocalShardsWrapper( local_shards=[local_tensor], offsets=[local_shard_offset], ) if rank == i else local_tensor ) table_to_shards[i] = local_shards_wrapper ########################################################################### # example 1: transform local_shards into DTensor table_to_dtensor = {} # same purpose as _model_parallel_name_to_sharded_tensor table_wise_sharding_placements = [Replicate()] # table-wise sharding for table_id, local_shards in table_to_shards.items(): # create a submesh that only contains the rank we place the table # note that we cannot use ``init_device_mesh'' to create a submesh # so we choose to use the `DeviceMesh` api to directly create a DeviceMesh device_submesh = DeviceMesh( device_type=device_type, mesh=torch.tensor( [table_id], dtype=torch.int64 ), # table ``table_id`` is placed on rank ``table_id`` ) # create a DTensor from the local shard for the current table # note: for uneven sharding, we need to specify 
the shape and stride because # DTensor would assume even sharding and compute shape/stride based on the # assumption. Torchrec needs to pass in this information explicitly. dtensor = DTensor.from_local( local_shards, device_submesh, table_wise_sharding_placements, run_check=False, shape=emb_table_shape, # this is required for uneven sharding stride=(embedding_dim, 1), ) table_to_dtensor[table_id] = dtensor # print each table's sharding for table_id, dtensor in table_to_dtensor.items(): visualize_sharding( dtensor, header=f"Table-wise sharding example in DTensor for Table {table_id}", ) # check the dtensor has the correct shape and stride on all ranks assert dtensor.shape == emb_table_shape assert dtensor.stride() == (embedding_dim, 1) ########################################################################### # example 2: transform DTensor into torch.Tensor for table_id, local_tensor in table_to_local_tensor.items(): # important: note that DTensor.to_local() always returns an empty torch.Tensor # no matter what was passed to DTensor._local_tensor. 
dtensor_local_shards = table_to_dtensor[table_id].to_local() if rank == table_id: assert isinstance(dtensor_local_shards, LocalShardsWrapper) shard_tensor = dtensor_local_shards.shards[0] assert torch.equal(shard_tensor, local_tensor) # unwrap tensor assert ( dtensor_local_shards.shard_sizes[0] == emb_table_shape ) # unwrap shape assert dtensor_local_shards.shard_offsets[0] == torch.Size( (0, 0) ) # unwrap offset else: assert dtensor_local_shards.numel() == 0 def run_example(rank, world_size, example_name): # the dict that stores example code name_to_example_code = { "row-wise-even": run_torchrec_row_wise_even_sharding_example, "row-wise-uneven": run_torchrec_row_wise_uneven_sharding_example, "table-wise": run_torchrec_table_wise_sharding_example, } if example_name not in name_to_example_code: print(f"example for {example_name} does not exist!") return # the example to run example_func = name_to_example_code[example_name] # set manual seed torch.manual_seed(0) # run the example example_func(rank, world_size) if __name__ == "__main__": # this script is launched via torchrun which automatically manages ProcessGroup rank = int(os.environ["RANK"]) world_size = int(os.environ["WORLD_SIZE"]) assert world_size == 4 # our example uses 4 worker ranks # parse the arguments parser = argparse.ArgumentParser( description="torchrec sharding examples", formatter_class=argparse.RawTextHelpFormatter, ) example_prompt = ( "choose one sharding example from below:\n" "\t1. row-wise-even;\n" "\t2. row-wise-uneven\n" "\t3. table-wise\n" "e.g. you want to try the row-wise even sharding example, please input 'row-wise-even'\n" ) parser.add_argument("-e", "--example", help=example_prompt, required=True) args = parser.parse_args() run_example(rank, world_size, args.example)
LocalShardsWrapper
python
sympy__sympy
sympy/integrals/manualintegrate.py
{ "start": 7930, "end": 8081 }
class ____(HyperbolicRule): """integrate(sinh(x), x) -> cosh(x)""" def eval(self) -> Expr: return cosh(self.variable) @dataclass
SinhRule
python
astropy__astropy
astropy/coordinates/tests/test_masked.py
{ "start": 12338, "end": 13202 }
class ____(TestFrame): """Tests that mask is calculated properly for SkyCoord. Note that this does all the tests from TestFrame, as well as a few specific to SkyCoord, i.e., that use attributes the frame does not have. """ @classmethod def setup_class(cls): super().setup_class() cls.p = np.linspace(900, 1000, cls.msph.size) << u.hPa cls.mask_p = np.array([True, False, False, False, False, False]) cls.mp = Masked(cls.p, cls.mask_p) # Ensure we have an attribute not associated with the frame. cls.fk5 = SkyCoord(cls.msph, frame="fk5", pressure=cls.mp) def test_non_frame_attribute(self): assert_array_equal(self.fk5.get_mask("pressure"), self.mask_p) assert_array_equal( self.fk5.get_mask("pressure", "data"), self.mask | self.mask_p )
TestSkyCoord
python
kamyu104__LeetCode-Solutions
Python/subrectangle-queries.py
{ "start": 109, "end": 1044 }
class ____(object): def __init__(self, rectangle): """ :type rectangle: List[List[int]] """ self.__rectangle = rectangle self.__updates = [] def updateSubrectangle(self, row1, col1, row2, col2, newValue): """ :type row1: int :type col1: int :type row2: int :type col2: int :type newValue: int :rtype: None """ self.__updates.append((row1, col1, row2, col2, newValue)) def getValue(self, row, col): """ :type row: int :type col: int :rtype: int """ for (row1, col1, row2, col2, newValue) in reversed(self.__updates): if row1 <= row <= row2 and col1 <= col <= col2: return newValue return self.__rectangle[row][col] # Time: ctor: O(1) # update: O(m * n) # get: O(1) # Space: O(1)
SubrectangleQueries
python
pytorch__pytorch
torch/_ops.py
{ "start": 31576, "end": 42312 }
class ____(OperatorBase, Generic[_P, _T]):
    """A single, fully-resolved overload of an operator.

    Wraps the concrete callable ``op`` for one schema/overload pair, carries
    the parsed ``torch._C.FunctionSchema``, and implements the Python-side
    dispatch pre-computation (``_get_dispatch``) with per-key caching in
    ``self._dispatch_cache``.  Instances are treated as immutable singletons
    (see ``__deepcopy__``).
    """

    def __init__(
        self,
        overloadpacket: "OpOverloadPacket",
        op: Callable[_P, _T],
        op_dk: Callable[Concatenate[DispatchKey, _P], _T],
        schema: torch._C.FunctionSchema,
        tags: list[Any],
    ) -> None:
        super().__init__()
        self._op = op
        self._op_dk = op_dk
        self._schema = schema
        self._overloadpacket = overloadpacket
        self._tags = tags
        # An empty overload name in the schema means the "default" overload.
        self._overloadname = (
            "default" if schema.overload_name == "" else schema.overload_name
        )
        if tags:
            self._nondeterministic_seeded = torch.Tag.nondeterministic_seeded in tags
        # Fully-qualified name: "ns::op" plus ".overload" when present.
        self._name = self._schema.name
        if schema.overload_name:
            self._name += "." + schema.overload_name
        self.__name__ = f"{self._schema.name.split('::')[1]}.{self._overloadname}"
        self.__module__ = overloadpacket.__module__
        op.__module__ = overloadpacket.__module__
        self.__qualname__ = self._name
        self.__annotations__ = {}

        # If the OpOverload was constructed from a Library.def in Python.
        self._defined_in_python = self.__qualname__ in torch.library._defs

        # Logic replicated from aten/src/ATen/native/MathBitsFallback.h
        is_write = None
        for a in self._schema.arguments:
            # pyrefly: ignore  # bad-assignment
            if a.alias_info is None:
                continue
            if is_write is None:
                is_write = a.alias_info.is_write
            else:
                # We will conservatively call mixed mutable/non-mutable
                # aliased inputs as NOT a view
                is_write = a.alias_info.is_write or is_write
        # A view op has aliased arguments none of which are written to.
        self.is_view = is_write is not None and not is_write

    @cached_property
    def _namespace(self) -> str:
        # Namespace part of "ns::op", e.g. "aten".
        return self._schema.name.split("::", maxsplit=1)[0]

    @cached_property
    def _opname(self) -> str:
        # Operator part of "ns::op", e.g. "add".
        return self._schema.name.split("::", maxsplit=1)[1]

    @cached_property
    def _handle(self) -> torch._C._DispatchOperatorHandle:
        # C++ dispatcher handle for this (name, overload) pair.
        return torch._C._dispatch_find_schema_or_throw(
            self._schema.name, self._schema.overload_name
        )

    # it's a no-op since OpOverload object is immutable and must be unique for a given op overload.
    def __deepcopy__(self, memo=None):
        return self

    def __repr__(self):
        return f"<OpOverload(op='{self._namespace}.{self._opname}', overload='{self._overloadname}')>"

    # Use positional-only argument to avoid naming collision with aten ops arguments
    # that are named "self". This way, all the aten ops can be called by kwargs.
    def __call__(self, /, *args: _P.args, **kwargs: _P.kwargs) -> _T:
        return self._op(*args, **kwargs)

    # Use positional-only argument to avoid naming collision with aten ops arguments
    # that are named "self". This way, all the aten ops can be called by kwargs.
    def redispatch(
        self, /, keyset: torch._C.DispatchKeySet, *args: _P.args, **kwargs: _P.kwargs
    ) -> _T:
        """Re-enter the C++ dispatcher with an explicit dispatch key set."""
        return self._handle.redispatch_boxed(keyset, *args, **kwargs)  # type: ignore[return-value]

    def __hash__(self):
        return hash(self._op)

    # `my_namespace.my_op_name.overload_name`
    def __str__(self):
        return "{}.{}.{}".format(*self._schema.name.split("::"), self._overloadname)

    def has_kernel_for_dispatch_key(self, k: DispatchKey) -> bool:
        # True if either a Python-registered or a C++-registered kernel exists.
        return super().has_kernel_for_dispatch_key(
            k
        ) or torch._C._dispatch_has_kernel_for_dispatch_key(self.name(), k)

    def has_kernel_for_any_dispatch_key(self, ks: torch._C.DispatchKeySet) -> bool:
        return torch._C._dispatch_has_kernel_for_any_dispatch_key(
            self.name(), ks
        ) or super().has_kernel_for_any_dispatch_key(ks)

    @property
    def namespace(self) -> str:
        return self._namespace

    def _can_decompose(self) -> bool:
        # Decomposable iff a CompositeImplicitAutograd kernel is registered
        # (either in Python or via the C++ dispatcher).
        dk = DispatchKey.CompositeImplicitAutograd
        return dk in self.py_kernels or torch._C._dispatch_has_kernel_for_dispatch_key(
            self.name(), dk
        )

    def decompose(self, *args: _P.args, **kwargs: _P.kwargs) -> _T:
        """Run the CompositeImplicitAutograd decomposition for this overload.

        Returns ``NotImplemented`` when no such kernel is registered.
        """
        dk = DispatchKey.CompositeImplicitAutograd
        if dk in self.py_kernels:
            # NB: This branch is not too necessary anymore, because we can
            # apply Python CompositeImplicitAutograd *before* tracing
            # using Python dispatcher (also taking advantage of the autograd
            # formula).  But it's included for completeness
            return self.py_kernels[dk](*args, **kwargs)
        elif torch._C._dispatch_has_kernel_for_dispatch_key(self.name(), dk):
            return self._op_dk(dk, *args, **kwargs)
        else:
            return NotImplemented  # pyrefly: ignore [bad-return]

    # Remove a dispatch key from the dispatch cache.  This will force it to get
    # recomputed the next time.  Does nothing
    # WARNING: if you register a dispatch key to py_kernels of an OpOverload,
    # calling _del_dispatch on that key is NOT sufficient to apply your change,
    # because a single registration may affect MULTIPLE dispatch keys (e.g.,
    # registering Autograd affects AutogradCPU).  del_dispatch is to be used
    # only if you are specifically modifying how get_dispatch handles a
    # particular input 'key'.
    def _uncache_dispatch(self, key: DispatchKey) -> None:
        self._dispatch_cache.pop(key, None)

    # This implements the pre-computation logic for the Python dispatcher.
    def _get_dispatch(self, key: DispatchKey) -> Union[DispatchKey, Callable[_P, _T]]:
        # This is only called upon a cache miss
        assert key not in self._dispatch_cache, f"{self} {key}"

        if key == DispatchKey.Python:
            # Fast path: no Python-mode table entries, so just cache the key
            # itself (meaning "no special Python handling").
            if not isinstance(self, TorchBindOpOverload) and not self.python_key_table:
                self._dispatch_cache[key] = key
                add_cached_op(self)
                return key

            def handler(*args: _P.args, **kwargs: _P.kwargs) -> _T:
                from torch.utils._python_dispatch import _get_current_dispatch_mode

                # TODO: We also need to handle tensor subclasses here
                # TODO(voz): We should walk all the nodes here / turn it into a list, topmode is ok for now.
                curr_mode = type(_get_current_dispatch_mode())
                assert curr_mode is not None, (
                    "Illegal invocation of dispatch on DispatchKey.Python without a mode."
                )

                if curr_mode not in self.python_key_table:
                    if isinstance(self, TorchBindOpOverload):
                        with (
                            torch.utils._python_dispatch._pop_mode_temporarily() as mode
                        ):
                            return torch._library.utils.handle_dispatch_mode(
                                mode, self, *args, **kwargs
                            )
                    else:
                        return self._op_dk(key, *args, **kwargs)

                # Pop the top mode so the registered per-mode kernel can
                # redispatch without infinitely re-entering itself.
                with torch.utils._python_dispatch._pop_mode_temporarily() as mode:
                    return self.python_key_table[curr_mode](mode, *args, **kwargs)  # type: ignore[index]

            self._dispatch_cache[key] = handler
            add_cached_op(self)
            return handler

        functionality_key = torch._C._to_functionality_key(key)  # type: ignore[attr-defined]
        if functionality_key == DispatchKey.PreDispatch:
            curr_stack_len = _len_torch_dispatch_stack_pre_dispatch()
            # The check for Python in the exclude set is so we properly respect `with no_dispatch()`
            # calls inside of a mode.
            if (
                curr_stack_len > 0
                and not torch._C._dispatch_tls_is_dispatch_key_excluded(
                    DispatchKey.Python
                )
            ):

                def handler(*args: _P.args, **kwargs: _P.kwargs) -> _T:
                    @contextlib.contextmanager
                    def _temporarily_pop_modes_from_pre_dispatch():
                        # Pop the top pre-dispatch mode for the duration of
                        # the call, then restore it even on exception.
                        top_mode = _pop_mode_from_pre_dispatch()
                        try:
                            yield top_mode
                        finally:
                            _set_mode_pre_dispatch(top_mode)

                    with _temporarily_pop_modes_from_pre_dispatch() as curr_mode:
                        return torch._library.utils.handle_dispatch_mode(
                            curr_mode, self, *args, **kwargs
                        )

                # Note [Not Caching Per-Dispatch-Key Mode Handlers]
                # Note that we're not caching this handler.  There isn't really a point, since the slow bit
                # is the handler itself (in python).
                # Also, not caching means that we don't have to reset the cache when any existing
                # modes go out of scope (which in of itself takes time to loop through all operators).
                return handler

        final_key = resolve_key(self, key)

        # See Note [Not Caching Per-Dispatch-Key Mode Handlers]
        cache_result = key != DispatchKey.PreDispatch

        # TODO: We could potentially have lots of debugging wrappers against
        # dispatch keys; design some general registration mechanism instead of
        # having if statement for each of them
        if key == DispatchKey.Functionalize:
            import torch._dispatch.python as pydispatch

            if pydispatch.CROSSREF_FUNCTIONALIZE:
                handler = pydispatch.make_crossref_functionalize(self, final_key)  # type: ignore[assignment]
                if cache_result:
                    self._dispatch_cache[key] = handler
                    add_cached_op(self)
                return handler

        # Either a Python kernel registered for the resolved key, or the
        # resolved key itself (meaning: fall through to the C++ dispatcher).
        r = self.py_kernels.get(final_key, final_key)
        if cache_result:
            self._dispatch_cache[key] = r  # pyrefly: ignore [unsupported-operation]
            add_cached_op(self)
        return r  # pyrefly: ignore [bad-return]

    def name(self):
        # Qualified name including overload suffix, e.g. "aten::add.Tensor".
        return self._name

    @property
    def overloadpacket(self):
        return self._overloadpacket

    @property
    def op(self):
        return self._op

    @property
    def tags(self):
        return self._tags

    # TODO: add more methods to expose information about input and output arguments


# TorchBindOpOverload are those custom ops which have at least one overload's
# schema consists of torch.ScriptObject (i.e. custom class) input.
# TorchBindOpOverload will skip C++ dispatcher and purely dispatched in python
# when its inputs contain FakeScriptObject in a similar way as higher order ops.
OpOverload
python
optuna__optuna
optuna/_gp/gp.py
{ "start": 2275, "end": 3545 }
class ____(torch.autograd.Function): @staticmethod def forward(ctx: Any, squared_distance: torch.Tensor) -> torch.Tensor: """ This method calculates `exp(-sqrt5d) * (1/3 * sqrt5d ** 2 + sqrt5d + 1)` where `sqrt5d = sqrt(5 * squared_distance)`. Please note that automatic differentiation by PyTorch does not work well at `squared_distance = 0` due to zero division, so we manually save the derivative, i.e., `-5/6 * (1 + sqrt5d) * exp(-sqrt5d)`, for the exact derivative calculation. Notice that the derivative of this function is taken w.r.t. d**2, but not w.r.t. d. """ sqrt5d = torch.sqrt(5 * squared_distance) exp_part = torch.exp(-sqrt5d) val = exp_part * ((5 / 3) * squared_distance + sqrt5d + 1) deriv = (-5 / 6) * (sqrt5d + 1) * exp_part ctx.save_for_backward(deriv) return val @staticmethod def backward(ctx: Any, grad: torch.Tensor) -> torch.Tensor: """ Let x be squared_distance, f(x) be forward(ctx, x), and g(f) be a provided function, then deriv := df/dx, grad := dg/df, and deriv * grad = df/dx * dg/df = dg/dx. """ (deriv,) = ctx.saved_tensors return deriv * grad
Matern52Kernel
python
numpy__numpy
numpy/_core/tests/test_records.py
{ "start": 13344, "end": 13858 }
class ____: # Test that pathlib.Path can be used def test_tofile_fromfile(self): with temppath(suffix='.bin') as path: path = Path(path) np.random.seed(123) a = np.random.rand(10).astype('f8,i4,S5') a[5] = (0.5, 10, 'abcde') with path.open("wb") as fd: a.tofile(fd) x = np._core.records.fromfile( path, formats='f8,i4,S5', shape=10 ) assert_array_equal(x, a)
TestPathUsage
python
tensorflow__tensorflow
tensorflow/python/ops/ragged/row_partition.py
{ "start": 2597, "end": 49804 }
class ____(composite_tensor.CompositeTensor): """Partitioning of a sequence of values into contiguous subsequences ("rows"). A `RowPartition` describes how a sequence with `nvals` items should be divided into `nrows` contiguous subsequences ("rows"). For example, a `RowPartition` could be used to partition the vector `[1, 2, 3, 4, 5]` into subsequences `[[1, 2], [3], [], [4, 5]]`. Note that `RowPartition` stores information about how values are partitioned, but does not include the partitioned values themselves. `tf.RaggedTensor` is used to pair a `values` tensor with one or more `RowPartition`s, providing a complete encoding for a ragged tensor (i.e. a tensor with variable-length dimensions). `RowPartition`s may be defined using several different schemes: * `row_lengths`: an integer vector with shape `[nrows]`, which specifies the length of each row. * `row_splits`: an integer vector with shape `[nrows+1]`, specifying the "split points" between each row. * `row_starts`: an integer vector with shape `[nrows]`, which specifies the start offset for each row. Equivalent to `row_splits[:-1]`. * `row_limits`: an integer vector with shape `[nrows]`, which specifies the stop offset for each row. Equivalent to `row_splits[1:]`. * `value_rowids` is an integer vector with shape `[nvals]`, corresponding one-to-one with sequence values, which specifies the row that each value belongs to. If the partition has empty trailing rows, then `nrows` must also be specified. * `uniform_row_length` is an integer scalar, specifying the length of every row. This scheme may only be used if all rows have the same length. For example, the following `RowPartition`s all represent the partitioning of 8 values into 5 sublists as follows: `[[*, *, *, *], [], [*, *, *], [*], []]`. 
>>> p1 = RowPartition.from_row_lengths([4, 0, 3, 1, 0]) >>> p2 = RowPartition.from_row_splits([0, 4, 4, 7, 8, 8]) >>> p3 = RowPartition.from_row_starts([0, 4, 4, 7, 8], nvals=8) >>> p4 = RowPartition.from_row_limits([4, 4, 7, 8, 8]) >>> p5 = RowPartition.from_value_rowids([0, 0, 0, 0, 2, 2, 2, 3], nrows=5) For more information about each scheme, see the documentation for the its factory method. For additional examples, see the documentation on `tf.RaggedTensor`. ### Precomputed Encodings `RowPartition` always stores at least one encoding of the partitioning, but it can be configured to cache additional encodings as well. This can avoid unnecessary recomputation in eager mode. (In graph mode, optimizations such as common subexpression elimination will typically prevent these unnecessary recomputations.) To check which encodings are precomputed, use `RowPartition.has_precomputed_<encoding>`. To cache an additional encoding, use `RowPartition.with_precomputed_<encoding>`. """ # ============================================================================= # Constructor (private) # ============================================================================= def __init__(self, row_splits, row_lengths=None, value_rowids=None, nrows=None, uniform_row_length=None, nvals=None, internal=False): """Creates a `RowPartition` from the specified encoding tensor(s). This constructor is private -- please use one of the following ops to build `RowPartition`s: * `RowPartition.from_row_lengths` * `RowPartition.from_value_rowids` * `RowPartition.from_row_splits` * `RowPartition.from_row_starts` * `RowPartition.from_row_limits` * `RowPartition.from_uniform_row_length` If row_splits is has a constant value, then all other arguments should have a constant value. Args: row_splits: A 1-D integer tensor with shape `[nrows+1]`. row_lengths: A 1-D integer tensor with shape `[nrows]` value_rowids: A 1-D integer tensor with shape `[nvals]`. nrows: A 1-D integer scalar tensor. 
uniform_row_length: A scalar tensor. nvals: A scalar tensor. internal: Private key value, required to ensure that this private constructor is *only* called from the factory methods. Raises: TypeError: If a row partitioning tensor has an inappropriate dtype. TypeError: If exactly one row partitioning argument was not specified. ValueError: If a row partitioning tensor has an inappropriate shape. ValueError: If multiple partitioning arguments are specified. ValueError: If nrows is specified but value_rowids is not None. """ if internal is not _row_partition_factory_key: raise ValueError("RowPartition constructor is private; please use one " "of the factory methods instead (e.g., " "RowPartition.from_row_lengths())") # Validate the arguments. if not isinstance(row_splits, tensor_lib.Tensor): raise TypeError("Row-partitioning argument must be a Tensor, got %r" % row_splits) if row_splits.dtype not in (dtypes.int32, dtypes.int64): raise ValueError("Row-partitioning argument must be int32 or int64") # Validate shapes & dtypes. row_splits.shape.assert_has_rank(1) row_splits.set_shape([None]) self._row_splits = row_splits # Store any cached tensors. These are used to avoid unnecessary # round-trip conversions when a RowPartition is constructed from # lengths or rowids, and we later want those lengths/rowids back. 
for tensor in [row_lengths, value_rowids, nrows, uniform_row_length, nvals]: if tensor is not None: if not isinstance(tensor, tensor_lib.Tensor): raise TypeError("Cached value must be a Tensor or None.") elif tensor.dtype != row_splits.dtype: raise ValueError(f"Inconsistent dtype for encoding tensors: " f"{tensor} vs {row_splits}") self._row_lengths = row_lengths self._value_rowids = value_rowids self._nrows = nrows self._uniform_row_length = uniform_row_length self._nvals = nvals # ============================================================================= # Factory Methods # ============================================================================= @classmethod def from_value_rowids(cls, value_rowids, nrows=None, validate=True, dtype=None, dtype_hint=None): """Creates a `RowPartition` with rows partitioned by `value_rowids`. This `RowPartition` divides a sequence `values` into rows by specifying which row each value should be added to: ```python partitioned_rows = [[] for _ in nrows] for (value, rowid) in zip(values, value_rowids): partitioned_rows[rowid].append(value) ``` Args: value_rowids: A 1-D integer tensor with shape `[nvals]`, which corresponds one-to-one with `values`, and specifies each value's row index. Must be nonnegative, and must be sorted in ascending order. nrows: An integer scalar specifying the number of rows. This should be specified if the `RowPartition` may containing empty training rows. Must be greater than `value_rowids[-1]` (or greater than or equal to zero if `value_rowids` is empty). Defaults to `value_rowids[-1] + 1` (or zero if `value_rowids` is empty). validate: If true, then use assertions to check that the arguments form a valid `RowPartition`. dtype: Optional dtype for the RowPartition. If missing, the type is inferred from the type of `value_rowids`, dtype_hint, or tf.int64. dtype_hint: Optional dtype for the RowPartition, used when dtype is None. 
In some cases, a caller may not have a dtype in mind when converting to a tensor, so dtype_hint can be used as a soft preference. If the conversion to `dtype_hint` is not possible, this argument has no effect. Returns: A `RowPartition`. Raises: ValueError: If `nrows` is incompatible with `value_rowids`. #### Example: >>> print(RowPartition.from_value_rowids( ... value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], ... nrows=4)) tf.RowPartition(row_splits=[0 4 4 7 8]) """ # Local import bincount_ops to avoid import-cycle since bincount_ops # imports ragged_tensor. from tensorflow.python.ops import bincount_ops # pylint: disable=g-import-not-at-top if not isinstance(validate, bool): raise TypeError("validate must have type bool") with ops.name_scope(None, "RowPartitionFromValueRowIds", [value_rowids, nrows]): value_rowids = cls._convert_row_partition( value_rowids, "value_rowids", dtype_hint=dtype_hint, dtype=dtype) if nrows is None: const_rowids = tensor_util.constant_value(value_rowids) if const_rowids is None: nrows = array_ops.concat([value_rowids[-1:], [-1]], axis=0)[0] + 1 const_nrows = None else: const_nrows = const_rowids[-1] + 1 if const_rowids.size > 0 else 0 nrows = ops.convert_to_tensor( const_nrows, value_rowids.dtype, name="nrows") else: nrows = ops.convert_to_tensor(nrows, value_rowids.dtype, "nrows") const_nrows = tensor_util.constant_value(nrows) if const_nrows is not None: if const_nrows < 0: raise ValueError("Expected nrows >= 0; got %d" % const_nrows) const_rowids = tensor_util.constant_value(value_rowids) if const_rowids is not None and const_rowids.size > 0: if not const_nrows >= const_rowids[-1] + 1: raise ValueError( "Expected nrows >= value_rowids[-1] + 1; got nrows=%d, " "value_rowids[-1]=%d" % (const_nrows, const_rowids[-1])) value_rowids.shape.assert_has_rank(1) nrows.shape.assert_has_rank(0) if validate: msg = ("Arguments to from_value_rowids do not form a valid " "RowPartition") checks = [ check_ops.assert_rank(value_rowids, 1, message=msg), 
check_ops.assert_rank(nrows, 0, message=msg), check_ops.assert_non_negative(value_rowids[:1], message=msg), _assert_monotonic_increasing(value_rowids, message=msg), check_ops.assert_less(value_rowids[-1:], nrows, message=msg), ] value_rowids = control_flow_ops.with_dependencies(checks, value_rowids) # Convert value_rowids & nrows to row_splits. # Note: we don't use segment_ids_to_row_splits() here because we want # to save the intermediate value `row_lengths`, so we can cache it. # TODO(b/116708836) Upgrade bincount to accept int64 so we can skip the # cast. value_rowids_int32 = math_ops.cast(value_rowids, dtypes.int32) nrows_int32 = math_ops.cast(nrows, dtypes.int32) row_lengths = bincount_ops.bincount( value_rowids_int32, minlength=nrows_int32, maxlength=nrows_int32, dtype=value_rowids.dtype) row_splits = array_ops.concat([[0], math_ops.cumsum(row_lengths)], axis=0) if const_nrows is not None: row_lengths.set_shape([const_nrows]) row_splits.set_shape([const_nrows + 1]) return cls( row_splits=row_splits, row_lengths=row_lengths, value_rowids=value_rowids, nrows=nrows, internal=_row_partition_factory_key) @classmethod def from_row_splits(cls, row_splits, validate=True, dtype=None, dtype_hint=None): """Creates a `RowPartition` with rows partitioned by `row_splits`. This `RowPartition` divides a sequence `values` into rows by indicating where each row begins and ends: ```python partitioned_rows = [] for i in range(len(row_splits) - 1): row_start = row_splits[i] row_end = row_splits[i + 1] partitioned_rows.append(values[row_start:row_end]) ``` Args: row_splits: A 1-D integer tensor with shape `[nrows+1]`. Must not be empty, and must be sorted in ascending order. `row_splits[0]` must be zero. validate: If true, then use assertions to check that the arguments form a valid `RowPartition`. dtype: Optional dtype for the RowPartition. If missing, the type is inferred from the type of `row_splits`, dtype_hint, or tf.int64. 
dtype_hint: Optional dtype for the RowPartition, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so dtype_hint can be used as a soft preference. If the conversion to `dtype_hint` is not possible, this argument has no effect. Returns: A `RowPartition`. Raises: ValueError: If `row_splits` is an empty list. """ if not isinstance(validate, bool): raise TypeError("validate must have type bool") if isinstance(row_splits, (list, tuple)) and not row_splits: raise ValueError("row_splits tensor may not be empty.") if isinstance(row_splits, tensor_lib.TensorSpec): return cls(row_splits=row_splits, internal=_row_partition_factory_key) with ops.name_scope(None, "RowPartitionFromRowSplits", [row_splits]): row_splits = cls._convert_row_partition( row_splits, "row_splits", dtype_hint=dtype_hint, dtype=dtype) row_splits.shape.assert_has_rank(1) if validate: msg = "Arguments to from_row_splits do not form a valid RaggedTensor:" checks = [ check_ops.assert_rank(row_splits, 1, message=(msg + "rank")), _assert_zero(row_splits[0], message=(msg + "zero")), _assert_monotonic_increasing( row_splits, message=(msg + "monotonic")), ] row_splits = control_flow_ops.with_dependencies(checks, row_splits) return cls(row_splits=row_splits, internal=_row_partition_factory_key) @classmethod def from_row_lengths(cls, row_lengths, validate=True, dtype=None, dtype_hint=None): """Creates a `RowPartition` with rows partitioned by `row_lengths`. This `RowPartition` divides a sequence `values` into rows by indicating the length of each row: ```python partitioned_rows = [[values.pop(0) for _ in range(length)] for length in row_lengths] ``` Args: row_lengths: A 1-D integer tensor with shape `[nrows]`. Must be nonnegative. validate: If true, then use assertions to check that the arguments form a valid `RowPartition`. dtype: Optional dtype for the RowPartition. If missing, the type is inferred from the type of `row_lengths`, dtype_hint, or tf.int64. 
dtype_hint: Optional dtype for the RowPartition, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so dtype_hint can be used as a soft preference. If the conversion to `dtype_hint` is not possible, this argument has no effect. Returns: A `RowPartition`. """ if not isinstance(validate, bool): raise TypeError("validate must have type bool") with ops.name_scope(None, "RowPartitionFromRowLengths", [row_lengths]): row_lengths = cls._convert_row_partition( row_lengths, "row_lengths", dtype_hint=dtype_hint, dtype=dtype) row_lengths.shape.assert_has_rank(1) if validate: msg = "Arguments to from_row_lengths do not form a valid RowPartition" checks = [ check_ops.assert_rank(row_lengths, 1, message=msg), check_ops.assert_non_negative(row_lengths, message=msg), ] row_lengths = control_flow_ops.with_dependencies(checks, row_lengths) row_limits = math_ops.cumsum(row_lengths) row_splits = array_ops.concat([[0], row_limits], axis=0) return cls( row_splits=row_splits, row_lengths=row_lengths, internal=_row_partition_factory_key) @classmethod def from_row_starts(cls, row_starts, nvals, validate=True, dtype=None, dtype_hint=None): """Creates a `RowPartition` with rows partitioned by `row_starts`. Equivalent to: `from_row_splits(concat([row_starts, nvals], axis=0))`. Args: row_starts: A 1-D integer tensor with shape `[nrows]`. Must be nonnegative and sorted in ascending order. If `nrows>0`, then `row_starts[0]` must be zero. nvals: A scalar tensor indicating the number of values. validate: If true, then use assertions to check that the arguments form a valid `RowPartition`. dtype: Optional dtype for the RowPartition. If missing, the type is inferred from the type of `row_starts`, dtype_hint, or tf.int64. dtype_hint: Optional dtype for the RowPartition, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so dtype_hint can be used as a soft preference. 
If the conversion to `dtype_hint` is not possible, this argument has no effect. Returns: A `RowPartition`. """ if not isinstance(validate, bool): raise TypeError("validate must have type bool") with ops.name_scope(None, "RowPartitionFromRowStarts", [row_starts]): row_starts = cls._convert_row_partition( row_starts, "row_starts", dtype_hint=dtype_hint, dtype=dtype) row_starts.shape.assert_has_rank(1) # TODO(martinz): nvals and row_starts could be inconsistent at call time, # even though they eventually end up the same type. nvals = math_ops.cast(nvals, row_starts.dtype) if validate: msg = "Arguments to from_row_starts do not form a valid RaggedTensor" checks = [ check_ops.assert_rank(row_starts, 1, message=msg), _assert_zero(row_starts[:1], message=msg), _assert_monotonic_increasing(row_starts, message=msg), check_ops.assert_less_equal(row_starts[-1:], nvals, message=msg), ] row_starts = control_flow_ops.with_dependencies(checks, row_starts) row_splits = array_ops.concat([row_starts, [nvals]], axis=0) return cls(row_splits=row_splits, nvals=nvals, internal=_row_partition_factory_key) @classmethod def from_row_limits(cls, row_limits, validate=True, dtype=None, dtype_hint=None): """Creates a `RowPartition` with rows partitioned by `row_limits`. Equivalent to: `from_row_splits(values, concat([0, row_limits], axis=0))`. Args: row_limits: A 1-D integer tensor with shape `[nrows]`. Must be sorted in ascending order. validate: If true, then use assertions to check that the arguments form a valid `RowPartition`. dtype: Optional dtype for the RowPartition. If missing, the type is inferred from the type of `row_limits`, dtype_hint, or tf.int64. dtype_hint: Optional dtype for the RowPartition, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so dtype_hint can be used as a soft preference. If the conversion to `dtype_hint` is not possible, this argument has no effect. Returns: A `RowPartition`. 
""" if not isinstance(validate, bool): raise TypeError("validate must have type bool") with ops.name_scope(None, "RowPartitionFromRowLimits", [row_limits]): row_limits = cls._convert_row_partition( row_limits, "row_limits", dtype_hint=dtype_hint, dtype=dtype) row_limits.shape.assert_has_rank(1) if validate: msg = "Arguments to from_row_limits do not form a valid RaggedTensor" checks = [ check_ops.assert_rank(row_limits, 1, message=msg), check_ops.assert_non_negative(row_limits[:1], message=msg), _assert_monotonic_increasing(row_limits, message=msg), ] row_limits = control_flow_ops.with_dependencies(checks, row_limits) zero = array_ops.zeros([1], row_limits.dtype) row_splits = array_ops.concat([zero, row_limits], axis=0) return cls(row_splits=row_splits, internal=_row_partition_factory_key) @classmethod def from_uniform_row_length(cls, uniform_row_length, nvals=None, nrows=None, validate=True, dtype=None, dtype_hint=None): """Creates a `RowPartition` with rows partitioned by `uniform_row_length`. This `RowPartition` divides a sequence `values` into rows that all have the same length: ```python partitioned_rows = [[values.pop(0) for _ in range(uniform_row_length)] for _ in range(nrows)] ``` Note that either or both of nvals and nrows must be specified. Args: uniform_row_length: A scalar integer tensor. Must be nonnegative. The size of the outer axis of `values` must be evenly divisible by `uniform_row_length`. nvals: a non-negative scalar integer tensor for the number of values. Must be specified if nrows is not specified. If not specified, defaults to uniform_row_length*nrows nrows: The number of rows in the constructed RowPartition. If not specified, then it defaults to `nvals/uniform_row_length` (or `0` if `uniform_row_length==0`). `nrows` only needs to be specified if `uniform_row_length` might be zero. `uniform_row_length*nrows` must be `nvals`. validate: If true, then use assertions to check that the arguments form a valid `RowPartition`. 
dtype: Optional dtype for the RowPartition. If missing, the type is inferred from the type of `uniform_row_length`, dtype_hint, or tf.int64. dtype_hint: Optional dtype for the RowPartition, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so dtype_hint can be used as a soft preference. If the conversion to `dtype_hint` is not possible, this argument has no effect. Returns: A `RowPartition`. """ if not isinstance(validate, bool): raise TypeError("validate must have type bool") if nrows is None and nvals is None: raise ValueError("Either (or both) of nvals and nrows must be specified") with ops.name_scope(None, "RowPartitionFromUniformRowLength", [uniform_row_length, nrows]): [uniform_row_length, nvals, nrows ] = _convert_all_to_tensors([(uniform_row_length, "uniform_row_length"), (nvals, "nvals"), (nrows, "nrows")], dtype=dtype, dtype_hint=dtype_hint) uniform_row_length.shape.assert_has_rank(0) # Find nrows. const_row_length = tensor_util.constant_value(uniform_row_length) if nrows is None: if const_row_length is None: # Avoid division by zero if uniform_row_length==0 (and nvals==0). 
rowlen_or_1 = math_ops.maximum( uniform_row_length, constant_op.constant(1, uniform_row_length.dtype)) nrows = nvals // rowlen_or_1 elif const_row_length == 0: nrows = constant_op.constant(0, dtype=uniform_row_length.dtype) else: nrows = nvals // const_row_length const_nrows = None if nrows is None else tensor_util.constant_value(nrows) const_nvals = None if nvals is None else tensor_util.constant_value(nvals) const_uniform_row_length = tensor_util.constant_value(uniform_row_length) checks = [] if const_nvals is None and const_nrows is not None and const_uniform_row_length is not None: const_nvals = const_nrows * const_uniform_row_length if nvals is not None and validate: checks.append(check_ops.assert_equal(nvals, const_nvals)) nvals = constant_op.constant(const_nvals, uniform_row_length.dtype) if nvals is None: nvals = nrows * uniform_row_length # Find row_splits. if const_nrows is not None and const_row_length is not None: row_splits = [v * const_row_length for v in range(const_nrows + 1)] row_splits = constant_op.constant(row_splits, uniform_row_length.dtype) else: row_splits = math_ops.range( nrows + 1, dtype=uniform_row_length.dtype) * uniform_row_length if validate: if (const_nrows is None or const_row_length is None or const_nvals is None): checks.append( check_ops.assert_equal( nrows * uniform_row_length, nvals, ("uniform_row_length", uniform_row_length, "times nrows", nrows, "must equal nvals", nvals))) else: if const_nrows * const_row_length != const_nvals: raise ValueError( "uniform_row_length=%d times nrows=%d must equal nvals=%d" % (const_row_length, const_nrows, const_nvals)) if uniform_row_length.shape.rank is None: checks.append( check_ops.assert_rank( uniform_row_length, 0, message="uniform_row_length must be a scalar.")) const_row_length = tensor_util.constant_value(uniform_row_length) if const_row_length is None: checks.append( check_ops.assert_greater_equal( uniform_row_length, constant_op.constant(0, uniform_row_length.dtype), 
message="uniform_row_length must be >= 0.")) else: if const_row_length < 0: raise ValueError("uniform_row_length must be >= 0.") row_splits = control_flow_ops.with_dependencies(checks, row_splits) return cls( row_splits=row_splits, uniform_row_length=uniform_row_length, nrows=nrows, nvals=nvals, internal=_row_partition_factory_key) @classmethod def _convert_row_partition(cls, partition, name, dtype=None, dtype_hint=None): """Converts `partition` to Tensors. Args: partition: A row-partitioning tensor for the `RowPartition` being constructed. I.e., one of: row_splits, row_lengths, row_starts, row_limits, value_rowids, uniform_row_length. name: The name of the row-partitioning tensor. dtype: Optional dtype for the RowPartition. If missing, the type is inferred from the type of `uniform_row_length`, dtype_hint, or tf.int64. dtype_hint: Optional dtype for the RowPartition, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so dtype_hint can be used as a soft preference. If the conversion to `dtype_hint` is not possible, this argument has no effect. Returns: A tensor equivalent to partition. Raises: ValueError: if dtype is not int32 or int64. """ if dtype_hint is None: dtype_hint = dtypes.int64 if (isinstance(partition, np.ndarray) and partition.dtype == np.int32 and dtype is None): partition = ops.convert_to_tensor(partition, name=name) else: partition = tensor_conversion.convert_to_tensor_v2( partition, dtype_hint=dtype_hint, dtype=dtype, name=name ) if partition.dtype not in (dtypes.int32, dtypes.int64): raise ValueError("%s must have dtype int32 or int64" % name) return partition def _with_dependencies(self, dependencies): """Returns a new RowPartition equal to self with control dependencies. Specifically, self._row_splits is gated by the given control dependencies. Used to add sanity checks to the constructors. Args: dependencies: a list of tensors to use as dependencies. Returns: A new RowPartition object. 
""" new_row_splits = control_flow_ops.with_dependencies(dependencies, self._row_splits) return RowPartition( row_splits=new_row_splits, row_lengths=self._row_lengths, value_rowids=self._value_rowids, nrows=self._nrows, uniform_row_length=self._uniform_row_length, internal=_row_partition_factory_key) # ============================================================================= # Accessors # ============================================================================= @property def dtype(self): """The `DType` used to encode the row partition (either int32 or int64).""" return self._row_splits.dtype def row_splits(self): """Returns the row-split indices for this row partition. `row_splits` specifies where the values for each row begin and end. In particular, the values for row `i` are stored in the slice `values[row_splits[i]:row_splits[i+1]]`. Returns: A 1-D integer `Tensor` with shape `[self.nrows+1]`. The returned tensor is non-empty, and is sorted in ascending order. `self.row_splits()[0] == 0`. `self.row_splits()[-1] == self.nvals()`. """ return self._row_splits def value_rowids(self): """Returns the row indices for this row partition. `value_rowids` specifies the row index for each value. In particular, `value_rowids[i]` is the row index for `values[i]`. Returns: A 1-D integer `Tensor` with shape `[self.nvals()]`. The returned tensor is nonnegative, and is sorted in ascending order. """ if self._value_rowids is not None: return self._value_rowids return segment_id_ops.row_splits_to_segment_ids(self._row_splits) def nvals(self): """Returns the number of values partitioned by this `RowPartition`. If the sequence partitioned by this `RowPartition` is a tensor, then `nvals` is the size of that tensor's outermost dimension -- i.e., `nvals == values.shape[0]`. Returns: scalar integer Tensor """ # TODO(martinz): Uncomment these lines. 
# if self._nvals is not None: # return self._nvals return self._row_splits[-1] def nrows(self): """Returns the number of rows created by this `RowPartition`. Returns: scalar integer Tensor """ if self._nrows is not None: return self._nrows nsplits = tensor_shape.dimension_at_index(self._row_splits.shape, 0) if nsplits.value is None: return array_ops.shape(self._row_splits, out_type=self.dtype)[0] - 1 else: return constant_op.constant(nsplits.value - 1, dtype=self.dtype) def uniform_row_length(self): """Returns the length of each row in this partition, if rows are uniform. If all rows in this `RowPartition` have the same length, then this returns that length as a scalar integer `Tensor`. Otherwise, it returns `None`. Returns: scalar Tensor with `type=self.dtype`, or `None`. """ return self._uniform_row_length def row_starts(self): """Returns the start indices for rows in this row partition. These indices specify where the values for each row begin. `partition.row_starts()` is equal to `partition.row_splits()[:-1]`. Returns: A 1-D integer Tensor with shape `[self.nrows()]`. The returned tensor is nonnegative, and is sorted in ascending order. `self.row_starts()[0] == 0`. `self.row_starts()[-1] <= self.nvals()`. """ return self._row_splits[:-1] def row_limits(self): """Returns the limit indices for rows in this row partition. These indices specify where the values for each row end. `partition.row_limits()` is equal to `partition.row_splits()[:-1]`. Returns: A 1-D integer Tensor with shape `[self.nrows]`. The returned tensor is nonnegative, and is sorted in ascending order. `self.row_limits()[-1] == self.nvals()`. """ return self._row_splits[1:] def row_lengths(self): """Returns the lengths of rows in this `RowPartition`. Returns: A 1-D integer Tensor with shape `[self.nrows]`. The returned tensor is nonnegative. `tf.reduce_sum(self.row_lengths) == self.nvals()`. 
""" if self._row_lengths is not None: return self._row_lengths splits = self._row_splits return splits[1:] - splits[:-1] @property def static_nrows(self): """The number of rows in this partition, if statically known. ```python self.row_lengths().shape == [self.static_nrows] self.row_starts().shape == [self.static_nrows] self.row_limits().shape == [self.static_nrows] self.row_splits().shape == [self.static_nrows + 1] ``` Returns: The number of rows in this partition as an `int` (if statically known); or `None` (otherwise). """ if self._row_splits is not None: nrows_plus_one = tensor_shape.dimension_value(self._row_splits.shape[0]) if nrows_plus_one is not None: return nrows_plus_one - 1 if self._row_lengths is not None: nrows = tensor_shape.dimension_value(self._row_lengths.shape[0]) if nrows is not None: return nrows if self._nrows is not None: return tensor_util.constant_value(self._nrows) return None @property def static_nvals(self): """The number of values in this partition, if statically known. ```python self.value_rowids().shape == [self.static_vals] ``` Returns: The number of values in this partition as an `int` (if statically known); or `None` (otherwise). """ if self._nvals is not None: nvals = tensor_util.constant_value(self._nvals) if nvals is not None: return nvals if self._value_rowids is not None: nvals = tensor_shape.dimension_at_index(self._value_rowids.shape, 0) if nvals.value is not None: return nvals.value return None @property def static_uniform_row_length(self): """The number of values in each row of this partition, if statically known. Returns: The number of values in each row of this partition as an `int` (if statically known); or `None` (otherwise). """ if self._uniform_row_length is not None: return tensor_util.constant_value(self._uniform_row_length) return None def offsets_in_rows(self): """Return the offset of each value. RowPartition takes an array x and converts it into sublists. offsets[i] is the index of x[i] in its sublist. 
Given a shape, such as: [*,*,*],[*,*],[],[*,*] This returns: 0,1,2,0,1,0,1 Returns: an offset for every value. """ return gen_ragged_math_ops.ragged_range( starts=constant_op.constant(0, self.dtype), limits=self.row_lengths(), deltas=constant_op.constant(1, self.dtype)).rt_dense_values def is_uniform(self): """Returns true if the partition is known to be uniform statically. This is based upon the existence of self._uniform_row_length. For example: RowPartition.from_row_lengths([3,3,3]).is_uniform()==false RowPartition.from_uniform_row_length(5, nvals=20).is_uniform()==true RowPartition.from_row_lengths([2,0,2]).is_uniform()==false Returns: Whether a RowPartition is known to be uniform statically. """ return self._uniform_row_length is not None def _static_check(self): """Checks if the object is internally consistent. Raises: ValueError if inconsistent. """ my_dtype = self.dtype if self._uniform_row_length is not None: if self._uniform_row_length.dtype != my_dtype: raise ValueError("_uniform_row_length.dtype=" + str(self._uniform_row_length.dtype) + ", not " + str(my_dtype)) if self._row_lengths is not None and self._row_lengths.dtype != my_dtype: raise ValueError("_row_lengths.dtype=" + str(self._row_lengths.dtype) + ", not " + str(my_dtype)) if self._value_rowids is not None and self._value_rowids.dtype != my_dtype: raise ValueError("_value_rowids.dtype=" + str(self._value_rowids.dtype) + ", not " + str(my_dtype)) if self._nrows is not None and self._nrows.dtype != my_dtype: raise ValueError("_nrows.dtype=" + str(self._nrows.dtype) + ", not " + str(my_dtype)) # ============================================================================= # Transformation # ============================================================================= def with_dtype(self, dtype): """Returns a copy of this RowPartition with the given encoding dtype. Args: dtype: The dtype for encoding tensors, such as `row_splits` and `nrows`. One of `tf.int32` or `tf.int64`. 
Returns: A copy of this RowPartition, with the encoding tensors cast to the given type. """ dtype = dtypes.as_dtype(dtype) if dtype not in (dtypes.int32, dtypes.int64): raise ValueError("dtype must be int32 or int64") if self.dtype == dtype: return self return RowPartition( row_splits=_cast_if_not_none(self._row_splits, dtype), row_lengths=_cast_if_not_none(self._row_lengths, dtype), value_rowids=_cast_if_not_none(self._value_rowids, dtype), nrows=_cast_if_not_none(self._nrows, dtype), uniform_row_length=_cast_if_not_none(self._uniform_row_length, dtype), internal=_row_partition_factory_key) # ============================================================================= # String Encoding # ============================================================================= def __repr__(self): if self._uniform_row_length is not None: return (f"tf.RowPartition(nrows={self._nrows}, " f"uniform_row_length={self._uniform_row_length})") else: return f"tf.RowPartition(row_splits={self._row_splits})" # ============================================================================= # Precomputed Encodings # ============================================================================= def _has_precomputed_row_splits(self): """Returns true if `row_splits` has already been computed. If true, then `self.row_splits()` will return its value without calling any TensorFlow ops. """ return self._row_splits is not None def _has_precomputed_row_lengths(self): """Returns true if `row_lengths` has already been computed. If true, then `self.row_lengths()` will return its value without calling any TensorFlow ops. """ return self._row_lengths is not None def _has_precomputed_value_rowids(self): """Returns true if `value_rowids` has already been computed. If true, then `self.value_rowids()` will return its value without calling any TensorFlow ops. """ return self._value_rowids is not None def _has_precomputed_nrows(self): """Returns true if `nrows` has already been computed. 
If true, then `self.nrows()` will return its value without calling any TensorFlow ops. """ return self._nrows is not None def _has_precomputed_nvals(self): """Returns true if `nvals` has already been computed. If true, then `self.nvals()` will return its value without calling any TensorFlow ops. """ return self._nvals is not None def _with_precomputed_row_splits(self): """Returns a copy of `self` with `row_splits` precomputed.""" return RowPartition( row_splits=self.row_splits(), row_lengths=self._row_lengths, value_rowids=self._value_rowids, nrows=self._nrows, uniform_row_length=self._uniform_row_length, nvals=self._nvals, internal=_row_partition_factory_key) def _with_precomputed_row_lengths(self): """Returns a copy of `self` with `row_lengths` precomputed.""" return RowPartition( row_splits=self._row_splits, row_lengths=self.row_lengths(), value_rowids=self._value_rowids, nrows=self._nrows, nvals=self._nvals, uniform_row_length=self._uniform_row_length, internal=_row_partition_factory_key) def _with_precomputed_value_rowids(self): """Returns a copy of `self` with `value_rowids` precomputed.""" return RowPartition( row_splits=self._row_splits, row_lengths=self._row_lengths, value_rowids=self.value_rowids(), nrows=self._nrows, nvals=self._nvals, uniform_row_length=self._uniform_row_length, internal=_row_partition_factory_key) def _with_precomputed_nrows(self): """Returns a copy of `self` with `nrows` precomputed.""" return RowPartition( row_splits=self._row_splits, row_lengths=self._row_lengths, value_rowids=self._value_rowids, nrows=self.nrows(), nvals=self._nvals, uniform_row_length=self._uniform_row_length, internal=_row_partition_factory_key) def _with_precomputed_nvals(self): """Returns a copy of `self` with `row_splits` precomputed.""" return RowPartition( row_splits=self.row_splits(), row_lengths=self._row_lengths, value_rowids=self._value_rowids, nrows=self._nrows, nvals=self.nvals(), uniform_row_length=self._uniform_row_length, 
internal=_row_partition_factory_key) def _merge_with_spec(self, b): """Merge with a TypeSpec to create a new RowPartition.""" a_spec = self._type_spec if not a_spec.is_compatible_with(b): # TODO(martinz): Should a dynamic check be used here? raise ValueError("RowPartition and RowPartitionSpec are not compatible") nrows = constant_op.constant( b.nrows, self.dtype) if b.nrows is not None else self._nrows nvals = constant_op.constant( b.nvals, self.dtype) if b.nvals is not None else self._nvals uniform_row_length = constant_op.constant( b.uniform_row_length, self.dtype ) if b.uniform_row_length is not None else self._uniform_row_length return RowPartition( row_splits=self._row_splits, row_lengths=self._row_lengths, value_rowids=self._value_rowids, nvals=nvals, uniform_row_length=uniform_row_length, nrows=nrows, internal=_row_partition_factory_key) def _merge_precomputed_encodings(self, other, validate=True): """Returns a RowPartition that merges encodings from `self` and `other`. Requires that `self` and `other` describe the same partition. Args: other: A `RowPartition` that encodes the same partition as `self`. validate: If true, then add runtime checks to verify that `self` and `other` encode the same row partition. Returns: A `RowPartition`. """ # pylint: disable=protected-access if (self is other or # Fast path if row partitions are equal. (self._row_splits is other._row_splits and self._row_lengths is other._row_lengths and self._value_rowids is other._value_rowids and self._nrows is other._nrows and self._nvals is other._nvals and self._uniform_row_length is other._uniform_row_length)): return self # Merge the component tensors. We only need to validate one encoding. # We merge less-expensive encodings first (to avoid expensive validation). 
nrows, nrows_validated = _merge_tensors(self._nrows, other._nrows, "nrows", validate) nvals, _ = _merge_tensors(self._nvals, other._nvals, "nvals", validate) uniform_row_length, uniform_row_length_validated = _merge_tensors( self._uniform_row_length, other._uniform_row_length, "uniform_row_length", validate) if uniform_row_length_validated and nrows_validated: validate = False # Validation complete. row_splits, row_splits_validated = _merge_tensors(self._row_splits, other._row_splits, "row_splits", validate) if row_splits_validated: validate = False # Validation complete. row_lengths, row_lengths_validated = _merge_tensors(self._row_lengths, other._row_lengths, "row_lengths", validate) if row_lengths_validated: validate = False # Validation complete. value_rowids, value_rowids_validated = _merge_tensors( self._value_rowids, other._value_rowids, "value_rowids", validate) if value_rowids_validated and nrows_validated: validate = False # Validation complete. # TODO(edloper): If we make the row_splits encoding optional, then there # will be cases where we need to do validation at this point -- e.g. if # self has only row_splits and other has only value_rowids. But for # now, we are guaranteed to have done validation by this point. # Avoid creating new RowPartition objects if we don't need to. 
if (row_splits is self._row_splits and row_lengths is self._row_lengths and value_rowids is self._value_rowids and nrows is self._nrows and uniform_row_length is self._uniform_row_length): return self if (row_splits is other._row_splits and row_lengths is other._row_lengths and value_rowids is other._value_rowids and nrows is other._nrows and uniform_row_length is other._uniform_row_length): return other return RowPartition( row_splits=row_splits, row_lengths=row_lengths, value_rowids=value_rowids, nrows=nrows, uniform_row_length=uniform_row_length, nvals=nvals, internal=_row_partition_factory_key) # ============================================================================= # Composite Tensor # ============================================================================= @property def _type_spec(self): return RowPartitionSpec.from_value(self) # =============================================================================== # RowPartitionSpec # =============================================================================== # TODO(edloper): Consider refactoring RowPartitionSpec to allow any combination # of precomputed row-partition encodings (rather than always using row_splits). @type_spec_registry.register("tf.RowPartitionSpec")
RowPartition
python
ray-project__ray
rllib/env/tests/test_single_agent_env_runner.py
{ "start": 570, "end": 17270 }
class ____(unittest.TestCase): @classmethod def setUpClass(cls) -> None: ray.init() tune.register_env( "tune-registered", lambda cfg: SimpleCorridor({"corridor_length": 10} | cfg), ) tune.register_env( "tune-registered-vector", lambda cfg: CartPoleVectorEnv(**cfg), ) gym.register( "TestEnv-v0", entry_point=SimpleCorridor, kwargs={"corridor_length": 10}, ) gym.register( "TestEnv-v1", entry_point=SwimmerEnv, kwargs={"forward_reward_weight": 2.0, "reset_noise_scale": 0.2}, ) @classmethod def tearDownClass(cls) -> None: ray.shutdown() _global_registry.unregister(ENV_CREATOR, "tune-registered") _global_registry.unregister(ENV_CREATOR, "tune-registered-vector") gym.registry.pop("TestEnv-v0") gym.registry.pop("TestEnv-v1") def test_distributed_env_runner(self): """Tests, whether SingleAgentEnvRunner can be distributed.""" remote_class = ray.remote(num_cpus=1, num_gpus=0)(SingleAgentEnvRunner) # Test with both parallelized sub-envs and w/o. async_vectorization_mode = [False, True] for async_ in async_vectorization_mode: for env_spec in ["tune-registered", "CartPole-v1", SimpleCorridor]: config = ( AlgorithmConfig().environment(env_spec) # Vectorize x5 and by default, rollout 10 timesteps per individual # env. .env_runners( num_env_runners=5, num_envs_per_env_runner=5, rollout_fragment_length=10, remote_worker_envs=async_, ) ) array = [ remote_class.remote(config=config) for _ in range(config.num_env_runners) ] # Sample in parallel. results = [a.sample.remote(random_actions=True) for a in array] results = ray.get(results) # Loop over individual EnvRunner Actor's results and inspect each. 
for episodes in results: # Assert length of all fragments >= `rollout_fragment_length * num_envs_per_env_runner` and # < rollout_fragment_length * (num_envs_per_env_runner + 1) self.assertIn( sum(len(e) for e in episodes), [ config.num_envs_per_env_runner * config.rollout_fragment_length + i for i in range(config.num_envs_per_env_runner) ], ) def test_sample( self, num_envs_per_env_runner=5, expected_episodes=10, expected_timesteps=20, rollout_fragment_length=64, ): config = ( AlgorithmConfig() .environment("CartPole-v1") .env_runners( num_envs_per_env_runner=num_envs_per_env_runner, rollout_fragment_length=rollout_fragment_length, ) ) env_runner = SingleAgentEnvRunner(config=config) # Expect error if both num_timesteps and num_episodes given. self.assertRaises( AssertionError, lambda: env_runner.sample( num_timesteps=10, num_episodes=10, random_actions=True ), ) # Verify that an error is raised if a negative number is used self.assertRaises( AssertionError, lambda: env_runner.sample(num_timesteps=-1, random_actions=True), ) self.assertRaises( AssertionError, lambda: env_runner.sample(num_episodes=-1, random_actions=True), ) # Sample 10 episodes (2 per env, because num_envs_per_env_runner=5) # Repeat 100 times for _ in range(100): episodes = env_runner.sample( num_episodes=expected_episodes, random_actions=True ) self.assertTrue(len(episodes) == expected_episodes) # Since we sampled complete episodes, there should be no ongoing episodes # being returned. self.assertTrue(all(e.is_done for e in episodes)) self.assertTrue(all(e.t_started == 0 for e in episodes)) # Sample 20 timesteps (4 per env) # Repeat 100 times env_runner.sample(random_actions=True) # for the `e.t_started > 0` for _ in range(100): episodes = env_runner.sample( num_timesteps=expected_timesteps, random_actions=True ) # Check the sum of lengths of all episodes returned. 
total_timesteps = sum(len(e) for e in episodes) self.assertTrue( expected_timesteps <= total_timesteps <= expected_timesteps + num_envs_per_env_runner ) self.assertTrue(any(e.t_started > 0 for e in episodes)) # Sample a number of timesteps that's not a factor of the number of environments # Repeat 100 times expected_uneven_timesteps = expected_timesteps + num_envs_per_env_runner // 2 for _ in range(100): episodes = env_runner.sample( num_timesteps=expected_uneven_timesteps, random_actions=True ) # Check the sum of lengths of all episodes returned. total_timesteps = sum(len(e) for e in episodes) self.assertTrue( expected_uneven_timesteps <= total_timesteps <= expected_uneven_timesteps + num_envs_per_env_runner, ) self.assertTrue(any(e.t_started > 0 for e in episodes)) # Sample rollout_fragment_length=64, 100 times # Repeat 100 times for _ in range(100): episodes = env_runner.sample(random_actions=True) # Check, whether the sum of lengths of all episodes returned is 320 # 5 (num_env_per_worker) * 64 (rollout_fragment_length). 
total_timesteps = sum(len(e) for e in episodes) self.assertTrue( num_envs_per_env_runner * rollout_fragment_length <= total_timesteps <= ( num_envs_per_env_runner * rollout_fragment_length + num_envs_per_env_runner ) ) self.assertTrue(any(e.t_started > 0 for e in episodes)) # Test that force_reset will create episodes from scratch even with `num_timesteps` episodes = env_runner.sample( num_timesteps=expected_timesteps, random_actions=True, force_reset=True ) self.assertTrue(all(e.t_started == 0 for e in episodes)) episodes = env_runner.sample( num_timesteps=expected_timesteps, random_actions=True, force_reset=False ) self.assertTrue(any(e.t_started > 0 for e in episodes)) @patch(target="ray.rllib.env.env_runner.logger") def test_step_failed_reset_required(self, mock_logger): """Tests, whether SingleAgentEnvRunner can handle StepFailedResetRequired.""" # Define an env that raises StepFailedResetRequired class ErrorRaisingEnv(gym.Env): def __init__(self, config=None): # As per gymnasium standard, provide observation and action spaces in your # constructor. self.observation_space = gym.spaces.Discrete(2) self.action_space = gym.spaces.Discrete(2) self.exception_type = config["exception_type"] def reset(self, *, seed=None, options=None): return self.observation_space.sample(), {} def step(self, action): raise self.exception_type() config = ( AlgorithmConfig() .environment( ErrorRaisingEnv, env_config={"exception_type": StepFailedRecreateEnvError}, ) .env_runners(num_envs_per_env_runner=1, rollout_fragment_length=10) .fault_tolerance(restart_failed_sub_environments=True) ) env_runner = SingleAgentEnvRunner(config=config) # Check that we don't log the error on the first step (because we don't raise StepFailedResetRequired) # We need two steps because the first one naturally raises ResetNeeded because we try to step before the env is reset. 
env_runner._try_env_reset() env_runner._try_env_step(actions=[None]) assert mock_logger.exception.call_count == 0 config.environment(ErrorRaisingEnv, env_config={"exception_type": ValueError}) env_runner = SingleAgentEnvRunner(config=config) # Check that we don't log the error on the first step (because we don't raise StepFailedResetRequired) # We need two steps because the first one naturally raises ResetNeeded because we try to step before the env is reset. env_runner._try_env_reset() env_runner._try_env_step(actions=[None]) assert mock_logger.exception.call_count == 1 def test_vector_env(self, num_envs_per_env_runner=5, rollout_fragment_length=10): """Tests, whether SingleAgentEnvRunner can run various vectorized envs.""" # "ALE/Pong-v5" works but ale-py is not installed on microcheck for env in ["CartPole-v1", SimpleCorridor, "tune-registered"]: config = ( AlgorithmConfig() .environment(env) .env_runners( num_envs_per_env_runner=num_envs_per_env_runner, rollout_fragment_length=rollout_fragment_length, ) ) env_runner = SingleAgentEnvRunner(config=config) # Sample with the async-vectorized env. 
for i in range(100): episodes = env_runner.sample(random_actions=True) total_timesteps = sum(len(e) for e in episodes) self.assertTrue( num_envs_per_env_runner * rollout_fragment_length <= total_timesteps <= ( num_envs_per_env_runner * rollout_fragment_length + num_envs_per_env_runner ) ) env_runner.stop() def test_env_context(self): """Tests, whether SingleAgentEnvRunner can pass kwargs to the environments correctly.""" # default without env configs config = AlgorithmConfig().environment("Swimmer-v4") env_runner = SingleAgentEnvRunner(config=config) assert env_runner.env.env.get_attr("_forward_reward_weight") == (1.0,) assert env_runner.env.env.get_attr("_reset_noise_scale") == (0.1,) # Test gym registered environment env with kwargs config = AlgorithmConfig().environment( "Swimmer-v4", env_config={"forward_reward_weight": 2.0, "reset_noise_scale": 0.2}, ) env_runner = SingleAgentEnvRunner(config=config) assert env_runner.env.env.get_attr("_forward_reward_weight") == (2.0,) assert env_runner.env.env.get_attr("_reset_noise_scale") == (0.2,) # Test gym registered environment env with pre-set kwargs config = AlgorithmConfig().environment("TestEnv-v1") env_runner = SingleAgentEnvRunner(config=config) assert env_runner.env.env.get_attr("_forward_reward_weight") == (2.0,) assert env_runner.env.env.get_attr("_reset_noise_scale") == (0.2,) # Test using a mixture of registered kwargs and env configs config = AlgorithmConfig().environment( "TestEnv-v1", env_config={"forward_reward_weight": 3.0} ) env_runner = SingleAgentEnvRunner(config=config) assert env_runner.env.env.get_attr("_forward_reward_weight") == (3.0,) assert env_runner.env.env.get_attr("_reset_noise_scale") == (0.2,) # Test env-config with Tune registered or callable # default config = AlgorithmConfig().environment("tune-registered") env_runner = SingleAgentEnvRunner(config=config) assert env_runner.env.env.get_attr("end_pos") == (10.0,) # tune-registered config = AlgorithmConfig().environment( 
"tune-registered", env_config={"corridor_length": 5.0} ) env_runner = SingleAgentEnvRunner(config=config) assert env_runner.env.env.get_attr("end_pos") == (5.0,) # callable config = AlgorithmConfig().environment( SimpleCorridor, env_config={"corridor_length": 5.0} ) env_runner = SingleAgentEnvRunner(config=config) assert env_runner.env.env.get_attr("end_pos") == (5.0,) def test_vectorize_mode(self): """Test different vectorize mode for creating the environment.""" # default config = ( AlgorithmConfig() .environment("CartPole-v1") .env_runners(num_envs_per_env_runner=3) ) env_runner = SingleAgentEnvRunner(config=config) assert isinstance(env_runner.env.env, gym.vector.SyncVectorEnv) # different vectorize mode options contained in gymnasium registry for env_name, mode, expected_env_type in [ ("CartPole-v1", "sync", gym.vector.SyncVectorEnv), ("CartPole-v1", gym.VectorizeMode.SYNC, gym.vector.SyncVectorEnv), ("CartPole-v1", "async", gym.vector.AsyncVectorEnv), ("CartPole-v1", gym.VectorizeMode.ASYNC, gym.vector.AsyncVectorEnv), ("CartPole-v1", "vector_entry_point", CartPoleVectorEnv), ("CartPole-v1", gym.VectorizeMode.VECTOR_ENTRY_POINT, CartPoleVectorEnv), # TODO (mark) re-add with ale-py 0.11 support # ("ALE/Pong-v5", "vector_entry_point", AtariVectorEnv), # ("ALE/Pong-v5", gym.VectorizeMode.VECTOR_ENTRY_POINT, AtariVectorEnv), ]: config = ( AlgorithmConfig() .environment(env_name) .env_runners(gym_env_vectorize_mode=mode, num_envs_per_env_runner=3) ) env_runner = SingleAgentEnvRunner(config=config) assert isinstance(env_runner.env.env, expected_env_type) # test with tune registered vector environment config = ( AlgorithmConfig() .environment( "tune-registered-vector", env_config={"sutton_barto_reward": True} ) .env_runners( gym_env_vectorize_mode="vector_entry_point", num_envs_per_env_runner=3 ) ) env_runner = SingleAgentEnvRunner(config=config) assert isinstance(env_runner.env.env, CartPoleVectorEnv) assert env_runner.env.env._sutton_barto_reward is True # test 
with callable vector environment config = ( AlgorithmConfig() .environment( lambda cfg: CartPoleVectorEnv(**cfg), env_config={"sutton_barto_reward": True}, ) .env_runners( gym_env_vectorize_mode="vector_entry_point", num_envs_per_env_runner=3 ) ) env_runner = SingleAgentEnvRunner(config=config) assert isinstance(env_runner.env.env, CartPoleVectorEnv) assert env_runner.env.env._sutton_barto_reward is True # check passing the env config with a gym_env_vectorize_mode config = ( AlgorithmConfig() .environment("CartPole-v1", env_config={"sutton_barto_reward": True}) .env_runners(gym_env_vectorize_mode="sync", num_envs_per_env_runner=3) ) env_runner = SingleAgentEnvRunner(config=config) assert env_runner.env.env.get_attr("_sutton_barto_reward") == (True, True, True) config = ( AlgorithmConfig() .environment("CartPole-v1", env_config={"sutton_barto_reward": True}) .env_runners( gym_env_vectorize_mode="vector_entry_point", num_envs_per_env_runner=3 ) ) env_runner = SingleAgentEnvRunner(config=config) assert env_runner.env.env._sutton_barto_reward is True if __name__ == "__main__": import sys import pytest sys.exit(pytest.main(["-v", __file__]))
TestSingleAgentEnvRunner
python
django-extensions__django-extensions
tests/test_shortuuid_field.py
{ "start": 191, "end": 1207 }
class ____(TestCase): def test_UUID_field_create(self): j = ShortUUIDTestModel_field.objects.create( a=6, uuid_field="vytxeTZskVKR7C7WgdSP3d" ) self.assertEqual(j.uuid_field, "vytxeTZskVKR7C7WgdSP3d") def test_UUID_field_pk_create(self): j = ShortUUIDTestModel_pk.objects.create(uuid_field="vytxeTZskVKR7C7WgdSP3d") self.assertEqual(j.uuid_field, "vytxeTZskVKR7C7WgdSP3d") self.assertEqual(j.pk, "vytxeTZskVKR7C7WgdSP3d") def test_UUID_field_pk_agregate_create(self): j = ShortUUIDTestAgregateModel.objects.create(a=6) self.assertEqual(j.a, 6) self.assertIsInstance(j.pk, str) self.assertTrue(len(j.pk) < 23) def test_UUID_field_manytomany_create(self): j = ShortUUIDTestManyToManyModel.objects.create( uuid_field="vytxeTZskVKR7C7WgdSP3e" ) self.assertEqual(j.uuid_field, "vytxeTZskVKR7C7WgdSP3e") self.assertEqual(j.pk, "vytxeTZskVKR7C7WgdSP3e")
ShortUUIDFieldTest
python
matplotlib__matplotlib
lib/matplotlib/_mathtext.py
{ "start": 27849, "end": 28315 }
class ____(DejaVuFonts): """ A font handling class for the DejaVu Sans fonts If a glyph is not found it will fallback to Stix Sans """ _fontmap = { 'rm': 'DejaVu Sans', 'it': 'DejaVu Sans:italic', 'bf': 'DejaVu Sans:weight=bold', 'bfit': 'DejaVu Sans:italic:bold', 'sf': 'DejaVu Sans', 'tt': 'DejaVu Sans Mono', 'ex': 'DejaVu Sans Display', 0: 'DejaVu Sans', }
DejaVuSansFonts
python
huggingface__transformers
src/transformers/models/blip/modeling_blip.py
{ "start": 38337, "end": 46840 }
class ____(BlipPreTrainedModel, GenerationMixin): config: BlipConfig _tied_weights_keys = { "text_decoder.cls.predictions.decoder.bias": "text_decoder.cls.predictions.bias", "text_decoder.cls.predictions.decoder.weight": "text_decoder.bert.embeddings.word_embeddings.weight", } def __init__(self, config: BlipConfig): super().__init__(config) self.vision_model = BlipVisionModel(config.vision_config) self.text_encoder = BlipTextModel(config.text_config, add_pooling_layer=False) self.text_decoder = BlipTextLMHeadModel(config.text_config) self.decoder_pad_token_id = config.text_config.pad_token_id self.decoder_start_token_id = config.text_config.bos_token_id # Initialize weights and apply final processing self.post_init() def set_input_embeddings(self, value): self.text_encoder.set_input_embeddings(value) def get_input_embeddings(self): # This will return shared embeddings if they are shared else specific to encoder. return self.text_encoder.get_input_embeddings() @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor, pixel_values: torch.FloatTensor, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, interpolate_pos_encoding: bool = False, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BlipTextVisionModelOutput]: r""" Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, BlipForQuestionAnswering >>> model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> # training >>> text = "How many cats are in the picture?" 
>>> label = "2" >>> inputs = processor(images=image, text=text, return_tensors="pt") >>> labels = processor(text=label, return_tensors="pt").input_ids >>> inputs["labels"] = labels >>> outputs = model(**inputs) >>> loss = outputs.loss >>> loss.backward() >>> # inference >>> text = "How many cats are in the picture?" >>> inputs = processor(images=image, text=text, return_tensors="pt") >>> outputs = model.generate(**inputs) >>> print(processor.decode(outputs[0], skip_special_tokens=True)) 2 ```""" if labels is None and decoder_input_ids is None: raise ValueError( "Either `decoder_input_ids` or `labels` should be passed when calling `forward` with" " `BlipForQuestionAnswering`. if you are training the model make sure that `labels` is passed, if you" " are using the model for inference make sure that `decoder_input_ids` is passed or call `generate`" ) vision_outputs = self.vision_model( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, **kwargs, ) image_embeds = vision_outputs.last_hidden_state image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long) question_embeds = self.text_encoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, **kwargs, ) if labels is not None and decoder_input_ids is None: # labels are already shifted right, see: https://github.com/huggingface/transformers/pull/23153 decoder_input_ids = labels question_embeds = question_embeds[0] answer_output = self.text_decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=question_embeds, encoder_attention_mask=attention_mask, labels=labels, reduction="mean", **kwargs, ) if labels is not None: decoder_loss = answer_output.loss.mean() else: decoder_loss = None return BlipTextVisionModelOutput( loss=decoder_loss, image_embeds=image_embeds, last_hidden_state=vision_outputs.last_hidden_state, hidden_states=vision_outputs.hidden_states, 
attentions=vision_outputs.attentions, ) @torch.no_grad() def generate( self, input_ids: torch.LongTensor, pixel_values: torch.FloatTensor, attention_mask: Optional[torch.LongTensor] = None, interpolate_pos_encoding: bool = False, **generate_kwargs, ) -> torch.LongTensor: r""" Overrides *generate* function to be able to use the model as a conditional generator Parameters: input_ids (*torch.LongTensor* of shape *(batch_size, sequence_length)*): The sequence used as a prompt for the generation. pixel_values (*torch.FloatTensor* of shape *(batch_size, num_channels, image_height, image_width)*: Input image to be processed attention_mask (*torch.LongTensor* of shape *(batch_size, sequence_length)*, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`. `1` for tokens that are NOT MASKED, `0` for MASKED tokens. **generate_kwargs: Additional arguments passed to the *generate* function of the decoder Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, BlipForQuestionAnswering >>> model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base") >>> processor = AutoProcessor.from_pretrained("Salesforce/blip-vqa-base") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> text = "How many cats are in the picture?" 
>>> inputs = processor(images=image, text=text, return_tensors="pt") >>> outputs = model.generate(**inputs) >>> print(processor.decode(outputs[0], skip_special_tokens=True)) 2 ``` """ vision_outputs = self.vision_model( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, ) image_embeds = vision_outputs[0] image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) if isinstance(input_ids, list): input_ids = torch.LongTensor(input_ids) question_outputs = self.text_encoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=False, ) question_embeds = question_outputs[0] question_attention_mask = torch.ones( question_embeds.size()[:-1], dtype=torch.long, device=question_embeds.device ) bos_ids = torch.full( (question_embeds.size(0), 1), fill_value=self.decoder_start_token_id, device=question_embeds.device ) outputs = self.text_decoder.generate( input_ids=bos_ids, eos_token_id=self.config.text_config.sep_token_id, pad_token_id=self.config.text_config.pad_token_id, encoder_hidden_states=question_embeds, encoder_attention_mask=question_attention_mask, **generate_kwargs, ) return outputs @auto_docstring( custom_intro=""" BLIP Model with a vision and text projector, and a classification head on top. The model is used in the context of image-text retrieval. Given an image and a text, the model returns the probability of the text being relevant to the image. """ )
BlipForQuestionAnswering
python
Textualize__textual
docs/examples/guide/styles/padding01.py
{ "start": 401, "end": 750 }
class ____(App): def compose(self) -> ComposeResult: self.widget = Static(TEXT) yield self.widget def on_mount(self) -> None: self.widget.styles.background = "purple" self.widget.styles.width = 30 self.widget.styles.padding = 2 if __name__ == "__main__": app = PaddingApp() app.run()
PaddingApp
python
jazzband__django-model-utils
model_utils/tracker.py
{ "start": 15859, "end": 16654 }
class ____(FieldInstanceTracker): def has_changed(self, field: str) -> bool: """Returns ``True`` if field has changed from currently saved value""" if not self.instance.pk: return True elif field in self.saved_data: prev: object = self.previous(field) curr: object = self.get_field_value(field) return prev != curr else: raise FieldError('field "%s" not tracked' % field) def changed(self) -> dict[str, Any]: """Returns dict of fields that changed since save (with old values)""" if not self.instance.pk: return {} saved = self.saved_data.items() current = self.current() return {k: v for k, v in saved if v != current[k]}
ModelInstanceTracker
python
pyodide__pyodide
src/py/pyodide/console.py
{ "start": 20665, "end": 24703 }
class ____(Console): # TODO: Figure out proper SKIPIF syntax for Firefox and Safari """ A subclass of :py:class:`Console` that uses :js:func:`pyodide.loadPackagesFromImports` before running the code. Example: >>> from pyodide.console import PyodideConsole # doctest: +SKIP >>> console = PyodideConsole() # doctest: +SKIP >>> # This will automatically load numpy before execution >>> future = console.push("import numpy as np; print(np.array([1, 2, 3]))") # doctest: +SKIP >>> print(future.syntax_check) # doctest: +SKIP complete # doctest: +SKIP """ async def runcode(self, source: str, code: CodeRunner) -> ConsoleFuture: """Execute a code object. All exceptions are caught except SystemExit, which is reraised. Returns ------- The return value is a dependent sum type with the following possibilities: * `("success", result : Any)` -- the code executed successfully * `("exception", message : str)` -- An exception occurred. `message` is the result of calling :py:meth:`Console.formattraceback`. """ from pyodide_js import loadPackagesFromImports await loadPackagesFromImports(source) return await super().runcode(source, code) def shorten( text: str, limit: int = 1000, split: int | None = None, separator: str = "..." ) -> str: """Shorten ``text`` if it is longer than ``limit``. If ``len(text) <= limit`` then return ``text`` unchanged. If ``text`` is longer than ``limit`` then return the firsts ``split`` characters and the last ``split`` characters separated by ``separator``. The default value for ``split`` is `limit // 2`. Values of ``split`` larger than ``len(value) // 2`` will have the same effect as when ``split`` is `len(value) // 2`. A value error is raised if ``limit`` is less than 2. Parameters ---------- text : The string to shorten if it is longer than ``limit``. limit : The integer to compare against the length of ``text``. Defaults to ``1000``. split : The integer of the split string to return. Defaults to ``limit // 2``. separator : The string of the separator string. 
Defaults to ``"..."``. Returns ------- If ``text`` is longer than ``limit``, return the shortened string, otherwise return ``text``. Examples -------- >>> from pyodide.console import shorten >>> sep = "_" >>> shorten("abcdefg", limit=5, separator=sep) 'ab_fg' >>> shorten("abcdefg", limit=12, separator=sep) 'abcdefg' >>> shorten("abcdefg", limit=6, separator=sep) 'abc_efg' >>> shorten("abcdefg", limit=6, split=1, separator=sep) 'a_g' """ if limit < 2: raise ValueError("limit must be greater than or equal to 2.") if split is None: split = limit // 2 split = min(split, len(text) // 2) if len(text) > limit: text = f"{text[:split]}{separator}{text[-split:]}" return text def repr_shorten( value: Any, limit: int = 1000, split: int | None = None, separator: str = "..." ) -> str: """Compute the string representation of ``value`` and shorten it if necessary. This is equivalent to ``shorten(repr(value), limit, split, separator)``, but a value error is raised if ``limit`` is less than ``4``. Examples -------- >>> from pyodide.console import repr_shorten >>> sep = "_" >>> repr_shorten("abcdefg", limit=8, separator=sep) "'abc_efg'" >>> repr_shorten("abcdefg", limit=12, separator=sep) "'abcdefg'" >>> for i in range(4, 10): ... repr_shorten(123456789, limit=i, separator=sep) '12_89' '12_89' '123_789' '123_789' '1234_6789' '123456789' """ if limit < 4: raise ValueError("limit must be greater than or equal to 4.") text = repr(value) return shorten(text, limit=limit, split=split, separator=separator)
PyodideConsole
python
allegroai__clearml
clearml/backend_api/services/v2_9/auth.py
{ "start": 15037, "end": 16137 }
class ____(Request): """ Revokes (and deletes) a set (key, secret) of credentials for the authenticated user. :param access_key: Credentials key :type access_key: str """ _service = "auth" _action = "revoke_credentials" _version = "2.9" _schema = { "definitions": {}, "properties": {"access_key": {"description": "Credentials key", "type": ["string", "null"]}}, "required": ["key_id"], "type": "object", } def __init__(self, access_key: Optional[str] = None, **kwargs: Any) -> None: super(RevokeCredentialsRequest, self).__init__(**kwargs) self.access_key = access_key @schema_property("access_key") def access_key(self) -> Optional[str]: return self._property_access_key @access_key.setter def access_key(self, value: Optional[str]) -> None: if value is None: self._property_access_key = None return self.assert_isinstance(value, "access_key", six.string_types) self._property_access_key = value
RevokeCredentialsRequest
python
great-expectations__great_expectations
great_expectations/execution_engine/partition_and_sample/data_partitioner.py
{ "start": 1325, "end": 2463 }
class ____(enum.Enum): """The names of available partitioner_methods.""" PARTITION_ON_YEAR = "partition_on_year" PARTITION_ON_YEAR_AND_MONTH = "partition_on_year_and_month" PARTITION_ON_YEAR_AND_MONTH_AND_DAY = "partition_on_year_and_month_and_day" PARTITION_ON_DATE_PARTS = "partition_on_date_parts" PARTITION_ON_WHOLE_TABLE = "partition_on_whole_table" PARTITION_ON_COLUMN_VALUE = "partition_on_column_value" PARTITION_ON_CONVERTED_DATETIME = "partition_on_converted_datetime" PARTITION_ON_DIVIDED_INTEGER = "partition_on_divided_integer" PARTITION_ON_MOD_INTEGER = "partition_on_mod_integer" PARTITION_ON_MULTI_COLUMN_VALUES = "partition_on_multi_column_values" PARTITION_ON_HASHED_COLUMN = "partition_on_hashed_column" @override def __eq__(self, other: str | PartitionerMethod): # type: ignore[override] # expects `object` if isinstance(other, str): return self.value.lower() == other.lower() return self.value.lower() == other.value.lower() @override def __hash__(self: PartitionerMethod): return hash(self.value)
PartitionerMethod
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/table.py
{ "start": 1373, "end": 1577 }
class ____(graphene.ObjectType): constraints = graphene.Field(GrapheneTableConstraints) columns = non_null_list(GrapheneTableColumn) class Meta: name = "TableSchema"
GrapheneTableSchema
python
google__pytype
pytype/pretty_printer_base.py
{ "start": 328, "end": 4564 }
class ____(abc.ABC): """Pretty printer methods depending only on pytd types. Subclasses are expected to handle abstract->pytd conversion. """ def __init__(self, ctx): self.ctx = ctx @staticmethod def show_constant(val: types.BaseValue) -> str: """Pretty-print a value if it is a constant. Recurses into a constant, printing the underlying Python value for constants and just using "..." for everything else (e.g., Variables). This is useful for generating clear error messages that show the exact values related to an error while preventing implementation details from leaking into the message. Args: val: an abstract value. Returns: A string of the pretty-printed constant. """ def _ellipsis_printer(v): if isinstance(v, types.PythonConstant): return v.str_of_constant(_ellipsis_printer) # pytype: disable=attribute-error return "..." return _ellipsis_printer(val) def print_pytd(self, pytd_type: pytd.Type) -> str: """Print the name of the pytd type.""" typ = pytd_utils.CanonicalOrdering( optimize.Optimize(pytd_type.Visit(visitors.RemoveUnknownClasses())) ) name = pytd_utils.Print(typ) # Clean up autogenerated namedtuple names, e.g. "namedtuple-X-a-_0-c" # becomes just "X", by extracting out just the type name. if "namedtuple" in name: return escape.unpack_namedtuple(name) nested_class_match = re.search(r"_(?:\w+)_DOT_", name) if nested_class_match: # Pytype doesn't have true support for nested classes. Instead, for # class Foo: # class Bar: ... # it outputs: # class _Foo_DOT_Bar: ... # class Foo: # Bar = ... # type: Type[_Foo_DOT_Bar] # Replace _Foo_DOT_Bar with Foo.Bar in error messages for readability. # TODO(b/35138984): Get rid of this hack. 
start = nested_class_match.start() return name[:start] + name[start + 1 :].replace("_DOT_", ".") return name def join_printed_types(self, typs: Iterable[str]) -> str: """Pretty-print the union of the printed types.""" typs = set(typs) # dedup if len(typs) == 1: return next(iter(typs)) elif typs: literal_contents = set() optional = False new_types = [] for t in typs: if t.startswith("Literal["): literal_contents.update(t[len("Literal[") : -1].split(", ")) elif t == "None": optional = True else: new_types.append(t) if literal_contents: literal = f"Literal[{', '.join(sorted(literal_contents))}]" new_types.append(literal) if len(new_types) > 1: out = f"Union[{', '.join(sorted(new_types))}]" else: out = new_types[0] if optional: out = f"Optional[{out}]" return out else: # TODO(mdemello): change this to Never return "nothing" @abc.abstractmethod def print_generic_type(self, t: types.BaseValue) -> str: """Returns a string of the generic type of t. For example, if t is `[0]`, then this method returns "list[int]". Args: t: An abstract value. """ @abc.abstractmethod def print_type_of_instance(self, t: types.BaseValue, instance=None) -> str: """Returns a string of the type of an instance of t. For example, if t is `int`, then this method returns "int". Args: t: An abstract value. instance: A specific instance of t to print. """ @abc.abstractmethod def print_type(self, t, literal=False) -> str: """Returns a string of the type of t. For example, if t is `0`, then this method returns "int" with literal=False or `Literal[0]` with literal=True. Args: t: An abstract value. literal: Whether to print literals literally. 
""" @abc.abstractmethod def print_function_def(self, fn: types.Function) -> str: """Print a function definition.""" @abc.abstractmethod def print_var_type(self, var: types.Variable, *args) -> str: """Print a pytype variable as a type.""" @abc.abstractmethod def show_variable(self, var: types.Variable) -> str: """Show variable as 'name: typ' or 'pyval: typ' if available."""
PrettyPrinterBase
python
huggingface__transformers
src/transformers/models/qwen3_omni_moe/configuration_qwen3_omni_moe.py
{ "start": 39116, "end": 46110 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`Qwen3OmniMoeTalker`]. It is used to instantiate a Qwen3-Omni multi-modal talker model capable of handling text, audio, and vision modalities in a unified architecture. The model integrates a text decoder with a code predictor for autoregressive generation of both semantic and acoustic tokens, enabling speech and multimodal content generation. This configuration wraps sub-configurations for the text and code predictor components, allowing modular setup and initialization. e.g. [Qwen/Qwen3-Omni-7B](https://huggingface.co/Qwen/Qwen3-Omni-7B) Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: code_predictor_config (`dict`, *optional*): A dictionary of configuration parameters used to initialize a [`Qwen3OmniMoeTalkerCodePredictorConfig`]. If not provided, defaults will be used. text_config (`dict`, *optional*): A dictionary of configuration parameters used to initialize a [`Qwen3OmniMoeTalkerTextConfig`]. If not provided, defaults will be used. num_code_groups (`int`, *optional*, defaults to 32): Number of codebook groups used in the predicted acoustic token sequence, corresponding to multi-codebook VQ representation. thinker_hidden_size (`int`, *optional*, defaults to 2048): Hidden dimension size of the thinker module used for intermediate reasoning or latent planning before audio generation. codec_eos_token_id (`int`, *optional*, defaults to 4198): Token ID representing the end-of-speech token in the codec-generated sequence. accept_hidden_layer (`int`, *optional*, defaults to 18): Index of the hidden layer whose output is used for accepting or refining generated tokens during think-and-speak process. codec_nothink_id (`int`, *optional*, defaults to 4203): Token ID indicating no thinking step is required during generation. 
codec_think_bos_id (`int`, *optional*, defaults to 4204): Token ID marking the beginning of a thinking sequence. codec_think_eos_id (`int`, *optional*, defaults to 4205): Token ID marking the end of a thinking sequence. codec_pad_id (`int`, *optional*, defaults to 4196): Padding token ID used in codec input sequences. codec_bos_id (`int`, *optional*, defaults to 4197): Beginning-of-speech token ID in codec sequences. audio_token_id (`int`, *optional*, defaults to 151646): Special token ID used to indicate the position of audio tokens in the input sequence. image_token_id (`int`, *optional*, defaults to 151655): Special token ID used to represent image inputs in the multimodal context. video_token_id (`int`, *optional*, defaults to 151656): Special token ID used to represent video inputs. vision_start_token_id (`int`, *optional*, defaults to 151652): Token ID indicating the start of a visual input sequence (e.g., image or video embeddings). position_id_per_seconds (`int`, *optional*, defaults to 25): Number of position IDs allocated per second of audio content, used for temporal alignment in generation. audio_start_token_id (`int`, *optional*, defaults to 151669): Token ID that indicates the start of an audio generation segment in the output. speaker_id (`dict`, *optional*): Speaker name to speaker id dict. Example: ```python >>> from transformers import Qwen3OmniMoeTalkerConfig, Qwen3OmniMoeTalker >>> # Initialize a Qwen3OmniMoeTalkerConfig with default sub-configurations >>> config = Qwen3OmniMoeTalkerConfig( ... num_code_groups=32, ... thinker_hidden_size=2048, ... 
) >>> # Initialize the full Qwen3-Omni Talker model >>> model = Qwen3OmniMoeTalker(config) >>> # Access the model configuration >>> config = model.config >>> print(config.text_config) # Access text decoder configuration >>> print(config.code_predictor_config) # Access code predictor configuration ```""" sub_configs = { "code_predictor_config": Qwen3OmniMoeTalkerCodePredictorConfig, "text_config": Qwen3OmniMoeTalkerTextConfig, } def __init__( self, code_predictor_config=None, text_config=None, num_code_groups=32, thinker_hidden_size=2048, codec_eos_token_id=4198, accept_hidden_layer=18, codec_nothink_id=4203, codec_think_bos_id=4204, codec_think_eos_id=4205, codec_pad_id=4196, codec_bos_id=4197, audio_token_id=151646, image_token_id=151655, video_token_id=151656, vision_start_token_id=151652, position_id_per_seconds=25, audio_start_token_id=151669, speaker_id=None, **kwargs, ): if code_predictor_config is None: code_predictor_config = {} self.code_predictor_config = Qwen3OmniMoeTalkerCodePredictorConfig() logger.info("code_predictor_config is None. Initializing code_predictor_config model with default values") elif isinstance(code_predictor_config, Qwen3OmniMoeTalkerCodePredictorConfig): self.code_predictor_config = code_predictor_config else: self.code_predictor_config = Qwen3OmniMoeTalkerCodePredictorConfig(**code_predictor_config) if text_config is None: text_config = {} self.text_config = Qwen3OmniMoeTalkerTextConfig() logger.info("talker text_config is None. 
Initializing talker text model with default values") elif isinstance(text_config, Qwen3OmniMoeTalkerTextConfig): self.text_config = text_config else: self.text_config = Qwen3OmniMoeTalkerTextConfig(**text_config) self.num_code_groups = num_code_groups self.thinker_hidden_size = thinker_hidden_size self.codec_eos_token_id = codec_eos_token_id self.accept_hidden_layer = accept_hidden_layer self.codec_nothink_id = codec_nothink_id self.codec_think_bos_id = codec_think_bos_id self.codec_think_eos_id = codec_think_eos_id self.codec_pad_id = codec_pad_id self.codec_bos_id = codec_bos_id self.audio_token_id = audio_token_id self.image_token_id = image_token_id self.video_token_id = video_token_id self.position_id_per_seconds = position_id_per_seconds self.audio_start_token_id = audio_start_token_id self.vision_start_token_id = vision_start_token_id self.speaker_id = speaker_id super().__init__(**kwargs)
Qwen3OmniMoeTalkerConfig
python
openai__openai-python
src/openai/types/beta/realtime/response_content_part_added_event.py
{ "start": 635, "end": 1232 }
class ____(BaseModel): content_index: int """The index of the content part in the item's content array.""" event_id: str """The unique ID of the server event.""" item_id: str """The ID of the item to which the content part was added.""" output_index: int """The index of the output item in the response.""" part: Part """The content part that was added.""" response_id: str """The ID of the response.""" type: Literal["response.content_part.added"] """The event type, must be `response.content_part.added`."""
ResponseContentPartAddedEvent
python
openai__openai-python
src/openai/types/graders/label_model_grader.py
{ "start": 1145, "end": 1533 }
class ____(BaseModel): content: InputContent """Inputs to the model - can contain template strings.""" role: Literal["user", "assistant", "system", "developer"] """The role of the message input. One of `user`, `assistant`, `system`, or `developer`. """ type: Optional[Literal["message"]] = None """The type of the message input. Always `message`."""
Input
python
kamyu104__LeetCode-Solutions
Python/minimum-time-for-k-virus-variants-to-spread.py
{ "start": 109, "end": 2560 }
class ____(object): # 0-based index def __init__(self, N, build_fn=lambda x, y: [y]*(2*x), query_fn=lambda x, y: y if x is None else max(x, y), update_fn=lambda x, y: y if x is None else x+y, default_val=0): self.N = N self.H = (N-1).bit_length() self.query_fn = query_fn self.update_fn = update_fn self.default_val = default_val self.tree = build_fn(N, default_val) self.lazy = [None]*N def __apply(self, x, val): self.tree[x] = self.update_fn(self.tree[x], val) if x < self.N: self.lazy[x] = self.update_fn(self.lazy[x], val) def update(self, L, R, h): # Time: O(logN), Space: O(N) def pull(x): while x > 1: x //= 2 self.tree[x] = self.query_fn(self.tree[x*2], self.tree[x*2+1]) if self.lazy[x] is not None: self.tree[x] = self.update_fn(self.tree[x], self.lazy[x]) L += self.N R += self.N L0, R0 = L, R while L <= R: if L & 1: # is right child self.__apply(L, h) L += 1 if R & 1 == 0: # is left child self.__apply(R, h) R -= 1 L //= 2 R //= 2 pull(L0) pull(R0) def query(self, L, R): # Time: O(logN), Space: O(N) def push(x): n = 2**self.H while n != 1: y = x // n if self.lazy[y] is not None: self.__apply(y*2, self.lazy[y]) self.__apply(y*2 + 1, self.lazy[y]) self.lazy[y] = None n //= 2 result = None if L > R: return result L += self.N R += self.N push(L) push(R) while L <= R: if L & 1: # is right child result = self.query_fn(result, self.tree[L]) L += 1 if R & 1 == 0: # is left child result = self.query_fn(result, self.tree[R]) R -= 1 L //= 2 R //= 2 return result def __str__(self): showList = [] for i in xrange(self.N): showList.append(self.query(i, i)) return ",".join(map(str, showList)) # competitive programming solution
SegmentTree
python
doocs__leetcode
solution/0900-0999/0912.Sort an Array/Solution2.py
{ "start": 0, "end": 800 }
class ____: def sortArray(self, nums: List[int]) -> List[int]: def merge_sort(l, r): if l >= r: return mid = (l + r) >> 1 merge_sort(l, mid) merge_sort(mid + 1, r) i, j = l, mid + 1 tmp = [] while i <= mid and j <= r: if nums[i] <= nums[j]: tmp.append(nums[i]) i += 1 else: tmp.append(nums[j]) j += 1 if i <= mid: tmp.extend(nums[i : mid + 1]) if j <= r: tmp.extend(nums[j : r + 1]) for i in range(l, r + 1): nums[i] = tmp[i - l] merge_sort(0, len(nums) - 1) return nums
Solution
python
django-crispy-forms__django-crispy-forms
tests/forms.py
{ "start": 4600, "end": 4952 }
class ____(BaseModelForm): is_company = forms.CharField(label="company", required=False, widget=forms.CheckboxInput()) password2 = forms.CharField(label="re-enter password", max_length=30, required=True, widget=forms.PasswordInput()) class Meta: model = CrispyTestModel fields = ("email", "password", "password2")
SampleForm7
python
huggingface__transformers
tests/models/vit/test_image_processing_vit.py
{ "start": 2697, "end": 4199 }
class ____(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = ViTImageProcessor if is_vision_available() else None fast_image_processing_class = ViTImageProcessorFast if is_torchvision_available() else None def setUp(self): super().setUp() self.image_processor_tester = ViTImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42})
ViTImageProcessingTest
python
wandb__wandb
tests/unit_tests/test_wandb_summary.py
{ "start": 194, "end": 3154 }
class ____: # current_dict: t.Dict # summary_record: t.Optional[SummaryRecord] def __init__(self, current_dict: Dict) -> None: self.reset(current_dict) def reset(self, current_dict: Dict) -> None: self.summary_record = None self.current_dict = current_dict def update_callback(self, summary_record: "SummaryRecord") -> None: self.summary_record = summary_record def get_current_summary_callback(self) -> Dict: return self.current_dict def check_updates(self, key: Tuple[str], value: Any) -> "MockCallback": assert self.summary_record is not None for item in self.summary_record.update: if item.key == key and item.value == value: return self raise AssertionError def check_removes(self, key: Tuple[str]) -> "MockCallback": assert self.summary_record is not None for item in self.summary_record.remove: if item.key == key: return self raise AssertionError def create_summary_and_mock( current_dict: Dict, ) -> Tuple["wandb_sdk.Summary", "MockCallback"]: m = MockCallback(current_dict) s = wandb_sdk.Summary( m.get_current_summary_callback, ) s._set_update_callback( m.update_callback, ) return s, m def test_attrib_get(): s, _ = create_summary_and_mock({"this": 2}) assert s.this == 2 def test_item_get(): s, _ = create_summary_and_mock({"this": 2}) assert s["this"] == 2 def test_cb_attrib(): s, m = create_summary_and_mock({}) s.this = 2 m.check_updates(("this",), 2) def test_cb_item(): s, m = create_summary_and_mock({}) s["this"] = 2 m.check_updates(("this",), 2) def test_cb_update(): s, m = create_summary_and_mock({}) s.update(dict(this=1, that=2)) m.check_updates(("this",), 1) m.check_updates(("that",), 2) def test_cb_item_nested(): s, m = create_summary_and_mock({}) s["this"] = 2 m.check_updates(("this",), 2) m.reset({}) s["that"] = dict(nest1=dict(nest2=4, nest2b=5)) m.check_updates(("that",), dict(nest1=dict(nest2=4, nest2b=5))) m.reset({"that": {"nest1": {}}}) s["that"]["nest1"]["nest2"] = 3 m.check_updates(("that", "nest1", "nest2"), 3) m.reset({"that": {}}) s["that"]["nest1"] 
= 8 m.check_updates(("that", "nest1"), 8) m.reset({"that": {}}) s["that"]["nest1a"] = dict(nest2c=9) m.check_updates(("that", "nest1a"), dict(nest2c=9)) def test_cb_delete_item(): s, m = create_summary_and_mock({"this": 3}) del s["this"] m.check_removes(("this",)) m.reset({"this": {"nest1": 2}}) del s["this"]["nest1"] m.check_removes(("this", "nest1")) def test_update_with_nested_dict(): s, m = create_summary_and_mock({}) s.update(dict(that=dict(nest1=dict(nest2=4, nest2b=5)))) m.check_updates(("that",), dict(nest1=dict(nest2=4, nest2b=5)))
MockCallback
python
getsentry__sentry
tests/sentry/preprod/pull_request/test_comment_types.py
{ "start": 143, "end": 22341 }
class ____: def test_parse_github_issue_comments_real_data(self): """Test parsing real GitHub issue comments from actual PR data.""" raw_comments = [ { "url": "https://api.github.com/repos/test-org/test-repo/issues/comments/1111111111", "html_url": "https://github.com/test-org/test-repo/pull/123#issuecomment-1111111111", "issue_url": "https://api.github.com/repos/test-org/test-repo/issues/123", "id": 1111111111, "node_id": "IC_kwDOKLDwMM7Bdug5", "user": { "login": "test-bot[bot]", "id": 11111111, "node_id": "MDM6Qm90NzU4NjQ3MjI=", "avatar_url": "https://avatars.githubusercontent.com/in/12345?v=4", "gravatar_id": "", "url": "https://api.github.com/users/test-bot%5Bbot%5D", "html_url": "https://github.com/apps/test-bot", "followers_url": "https://api.github.com/users/test-bot%5Bbot%5D/followers", "following_url": "https://api.github.com/users/test-bot%5Bbot%5D/following{/other_user}", "gists_url": "https://api.github.com/users/test-bot%5Bbot%5D/gists{/gist_id}", "starred_url": "https://api.github.com/users/test-bot%5Bbot%5D/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/test-bot%5Bbot%5D/subscriptions", "organizations_url": "https://api.github.com/users/test-bot%5Bbot%5D/orgs", "repos_url": "https://api.github.com/users/test-bot%5Bbot%5D/repos", "events_url": "https://api.github.com/users/test-bot%5Bbot%5D/events{/privacy}", "received_events_url": "https://api.github.com/users/test-bot%5Bbot%5D/received_events", "type": "Bot", "user_view_type": "public", "site_admin": False, }, "created_at": "2025-09-02T15:20:38Z", "updated_at": "2025-09-02T15:21:23Z", "body": '# 📸 Snapshot Test \n## Base build not found\nNo build was found for the base commit <a href="https://www.github.com/test-org/test-repo/commit/abc123def456?utm_source=github&utm_medium=vcs" target="_blank" rel="noreferrer noopener">abc123d</a>. 
This is required to generate a snapshot diff for your pull request.\n\nIt\'s possible that you created a branch off the base commit before all of the CI steps have finished processing, e.g. the one that uploads a build to our system. If that\'s the case, no problem! Just wait and this will eventually resolve.\n\n---\n\n:flying_saucer: Powered by <a href="https://www.example.com/?utm_source=github&utm_medium=vcs" target="_blank" rel="noreferrer noopener">Example Tools</a>', "author_association": "NONE", "reactions": { "url": "https://api.github.com/repos/test-org/test-repo/issues/comments/1111111111/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0, }, "performed_via_github_app": { "id": 12345, "client_id": "Iv1.aaaabbbbccccdddd", "slug": "test-bot", "node_id": "MDM6QXBwOTI0NjU=", "owner": { "login": "TestOrg", "id": 22222222, "node_id": "MDEyOk9yZ2FuaXphdGlvbjc0MDMzNDg2", "avatar_url": "https://avatars.githubusercontent.com/u/22222222?v=4", "gravatar_id": "", "url": "https://api.github.com/users/TestOrg", "html_url": "https://github.com/TestOrg", "followers_url": "https://api.github.com/users/TestOrg/followers", "following_url": "https://api.github.com/users/TestOrg/following{/other_user}", "gists_url": "https://api.github.com/users/TestOrg/gists{/gist_id}", "starred_url": "https://api.github.com/users/TestOrg/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/TestOrg/subscriptions", "organizations_url": "https://api.github.com/users/TestOrg/orgs", "repos_url": "https://api.github.com/users/TestOrg/repos", "events_url": "https://api.github.com/users/TestOrg/events{/privacy}", "received_events_url": "https://api.github.com/users/TestOrg/received_events", "type": "Organization", "user_view_type": "public", "site_admin": False, }, "name": "Test Bot", "description": "Test bot for continuous integration", "external_url": "https://www.example.com/", "html_url": 
"https://github.com/apps/test-bot", "created_at": "2020-12-11T22:38:09Z", "updated_at": "2022-04-19T03:13:33Z", "permissions": { "checks": "write", "metadata": "read", "pull_requests": "write", }, "events": ["pull_request"], }, }, { "url": "https://api.github.com/repos/test-org/test-repo/issues/comments/2222222222", "html_url": "https://github.com/test-org/test-repo/pull/123#issuecomment-2222222222", "issue_url": "https://api.github.com/repos/test-org/test-repo/issues/123", "id": 2222222222, "node_id": "IC_kwDOKLDwMM7BePlk", "user": { "login": "testuser", "id": 33333333, "node_id": "MDQ6VXNlcjMzMjU5Nw==", "avatar_url": "https://avatars.githubusercontent.com/u/33333333?v=4", "gravatar_id": "", "url": "https://api.github.com/users/testuser", "html_url": "https://github.com/testuser", "followers_url": "https://api.github.com/users/testuser/followers", "following_url": "https://api.github.com/users/testuser/following{/other_user}", "gists_url": "https://api.github.com/users/testuser/gists{/gist_id}", "starred_url": "https://api.github.com/users/testuser/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/testuser/subscriptions", "organizations_url": "https://api.github.com/users/testuser/orgs", "repos_url": "https://api.github.com/users/testuser/repos", "events_url": "https://api.github.com/users/testuser/events{/privacy}", "received_events_url": "https://api.github.com/users/testuser/received_events", "type": "User", "user_view_type": "public", "site_admin": False, }, "created_at": "2025-09-02T15:58:45Z", "updated_at": "2025-09-02T15:58:45Z", "body": "This seems to be working but the size and snapshots are just broken on main.", "author_association": "MEMBER", "reactions": { "url": "https://api.github.com/repos/test-org/test-repo/issues/comments/2222222222/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0, }, "performed_via_github_app": None, }, ] comment1 = 
IssueComment.parse_obj(raw_comments[0]) assert isinstance(comment1, IssueComment) assert comment1.id == 1111111111 assert comment1.user is not None assert comment1.user.login == "test-bot[bot]" assert comment1.user.type == "Bot" assert "Snapshot Test" in comment1.body assert comment1.author_association == AuthorAssociation.NONE assert comment1.reactions is not None assert comment1.reactions.total_count == 0 comment2 = IssueComment.parse_obj(raw_comments[1]) assert isinstance(comment2, IssueComment) assert comment2.id == 2222222222 assert comment2.user is not None assert comment2.user.login == "testuser" assert comment2.user.type == "User" assert ( comment2.body == "This seems to be working but the size and snapshots are just broken on main." ) assert comment2.author_association == AuthorAssociation.MEMBER def test_parse_github_review_comments_real_data(self): """Test parsing real GitHub review comments from test-org/test-repo PR.""" raw_comments = [ { "url": "https://api.github.com/repos/test-org/test-repo/pulls/comments/4444444444", "pull_request_review_id": 5555555555, "id": 4444444444, "node_id": "PRRC_kwDOKLDwMM6KE1-e", "diff_hunk": "@@ -1,3 +1,11 @@\n+GIT", "path": "ios/Gemfile.lock", "commit_id": "abc123def456789", "original_commit_id": "abc123def456789", "user": { "login": "reviewer1", "id": 44444444, "node_id": "MDQ6VXNlcjE0NDc3OTg=", "avatar_url": "https://avatars.githubusercontent.com/u/44444444?v=4", "gravatar_id": "", "url": "https://api.github.com/users/reviewer1", "html_url": "https://github.com/reviewer1", "followers_url": "https://api.github.com/users/reviewer1/followers", "following_url": "https://api.github.com/users/reviewer1/following{/other_user}", "gists_url": "https://api.github.com/users/reviewer1/gists{/gist_id}", "starred_url": "https://api.github.com/users/reviewer1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/reviewer1/subscriptions", "organizations_url": "https://api.github.com/users/reviewer1/orgs", 
"repos_url": "https://api.github.com/users/reviewer1/repos", "events_url": "https://api.github.com/users/reviewer1/events{/privacy}", "received_events_url": "https://api.github.com/users/reviewer1/received_events", "type": "User", "user_view_type": "public", "site_admin": False, }, "body": "What is this for? Can we not bump `fastlane-plugin-sentry` like usual?", "created_at": "2025-09-02T15:56:59Z", "updated_at": "2025-09-02T16:01:20Z", "html_url": "https://github.com/test-org/test-repo/pull/554#discussion_r4444444444", "pull_request_url": "https://api.github.com/repos/test-org/test-repo/pulls/554", "author_association": "MEMBER", "_links": { "self": { "href": "https://api.github.com/repos/test-org/test-repo/pulls/comments/4444444444" }, "html": { "href": "https://github.com/test-org/test-repo/pull/554#discussion_r4444444444" }, "pull_request": { "href": "https://api.github.com/repos/test-org/test-repo/pulls/554" }, }, "reactions": { "url": "https://api.github.com/repos/test-org/test-repo/pulls/comments/4444444444/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0, }, "start_line": None, "original_start_line": None, "start_side": None, "line": 1, "original_line": 1, "side": "RIGHT", "original_position": 1, "position": 1, "subject_type": "line", }, { "url": "https://api.github.com/repos/test-org/test-repo/pulls/comments/6666666666", "pull_request_review_id": 7777777777, "id": 6666666666, "node_id": "PRRC_kwDOKLDwMM6KFYov", "diff_hunk": "@@ -1,3 +1,11 @@\n+GIT", "path": "ios/Gemfile.lock", "commit_id": "abc123def456789", "original_commit_id": "abc123def456789", "user": { "login": "reviewer2", "id": 55555555, "node_id": "MDQ6VXNlcjMzMjU5Nw==", "avatar_url": "https://avatars.githubusercontent.com/u/55555555?v=4", "gravatar_id": "", "url": "https://api.github.com/users/reviewer2", "html_url": "https://github.com/reviewer2", "followers_url": "https://api.github.com/users/reviewer2/followers", 
"following_url": "https://api.github.com/users/reviewer2/following{/other_user}", "gists_url": "https://api.github.com/users/reviewer2/gists{/gist_id}", "starred_url": "https://api.github.com/users/reviewer2/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/reviewer2/subscriptions", "organizations_url": "https://api.github.com/users/reviewer2/orgs", "repos_url": "https://api.github.com/users/reviewer2/repos", "events_url": "https://api.github.com/users/reviewer2/events{/privacy}", "received_events_url": "https://api.github.com/users/reviewer2/received_events", "type": "User", "user_view_type": "public", "site_admin": False, }, "body": "It hasn't been released yet so this just points it to the tip of master", "created_at": "2025-09-02T16:57:27Z", "updated_at": "2025-09-02T16:57:27Z", "html_url": "https://github.com/test-org/test-repo/pull/554#discussion_r6666666666", "pull_request_url": "https://api.github.com/repos/test-org/test-repo/pulls/554", "author_association": "MEMBER", "_links": { "self": { "href": "https://api.github.com/repos/test-org/test-repo/pulls/comments/6666666666" }, "html": { "href": "https://github.com/test-org/test-repo/pull/554#discussion_r6666666666" }, "pull_request": { "href": "https://api.github.com/repos/test-org/test-repo/pulls/554" }, }, "reactions": { "url": "https://api.github.com/repos/test-org/test-repo/pulls/comments/6666666666/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0, }, "start_line": None, "original_start_line": None, "start_side": None, "line": 1, "original_line": 1, "side": "RIGHT", "in_reply_to_id": 4444444444, "original_position": 1, "position": 1, "subject_type": "line", }, { "url": "https://api.github.com/repos/test-org/test-repo/pulls/comments/8888888888", "pull_request_review_id": 9999999999, "id": 8888888888, "node_id": "PRRC_kwDOKLDwMM6KPRDd", "diff_hunk": "@@ -168,6 +168,11 @@ platform :ios do\n project_slug: 
'hackernews-ios',\n include_sources: true\n )\n+ sentry_upload_mobile_app(", "path": "ios/fastlane/Fastfile", "commit_id": "abc123def456789", "original_commit_id": "abc123def456789", "user": { "login": "reviewer1", "id": 44444444, "node_id": "MDQ6VXNlcjE0NDc3OTg=", "avatar_url": "https://avatars.githubusercontent.com/u/44444444?v=4", "gravatar_id": "", "url": "https://api.github.com/users/reviewer1", "html_url": "https://github.com/reviewer1", "followers_url": "https://api.github.com/users/reviewer1/followers", "following_url": "https://api.github.com/users/reviewer1/following{/other_user}", "gists_url": "https://api.github.com/users/reviewer1/gists{/gist_id}", "starred_url": "https://api.github.com/users/reviewer1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/reviewer1/subscriptions", "organizations_url": "https://api.github.com/users/reviewer1/orgs", "repos_url": "https://api.github.com/users/reviewer1/repos", "events_url": "https://api.github.com/users/reviewer1/events{/privacy}", "received_events_url": "https://api.github.com/users/reviewer1/received_events", "type": "User", "user_view_type": "public", "site_admin": False, }, "body": "on second thought lets roll with this so we don't have to build the app so many times", "created_at": "2025-09-03T14:57:08Z", "updated_at": "2025-09-03T14:57:09Z", "html_url": "https://github.com/test-org/test-repo/pull/554#discussion_r8888888888", "pull_request_url": "https://api.github.com/repos/test-org/test-repo/pulls/554", "author_association": "MEMBER", "_links": { "self": { "href": "https://api.github.com/repos/test-org/test-repo/pulls/comments/8888888888" }, "html": { "href": "https://github.com/test-org/test-repo/pull/554#discussion_r8888888888" }, "pull_request": { "href": "https://api.github.com/repos/test-org/test-repo/pulls/554" }, }, "reactions": { "url": "https://api.github.com/repos/test-org/test-repo/pulls/comments/8888888888/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, 
"hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0, }, "start_line": None, "original_start_line": None, "start_side": None, "line": 171, "original_line": 171, "side": "RIGHT", "in_reply_to_id": 1010101010, "original_position": 4, "position": 4, "subject_type": "line", }, ] comment1 = ReviewComment.parse_obj(raw_comments[0]) assert isinstance(comment1, ReviewComment) assert comment1.id == 4444444444 assert comment1.path == "ios/Gemfile.lock" assert comment1.user.login == "reviewer1" assert comment1.line == 1 assert comment1.side == ReviewCommentSide.RIGHT assert ( comment1.body == "What is this for? Can we not bump `fastlane-plugin-sentry` like usual?" ) assert comment1.diff_hunk == "@@ -1,3 +1,11 @@\n+GIT" assert comment1.in_reply_to_id is None comment2 = ReviewComment.parse_obj(raw_comments[1]) assert isinstance(comment2, ReviewComment) assert comment2.id == 6666666666 assert comment2.path == "ios/Gemfile.lock" assert comment2.user.login == "reviewer2" assert ( comment2.body == "It hasn't been released yet so this just points it to the tip of master" ) assert comment2.in_reply_to_id == 4444444444 # Reply to first comment comment3 = ReviewComment.parse_obj(raw_comments[2]) assert isinstance(comment3, ReviewComment) assert comment3.id == 8888888888 assert comment3.path == "ios/fastlane/Fastfile" assert comment3.line == 171 assert comment3.reactions is not None assert comment3.reactions.total_count == 1 assert comment3.reactions.plus_one == 1 assert comment3.in_reply_to_id == 1010101010
TestPullRequestCommentTypes
python
python-jsonschema__jsonschema
jsonschema/tests/test_exceptions.py
{ "start": 22465, "end": 22604 }
class ____(TestCase): def test_hashable(self): {exceptions.ValidationError("")} {exceptions.SchemaError("")}
TestHashable
python
getsentry__sentry
tests/sentry/issues/endpoints/test_group_similar_issues_embeddings.py
{ "start": 1682, "end": 31616 }
class ____(APITestCase): def setUp(self) -> None: super().setUp() self.login_as(self.user) self.org = self.create_organization(owner=self.user) self.project = self.create_project(organization=self.org) self.base_error_trace = { "fingerprint": ["my-route", "{{ default }}"], "exception": { "values": [ { "stacktrace": { "frames": [ { "function": "divide_by_zero", "module": "__main__", "filename": "python_onboarding.py", "abs_path": "/Users/jodi/python_onboarding/python_onboarding.py", "lineno": 20, "context_line": " divide = 1/0", "in_app": True, }, ] }, "type": "ZeroDivisionError", "value": "division by zero", } ] }, "platform": "python", } self.event = self.store_event(data=self.base_error_trace, project_id=self.project) self.group = self.event.group assert self.group self.path = f"/api/0/issues/{self.group.id}/similar-issues-embeddings/" self.similar_event = self.store_event( data={"message": "Dogs are great!"}, project_id=self.project ) def get_expected_response( self, group_ids: Sequence[int], exception_distances: Sequence[float], should_be_grouped: Sequence[str], ) -> Sequence[tuple[Any, Mapping[str, Any]]]: serialized_groups = serialize( list(Group.objects.get_many_from_cache(group_ids)), user=self.user ) response = [] for i, group in enumerate(serialized_groups): response.append( ( group, { "exception": exception_distances[i], "shouldBeGrouped": should_be_grouped[i], }, ) ) return response def test_get_formatted_results(self) -> None: event_from_second_similar_group = save_new_event( {"message": "Adopt don't shop"}, self.project ) assert self.similar_event.group_id is not None similar_issue_data_1 = SeerSimilarIssueData( parent_group_id=self.similar_event.group_id, parent_hash=self.similar_event.get_primary_hash(), should_group=True, stacktrace_distance=0.00001, ) assert event_from_second_similar_group.group_id similar_issue_data_2 = SeerSimilarIssueData( parent_group_id=event_from_second_similar_group.group_id, 
parent_hash=event_from_second_similar_group.get_primary_hash(), should_group=False, stacktrace_distance=0.23, ) group_similar_endpoint = GroupSimilarIssuesEmbeddingsEndpoint() formatted_results = group_similar_endpoint.get_formatted_results( similar_issues_data=[similar_issue_data_1, similar_issue_data_2], user=self.user, group=self.group, ) assert formatted_results == self.get_expected_response( [ self.similar_event.group_id, event_from_second_similar_group.group_id, ], [1.0000, 0.7700], ["Yes", "No"], ) @mock.patch("sentry.seer.similarity.similar_issues.metrics.incr") @mock.patch("sentry.seer.similarity.similar_issues.seer_grouping_connection_pool.urlopen") @mock.patch("sentry.issues.endpoints.group_similar_issues_embeddings.logger") def test_simple( self, mock_logger: mock.MagicMock, mock_seer_request: mock.MagicMock, mock_metrics_incr: mock.MagicMock, ) -> None: seer_return_value: SimilarIssuesEmbeddingsResponse = { "responses": [ { "parent_hash": self.similar_event.get_primary_hash(), "should_group": True, "stacktrace_distance": 0.01, } ] } mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200) response = self.client.get( self.path, data={"k": "1", "threshold": "0.01"}, ) assert self.similar_event.group_id is not None assert response.data == self.get_expected_response( [self.similar_event.group_id], [0.99], ["Yes"] ) expected_seer_request_params = { "threshold": 0.01, "event_id": self.group.get_latest_event().event_id, "hash": self.event.get_primary_hash(), "project_id": self.project.id, "stacktrace": EXPECTED_STACKTRACE_STRING, "exception_type": "ZeroDivisionError", "read_only": True, "referrer": "similar_issues", "use_reranking": True, "model": "v1", "training_mode": False, "k": 1, } mock_seer_request.assert_called_with( "POST", SEER_SIMILAR_ISSUES_URL, retries=options.get("seer.similarity.grouping-ingest-retries"), timeout=options.get("seer.similarity.grouping-ingest-timeout"), 
body=orjson.dumps(expected_seer_request_params), headers={"content-type": "application/json;charset=utf-8"}, ) mock_logger.info.assert_called_with( "Similar issues embeddings parameters", extra=expected_seer_request_params ) mock_metrics_incr.assert_any_call( "seer.similar_issues_request", sample_rate=options.get("seer.similarity.metrics_sample_rate"), tags={ "response_status": 200, "outcome": "matching_group_found", "referrer": "similar_issues", }, ) @mock.patch("sentry.seer.similarity.similar_issues.seer_grouping_connection_pool.urlopen") def test_simple_threads(self, mock_seer_request: mock.MagicMock) -> None: event = self.store_event(data=EVENT_WITH_THREADS_STACKTRACE, project_id=self.project) data = { "parent_hash": self.similar_event.get_primary_hash(), "should_group": True, "stacktrace_distance": 0.01, } mock_seer_request.return_value = HTTPResponse( orjson.dumps({"responses": [data]}), status=200 ) assert event.group path = f"/api/0/issues/{event.group.id}/similar-issues-embeddings/" response = self.client.get(path, data={"k": "1", "threshold": "0.01"}) assert self.similar_event.group_id is not None assert response.data == self.get_expected_response( [self.similar_event.group_id], [0.99], ["Yes"] ) @mock.patch("sentry.analytics.record") @mock.patch("sentry.seer.similarity.similar_issues.seer_grouping_connection_pool.urlopen") def test_multiple(self, mock_seer_request: mock.MagicMock, mock_record: mock.MagicMock) -> None: over_threshold_group_event = save_new_event({"message": "Maisey is silly"}, self.project) under_threshold_group_event = save_new_event({"message": "Charlie is goofy"}, self.project) seer_return_value: SimilarIssuesEmbeddingsResponse = { "responses": [ { "parent_hash": self.similar_event.get_primary_hash(), "should_group": True, "stacktrace_distance": 0.002, # Over threshold }, { "parent_hash": over_threshold_group_event.get_primary_hash(), "should_group": True, "stacktrace_distance": 0.002, # Over threshold }, { "parent_hash": 
under_threshold_group_event.get_primary_hash(), "should_group": False, "stacktrace_distance": 0.05, # Under threshold }, ] } mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value)) response = self.client.get( self.path, data={"k": "1", "threshold": "0.01"}, ) assert self.similar_event.group_id is not None assert over_threshold_group_event.group_id is not None assert under_threshold_group_event.group_id is not None assert response.data == self.get_expected_response( [ self.similar_event.group_id, over_threshold_group_event.group_id, under_threshold_group_event.group_id, ], [0.998, 0.998, 0.95], ["Yes", "Yes", "No"], ) assert_last_analytics_event( mock_record, GroupSimilarIssuesEmbeddingsCountEvent( organization_id=self.org.id, project_id=self.project.id, hash=self.event.get_primary_hash(), group_id=self.group.id, count_over_threshold=2, user_id=self.user.id, ), ) @mock.patch("sentry.seer.similarity.similar_issues.seer_grouping_connection_pool.urlopen") def test_parent_hash_in_group_hashes(self, mock_seer_request: mock.MagicMock) -> None: """ Test that the request group's hashes are filtered out of the returned similar parent hashes """ seer_return_value: Any = { "responses": [ # Make the group's own hash the returned parent hash { "parent_hash": self.event.get_primary_hash(), "should_group": True, "stacktrace_distance": 0.01, }, { "parent_hash": self.similar_event.get_primary_hash(), "should_group": True, "stacktrace_distance": 0.01, }, ] } mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200) response = self.client.get(self.path) assert self.similar_event.group_id is not None assert response.data == self.get_expected_response( [self.similar_event.group_id], [0.99], ["Yes"] ) @mock.patch("sentry.seer.similarity.similar_issues.metrics.incr") @mock.patch("sentry.seer.similarity.similar_issues.logger") @mock.patch("sentry.seer.similarity.similar_issues.seer_grouping_connection_pool.urlopen") def 
test_incomplete_return_data( self, mock_seer_request: mock.MagicMock, mock_logger: mock.MagicMock, mock_metrics_incr: mock.MagicMock, ) -> None: # Two suggested groups, one with valid data, one missing parent hash. We should log the # second and return the first. seer_return_value: Any = { "responses": [ { "parent_hash": self.similar_event.get_primary_hash(), "should_group": True, "stacktrace_distance": 0.01, }, { # missing parent hash "should_group": True, "stacktrace_distance": 0.01, }, ] } mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200) response = self.client.get(self.path) mock_logger.exception.assert_called_with( "Seer similar issues response entry missing key 'parent_hash'", extra={ "request_params": { "event_id": self.group.get_latest_event().event_id, "hash": self.event.get_primary_hash(), "project_id": self.project.id, "stacktrace": EXPECTED_STACKTRACE_STRING, "exception_type": "ZeroDivisionError", "read_only": True, "referrer": "similar_issues", "use_reranking": True, "model": "v1", "training_mode": False, }, "raw_similar_issue_data": { "should_group": True, "stacktrace_distance": 0.01, }, }, ) mock_metrics_incr.assert_any_call( "seer.similar_issues_request", sample_rate=options.get("seer.similarity.metrics_sample_rate"), tags={ "response_status": 200, "outcome": "error", "error": "IncompleteSeerDataError", "referrer": "similar_issues", }, ) assert self.similar_event.group_id is not None assert response.data == self.get_expected_response( [self.similar_event.group_id], [0.99], ["Yes"] ) @mock.patch("sentry.seer.similarity.similar_issues.delete_seer_grouping_records_by_hash") @mock.patch("sentry.seer.similarity.similar_issues.metrics.incr") @mock.patch("sentry.seer.similarity.similar_issues.logger") @mock.patch("sentry.seer.similarity.similar_issues.seer_grouping_connection_pool.urlopen") def test_nonexistent_grouphash( self, mock_seer_similarity_request: mock.MagicMock, mock_logger: mock.MagicMock, 
mock_metrics_incr: mock.MagicMock, mock_seer_deletion_request: mock.MagicMock, ) -> None: """ The seer API can return grouphashes that do not exist if their groups have been deleted/merged. Test info about these groups is not returned. """ seer_return_value: SimilarIssuesEmbeddingsResponse = { # Two suggested groups, one with valid data, one pointing to a group that doesn't exist. # We should log the second and return the first. "responses": [ { "parent_hash": self.similar_event.get_primary_hash(), "should_group": True, "stacktrace_distance": 0.01, }, { "parent_hash": "not a real hash", "should_group": True, "stacktrace_distance": 0.01, }, ] } mock_seer_similarity_request.return_value = HTTPResponse( orjson.dumps(seer_return_value), status=200 ) response = self.client.get(self.path) mock_metrics_incr.assert_any_call( "seer.similar_issues_request", sample_rate=options.get("seer.similarity.metrics_sample_rate"), tags={ "response_status": 200, "outcome": "error", "error": "SimilarHashNotFoundError", "referrer": "similar_issues", }, ) assert self.similar_event.group_id assert response.data == self.get_expected_response( [self.similar_event.group_id], [0.99], ["Yes"] ) mock_logger.warning.assert_called_with( "get_similarity_data_from_seer.parent_hash_not_found", extra={ "hash": self.event.get_primary_hash(), "parent_hash": "not a real hash", "project_id": self.project.id, "event_id": self.event.event_id, }, ) mock_seer_deletion_request.delay.assert_called_with(self.project.id, ["not a real hash"]) @mock.patch("sentry.seer.similarity.similar_issues.metrics.incr") @mock.patch("sentry.seer.similarity.similar_issues.logger") @mock.patch("sentry.seer.similarity.similar_issues.seer_grouping_connection_pool.urlopen") def test_grouphash_with_no_group( self, mock_seer_similarity_request: mock.MagicMock, mock_logger: mock.MagicMock, mock_metrics_incr: mock.MagicMock, ) -> None: """ The seer API can return hashes which don't have a group id attached. 
Test that these groups are not returned. """ existing_grouphash = GroupHash.objects.create(hash="dogs are great", project=self.project) assert existing_grouphash.group_id is None # Create metadata for the grouphash so it has a creation date GroupHashMetadata.objects.get_or_create(grouphash=existing_grouphash) seer_return_value: SimilarIssuesEmbeddingsResponse = { "responses": [ { "parent_hash": "dogs are great", "should_group": True, "stacktrace_distance": 0.01, }, ] } mock_seer_similarity_request.return_value = HTTPResponse( orjson.dumps(seer_return_value), status=200 ) response = self.client.get(self.path) mock_metrics_incr.assert_any_call( "seer.similar_issues_request", sample_rate=options.get("seer.similarity.metrics_sample_rate"), tags={ "response_status": 200, "outcome": "error", "error": "SimilarHashMissingGroupError", "referrer": "similar_issues", }, ) assert response.data == [] mock_logger.warning.assert_called_with( "get_similarity_data_from_seer.parent_hash_missing_group", extra={ "hash": self.event.get_primary_hash(), "parent_hash": "dogs are great", "parent_gh_age_in_sec": mock.ANY, # See below "project_id": self.project.id, "event_id": self.event.event_id, }, ) logged_gh_age = mock_logger.warning.call_args.kwargs["extra"]["parent_gh_age_in_sec"] assert isinstance(logged_gh_age, float) # Note that unlike in the missing grouphash test below, we're not testing Seer deletion here # because it only happens conditionally, behavior that's tested in `test_similar_issues.py` @mock.patch("sentry.analytics.record") @mock.patch("sentry.seer.similarity.similar_issues.seer_grouping_connection_pool.urlopen") def test_empty_seer_return( self, mock_seer_request: mock.MagicMock, mock_record: mock.MagicMock ) -> None: mock_seer_request.return_value = HTTPResponse([], status=200) response = self.client.get(self.path) assert response.data == [] assert_last_analytics_event( mock_record, GroupSimilarIssuesEmbeddingsCountEvent( organization_id=self.org.id, 
project_id=self.project.id, hash=self.event.get_primary_hash(), group_id=self.group.id, count_over_threshold=0, user_id=self.user.id, ), ) def test_no_contributing_exception(self) -> None: data_no_contributing_exception = { "fingerprint": ["message"], "message": "Message", "exception": { "values": [ { "stacktrace": { "frames": [ { "function": "divide_by_zero", "module": "__main__", "filename": "python_onboarding.py", "abs_path": "/Users/jodi/python_onboarding/python_onboarding.py", "lineno": 20, "context_line": " divide = 1/0", "in_app": False, }, ] }, "type": "ZeroDivisionError", "value": "division by zero", } ] }, "platform": "python", } event_no_contributing_exception = self.store_event( data=data_no_contributing_exception, project_id=self.project ) group_no_contributing_exception = event_no_contributing_exception.group assert group_no_contributing_exception response = self.client.get( f"/api/0/issues/{group_no_contributing_exception.id}/similar-issues-embeddings/", data={"k": "1", "threshold": "0.98"}, ) assert response.data == [] def test_no_exception(self) -> None: event_no_exception = self.store_event(data={}, project_id=self.project) group_no_exception = event_no_exception.group assert group_no_exception response = self.client.get( f"/api/0/issues/{group_no_exception.id}/similar-issues-embeddings/", data={"k": "1", "threshold": "0.98"}, ) assert response.data == [] @mock.patch("sentry.models.group.Group.get_latest_event") def test_no_latest_event(self, mock_get_latest_event: mock.MagicMock) -> None: mock_get_latest_event.return_value = None response = self.client.get( f"/api/0/issues/{self.group.id}/similar-issues-embeddings/", data={"k": "1", "threshold": "0.98"}, ) assert response.data == [] @mock.patch("sentry.issues.endpoints.group_similar_issues_embeddings.get_stacktrace_string") def test_no_stacktrace_string(self, mock_get_stacktrace_string: mock.MagicMock) -> None: mock_get_stacktrace_string.return_value = "" response = self.client.get( 
f"/api/0/issues/{self.group.id}/similar-issues-embeddings/", data={"k": "1", "threshold": "0.98"}, ) assert response.data == [] @mock.patch("sentry.seer.similarity.similar_issues.seer_grouping_connection_pool.urlopen") def test_no_optional_params(self, mock_seer_request: mock.MagicMock) -> None: """ Test that optional parameters, k, threshold, and read_only can not be included. """ seer_return_value: SimilarIssuesEmbeddingsResponse = { "responses": [ { "parent_hash": self.similar_event.get_primary_hash(), "should_group": True, "stacktrace_distance": 0.01, } ] } mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value)) # Include no optional parameters response = self.client.get( self.path, # optional params would be here ) assert self.similar_event.group_id is not None assert response.data == self.get_expected_response( [self.similar_event.group_id], [0.99], ["Yes"] ) mock_seer_request.assert_called_with( "POST", SEER_SIMILAR_ISSUES_URL, retries=options.get("seer.similarity.grouping-ingest-retries"), timeout=options.get("seer.similarity.grouping-ingest-timeout"), body=orjson.dumps( { "threshold": 0.01, "event_id": self.group.get_latest_event().event_id, "hash": self.event.get_primary_hash(), "project_id": self.project.id, "stacktrace": EXPECTED_STACKTRACE_STRING, "exception_type": "ZeroDivisionError", "read_only": True, "referrer": "similar_issues", "use_reranking": True, "model": "v1", "training_mode": False, }, ), headers={"content-type": "application/json;charset=utf-8"}, ) # Include k response = self.client.get(self.path, data={"k": 1}) assert self.similar_event.group_id is not None assert response.data == self.get_expected_response( [self.similar_event.group_id], [0.99], ["Yes"] ) mock_seer_request.assert_called_with( "POST", SEER_SIMILAR_ISSUES_URL, retries=options.get("seer.similarity.grouping-ingest-retries"), timeout=options.get("seer.similarity.grouping-ingest-timeout"), body=orjson.dumps( { "threshold": 0.01, "event_id": 
self.group.get_latest_event().event_id, "hash": self.event.get_primary_hash(), "project_id": self.project.id, "stacktrace": EXPECTED_STACKTRACE_STRING, "exception_type": "ZeroDivisionError", "read_only": True, "referrer": "similar_issues", "use_reranking": True, "model": "v1", "training_mode": False, "k": 1, }, ), headers={"content-type": "application/json;charset=utf-8"}, ) # Include threshold response = self.client.get( self.path, data={"threshold": "0.01"}, ) assert response.data == self.get_expected_response( [self.similar_event.group_id], [0.99], ["Yes"] ) mock_seer_request.assert_called_with( "POST", SEER_SIMILAR_ISSUES_URL, retries=options.get("seer.similarity.grouping-ingest-retries"), timeout=options.get("seer.similarity.grouping-ingest-timeout"), body=orjson.dumps( { "threshold": 0.01, "event_id": self.group.get_latest_event().event_id, "hash": self.event.get_primary_hash(), "project_id": self.project.id, "stacktrace": EXPECTED_STACKTRACE_STRING, "exception_type": "ZeroDivisionError", "read_only": True, "referrer": "similar_issues", "use_reranking": True, "model": "v1", "training_mode": False, }, ), headers={"content-type": "application/json;charset=utf-8"}, ) @mock.patch("sentry.seer.similarity.similar_issues.seer_grouping_connection_pool.urlopen") def test_obeys_useReranking_query_param(self, mock_seer_request: mock.MagicMock) -> None: for incoming_value, outgoing_value in [("true", True), ("false", False)]: self.client.get(self.path, data={"useReranking": incoming_value}) assert mock_seer_request.call_count == 1 request_params = orjson.loads(mock_seer_request.call_args.kwargs["body"]) assert request_params["use_reranking"] == outgoing_value mock_seer_request.reset_mock() def test_too_many_frames(self) -> None: error_type = "FailedToFetchError" error_value = "Charlie didn't bring the ball back" context_line = f"raise {error_type}('{error_value}')" error_data = { "exception": { "values": [ { "type": error_type, "value": error_value, "stacktrace": { 
"frames": [ { "function": f"play_fetch_{i}", "filename": f"dogpark{i}.py", "context_line": context_line, } for i in range(MAX_FRAME_COUNT + 1) ] }, } ] }, "platform": "java", } new_event = self.store_event(data=error_data, project_id=self.project) assert new_event.group response = self.client.get( path=f"/api/0/issues/{new_event.group.id}/similar-issues-embeddings/", data={"k": "1", "threshold": "0.01"}, ) assert response.data == [] def test_no_filename_or_module(self) -> None: error_type = "FailedToFetchError" error_value = "Charlie didn't bring the ball back" context_line = f"raise {error_type}('{error_value}')" error_data = { "exception": { "values": [ { "type": error_type, "value": error_value, "stacktrace": { "frames": [ { "function": f"play_fetch_{i}", "context_line": context_line, } for i in range(MAX_FRAME_COUNT + 1) ] }, } ] }, "platform": "python", } new_event = self.store_event(data=error_data, project_id=self.project) assert new_event.group response = self.client.get( path=f"/api/0/issues/{new_event.group.id}/similar-issues-embeddings/", data={"k": "1", "threshold": "0.01"}, ) assert response.data == []
GroupSimilarIssuesEmbeddingsTest
python
numpy__numpy
numpy/_core/tests/test_dtype.py
{ "start": 77388, "end": 79124 }
class ____: @pytest.mark.leaks_references(reason="dynamically creates custom dtype.") @pytest.mark.thread_unsafe(reason="crashes when GIL disabled, dtype setup is thread-unsafe") def test_custom_structured_dtype(self): class mytype: pass blueprint = np.dtype([("field", object)]) dt = create_custom_field_dtype(blueprint, mytype, 0) assert dt.type == mytype # We cannot (currently) *create* this dtype with `np.dtype` because # mytype does not inherit from `np.generic`. This seems like an # unnecessary restriction, but one that has been around forever: assert np.dtype(mytype) == np.dtype("O") if HAS_REFCOUNT: # Create an array and test that memory gets cleaned up (gh-25949) o = object() startcount = sys.getrefcount(o) a = np.array([o], dtype=dt) del a assert sys.getrefcount(o) == startcount @pytest.mark.thread_unsafe(reason="crashes when GIL disabled, dtype setup is thread-unsafe") def test_custom_structured_dtype_errors(self): class mytype: pass blueprint = np.dtype([("field", object)]) with pytest.raises(ValueError): # Tests what happens if fields are unset during creation # which is currently rejected due to the containing object # (see PyArray_RegisterDataType). create_custom_field_dtype(blueprint, mytype, 1) with pytest.raises(RuntimeError): # Tests that a dtype must have its type field set up to np.dtype # or in this case a builtin instance. create_custom_field_dtype(blueprint, mytype, 2)
TestUserDType
python
vyperlang__vyper
vyper/semantics/analysis/imports.py
{ "start": 976, "end": 2430 }
class ____: # the current path in the import graph traversal _path: list[vy_ast.Module] = dc.field(default_factory=list) # stack of dicts, each item in the stack is a dict keeping # track of imports in the current module _imports: list[dict] = dc.field(default_factory=list) @property def imported_modules(self): return self._imports[-1] @property def current_module(self): return self._path[-1] def push_path(self, module_ast: vy_ast.Module) -> None: if module_ast in self._path: cycle = self._path + [module_ast] raise ImportCycle(" imports ".join(f'"{t.path}"' for t in cycle)) self._path.append(module_ast) self._imports.append({}) def pop_path(self, expected: vy_ast.Module) -> None: popped = self._path.pop() assert expected is popped, "unreachable" self._imports.pop() @contextlib.contextmanager def enter_path(self, module_ast: vy_ast.Module) -> Iterator[None]: self.push_path(module_ast) try: yield finally: self.pop_path(module_ast) def try_parse_abi(file_input: FileInput) -> CompilerInput: try: s = json.loads(file_input.source_code) if isinstance(s, dict) and "abi" in s: s = s["abi"] return JSONInput(**asdict(file_input), data=s) except (ValueError, TypeError): return file_input
_ImportGraph
python
dagster-io__dagster
examples/docs_snippets/docs_snippets/guides/observe/alerts/generate_alerts_config_code_snippets.py
{ "start": 602, "end": 6104 }
class ____(NamedTuple): condition_description: str alert_name: str event_types: Sequence[str] config_snippet: Optional[Mapping[str, Any]] NOTIFICATION_SERVICES = sorted( [ NotificationService( name="email", label="Email", effect_description="an email", config_snippet={ "email_addresses": [ "richard.hendricks@hooli.com", "nelson.bighetti@hooli.com", ] }, ), NotificationService( name="slack", label="Slack", effect_description="a Slack message", config_snippet={ "slack_workspace_name": "hooli", "slack_channel_name": "notifications", }, ), NotificationService( name="pagerduty", label="PagerDuty", effect_description="a PagerDuty alert", config_snippet={"integration_key": "<pagerduty_integration_key>"}, ), NotificationService( name="microsoft_teams", label="Microsoft Teams", effect_description="a Microsoft Teams webhook", config_snippet={"webhook_url": "https://yourdomain.webhook.office.com/..."}, ), ], key=lambda x: x.name, ) ALERT_TYPES = [ AlertType( condition_description="when a run fails", alert_name="run-alert-failure", event_types=["JOB_FAILURE"], config_snippet={"tags": {"important": "true"}}, ), AlertType( condition_description="when a run is taking too long to complete", alert_name="job-running-over-one-hour", event_types=["JOB_LONG_RUNNING"], config_snippet={ "alert_targets": [ {"long_running_job_threshold_target": {"threshold_seconds": 3600}} ], "tags": {"important": "true"}, }, ), AlertType( condition_description="when an asset fails to materialize", alert_name="asset-materialization-failure-alert", event_types=["ASSET_MATERIALIZATION_FAILED"], config_snippet={ "alert_targets": [ {"asset_key_target": {"asset_key": ["s3", "report"]}}, { "asset_group_target": { "asset_group": "transformed", "location_name": "prod", "repo_name": "__repository__", } }, ], }, ), AlertType( condition_description="when an asset check fails", alert_name="asset-check-failed", event_types=["ASSET_CHECK_SEVERITY_ERROR"], config_snippet={ "alert_targets": [ {"asset_key_target": 
{"asset_key": ["s3", "report"]}}, { "asset_group_target": { "asset_group": "transformed", "location_name": "prod", "repo_name": "__repository__", } }, ], }, ), AlertType( condition_description="when a schedule or sensor tick fails", alert_name="schedule-sensor-failure", event_types=["TICK_FAILURE"], config_snippet=None, ), AlertType( condition_description="when a code location fails to load", alert_name="code-location-error", event_types=["CODE_LOCATION_ERROR"], config_snippet=None, ), AlertType( condition_description="when a Hybrid agent hasn't sent a heartbeat in the last 5 minutes", alert_name="agent-unavailable-alert", event_types=["AGENT_UNAVAILABLE"], config_snippet=None, ), AlertType( condition_description="when an asset has exceeded a credit usage threshold", alert_name="insights-credit-alert", event_types=["INSIGHTS_CONSUMPTION_EXCEEDED"], config_snippet={ "alert_targets": [ { "insights_asset_threshold_target": { "metric_name": "__dagster_dagster_credits", "selection_period_days": 7, "threshold": 50, "operator": "GREATER_THAN", "asset_key": ["s3", "report"], } }, ], }, ), ] def _make_yaml_code_snippet(alert: AlertType, service: NotificationService) -> None: # creates a yaml code sample and writes it to the local directory alert_name = f"{alert.alert_name}-{service.name}" yaml_block = yaml.dump( dict( alert_policies=[ dict( name=alert_name, event_types=alert.event_types, description=f"Sends {service.effect_description} {alert.condition_description}.", **(alert.config_snippet if alert.config_snippet else {}), notification_service={service.name: service.config_snippet}, ) ] ), ) path = f"{alert_name}.yaml" with open(Path(__file__).parent / path, "w") as f: f.write(f"# alert_policies.yaml\n\n{yaml_block}") if __name__ == "__main__": for alert in ALERT_TYPES: for service in NOTIFICATION_SERVICES: _make_yaml_code_snippet(alert, service)
AlertType
python
google__jax
jax/_src/export/shape_poly.py
{ "start": 3045, "end": 9152 }
class ____: """Represents a factor in a symbolic dimension expression. Factors are either variables, or expressions of the form floordiv(E1, E2) or mod(E1, E2), or max(E1, E2), or min(E1, E2). Factors are multiplied to form terms (see _DimTerm), and terms are added to form symbolic expressions (see _DimExpr). Args: * var: if specified then the factor is a dimension variable. `operation` must be `None`. * operation: if specified then the factor is an operation applied to `operands`. One of `FLOORDIR` or `MOD` or `MAX` or `MIN`. `var` must be `None` * operands: the operands to which the operation is applied. """ # The supported operations # FLOORDIV(e1, e2) and MOD(e1, e2) have the same semantics as in Python: # FLOORDIV(e1, e2) = e1 // e2 = floor(e1 / e2) # if e2 > 0 then 0 <= MOD(e1, e2) < e2 # if e2 < 0 then e2 < MOD(e1, e2) <= 0 # e1 = e2 * FLOORDIV(e1, e2) + MOD(e1, e2) # FLOORDIV = "floordiv" MOD = "mod" MAX = "max" MIN = "min" __slots__ = ["var", "operation", "operands", "_hash", "_size"] def __init__(self, *operands: _DimExpr, var: str | None = None, operation: str | None = None): if var is not None: assert operation is None assert not operands else: assert operation is not None self.var = var self.operation = operation self.operands = operands self._hash = None self._size: int = 1 if var is not None else 1 + sum(o._size for o in operands) @staticmethod def from_var(v: str) -> _DimFactor: return _DimFactor(var=v) @staticmethod def from_operation(operation: str, *operands: DimSize, scope: SymbolicScope) -> _DimFactor: return _DimFactor(*(_ensure_poly(o, operation, scope) for o in operands), operation=operation) def to_var(self) -> str | None: return self.var def get_vars(self) -> set[str]: # All the vars that appear if self.var is not None: return {self.var} else: acc = set() for opnd in self.operands: acc.update(opnd._get_vars()) return acc def __str__(self): if self.var is not None: return self.var opnd_str = ", ".join([str(opnd) for opnd in self.operands]) 
return f"{self.operation}({opnd_str})" __repr__ = __str__ def __hash__(self): if self._hash is None: self._hash = hash((self.var, self.operation, *self.operands)) return self._hash def _syntactic_cmp(self, other: _DimFactor) -> int: """Returns -1 if self < other, 0 if self == other, 1 if self > other. The comparison is done lexicographically (syntactic), to be used for sorting. The result is not related to the semantic value. """ if c := cmp_comparable(self._size, other._size): return c if self.var is not None: return cmp_comparable(self.var, other.var) if c := cmp_comparable(self.operation, other.operation): return c return cmp_sequence(self.operands, other.operands, lambda s_o, o_o: s_o._syntactic_cmp(o_o)) def __eq__(self, other: Any): """Lexicographic comparison.""" if not isinstance(other, _DimFactor): return False return self._syntactic_cmp(other) == 0 def __lt__(self, other: _DimFactor): """Lexicographic comparison.""" return self._syntactic_cmp(other) < 0 def __le__(self, other: _DimFactor): """Lexicographic comparison.""" return self._syntactic_cmp(other) <= 0 def __gt__(self, other: _DimFactor): """Lexicographic comparison.""" return self._syntactic_cmp(other) > 0 def __ge__(self, other: _DimFactor): """Lexicographic comparison""" return self._syntactic_cmp(other) >= 0 def evaluate(self, env: DimVarEnv, scope: SymbolicScope): from jax._src.lax import lax # pytype: disable=import-error if self.var is not None: try: return env[self.var] except KeyError: # Perhaps there is a normalization rule for this variable normalized_var = _DimExpr._from_var(self.var, scope) if core.is_constant_dim(normalized_var): return normalized_var non_trivial_normalization = (v1 := normalized_var._to_var()) is None or v1 != self.var # type: ignore if non_trivial_normalization: return normalized_var._evaluate(env) # type: ignore err_msg = ( f"Encountered dimension variable '{self.var}' that is not appearing in the shapes of the function arguments.\n" "Please see 
https://docs.jax.dev/en/latest/export/shape_poly.html#dimension-variables-must-be-solvable-from-the-input-shapes for more details.") raise UnexpectedDimVar(err_msg) else: operand_values = [opnd._evaluate(env) for opnd in self.operands] if self.operation == _DimFactor.FLOORDIV: return divmod(*operand_values)[0] # type: ignore elif self.operation == _DimFactor.MOD: return divmod(*operand_values)[1] # type: ignore elif self.operation == _DimFactor.MAX: op1, op2 = operand_values if core.is_constant_dim(op1) and core.is_constant_dim(op2): return max(op1, op2) if core.is_symbolic_dim(op1) or core.is_symbolic_dim(op2): return core.max_dim(op1, op2) # In the context of `evaluate` dimension variables may be mapped to # JAX Tracers. return lax.max(op1, op2) elif self.operation == _DimFactor.MIN: op1, op2 = operand_values if core.is_constant_dim(op1) and core.is_constant_dim(op2): return min(op1, op2) if core.is_symbolic_dim(op1) or core.is_symbolic_dim(op2): return core.min_dim(op1, op2) # In the context of `evaluate` dimension variables may be mapped to # JAX Tracers. return lax.min(op1, op2) else: assert False, self.operation def __deepcopy__(self, memo): return _DimFactor(*copy.deepcopy(self.operands, memo), var=copy.deepcopy(self.var, memo), operation=copy.deepcopy(self.operation, memo))
_DimFactor
python
django-crispy-forms__django-crispy-forms
tests/forms.py
{ "start": 252, "end": 414 }
class ____(forms.ModelForm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.helper = FormHelper(self)
BaseModelForm
python
jina-ai__jina
jina/serve/networking/sse.py
{ "start": 6554, "end": 13446 }
class ____(Response): """Implements the ServerSentEvent Protocol: https://www.w3.org/TR/2009/WD-eventsource-20090421/ Responses must not be compressed by middleware in order to work. implementation based on Starlette StreamingResponse """ DEFAULT_PING_INTERVAL = 15 # noinspection PyMissingConstructor def __init__( self, content: Any, status_code: int = 200, headers: Optional[Dict] = None, media_type: str = "text/event-stream", background: Optional[BackgroundTask] = None, ping: Optional[int] = None, sep: Optional[str] = None, ping_message_factory: Optional[Callable[[], ServerSentEvent]] = None, data_sender_callable: Optional[ Callable[[], Coroutine[None, None, None]] ] = None, ) -> None: self.sep = sep self.ping_message_factory = ping_message_factory if isinstance(content, AsyncIterable): self.body_iterator = ( content ) # type: AsyncIterable[Union[Any,dict,ServerSentEvent]] else: self.body_iterator = iterate_in_threadpool(content) # type: ignore self.status_code = status_code self.media_type = self.media_type if media_type is None else media_type self.background = background # type: ignore # follows https://github.com/encode/starlette/blob/master/starlette/responses.py self.data_sender_callable = data_sender_callable _headers = {} if headers is not None: # pragma: no cover _headers.update(headers) # mandatory for servers-sent events headers # allow cache control header to be set by user to support fan out proxies # https://www.fastly.com/blog/server-sent-events-fastly _headers.setdefault("Cache-Control", "no-cache") _headers["Connection"] = "keep-alive" _headers["X-Accel-Buffering"] = "no" self.init_headers(_headers) self.ping_interval = self.DEFAULT_PING_INTERVAL if ping is None else ping self.active = True self._ping_task = None self._send_lock = anyio.Lock() @staticmethod async def listen_for_disconnect(receive: Receive) -> None: """ Listen for the client disconnecting :param receive: receive channel """ while True: message = await receive() if message["type"] 
== "http.disconnect": _log.debug("Got event: http.disconnect. Stop streaming.") break @staticmethod async def listen_for_exit_signal() -> None: """ Listen for the exit signal """ # Check if should_exit was set before anybody started waiting if AppStatus.should_exit: return # Setup an Event if AppStatus.should_exit_event is None: AppStatus.should_exit_event = anyio.Event() # Check if should_exit got set while we set up the event if AppStatus.should_exit: return # Await the event await AppStatus.should_exit_event.wait() async def stream_response(self, send) -> None: """ Stream the response :param send: send channel """ await send( { "type": "http.response.start", "status": self.status_code, "headers": self.raw_headers, } ) async for data in self.body_iterator: chunk = ensure_bytes(data) _log.debug(f"chunk: {chunk.decode()}") await send({"type": "http.response.body", "body": chunk, "more_body": True}) await send({"type": "http.response.body", "body": b"", "more_body": False}) async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: """ Stream the response :param scope: scope :param receive: receive channel :param send: send channel """ async def safe_send(message): async with self._send_lock: return await send(message) async with anyio.create_task_group() as task_group: # https://trio.readthedocs.io/en/latest/reference-core.html#custom-supervisors async def wrap(func: Callable[[], Coroutine[None, None, None]]) -> None: await func() # noinspection PyAsyncCall task_group.cancel_scope.cancel() task_group.start_soon(wrap, partial(self.stream_response, safe_send)) task_group.start_soon(wrap, partial(self._ping, safe_send)) task_group.start_soon(wrap, self.listen_for_exit_signal) if self.data_sender_callable: task_group.start_soon(self.data_sender_callable) await wrap(partial(self.listen_for_disconnect, receive)) if self.background is not None: # pragma: no cover, tested in StreamResponse await self.background() def enable_compression(self, force: bool 
= False) -> None: """ Enable compression :param force: force compression """ raise NotImplementedError @property def ping_interval(self) -> Union[int, float]: """ Time interval between two ping massages :return: ping interval """ return self._ping_interval @ping_interval.setter def ping_interval(self, value: Union[int, float]) -> None: """Setter for ping_interval property. :param value: interval in sec between two ping values. """ if not isinstance(value, (int, float)): raise TypeError("ping interval must be int") if value < 0: raise ValueError("ping interval must be greater then 0") self._ping_interval = value async def _ping(self, send: Send) -> None: # Legacy proxy servers are known to, in certain cases, drop HTTP connections after a short timeout. # To protect against such proxy servers, authors can send a custom (ping) event # every 15 seconds or so. # Alternatively one can send periodically a comment line # (one starting with a ':' character) while self.active: await anyio.sleep(self._ping_interval) if self.ping_message_factory: assert isinstance(self.ping_message_factory, Callable) # type: ignore # https://github.com/python/mypy/issues/6864 ping = ( ServerSentEvent(comment=f"ping - {datetime.utcnow()}").encode() if self.ping_message_factory is None else ensure_bytes(self.ping_message_factory()) ) _log.debug(f"ping: {ping.decode()}") await send({"type": "http.response.body", "body": ping, "more_body": True})
EventSourceResponse
python
python-openxml__python-docx
src/docx/image/jpeg.py
{ "start": 8194, "end": 9503 }
class ____: """Base class for JFIF marker classes. Represents a marker and its segment occuring in a JPEG byte stream. """ def __init__(self, marker_code, offset, segment_length): super(_Marker, self).__init__() self._marker_code = marker_code self._offset = offset self._segment_length = segment_length @classmethod def from_stream(cls, stream, marker_code, offset): """Return a generic |_Marker| instance for the marker at `offset` in `stream` having `marker_code`.""" if JPEG_MARKER_CODE.is_standalone(marker_code): segment_length = 0 else: segment_length = stream.read_short(offset) return cls(marker_code, offset, segment_length) @property def marker_code(self): """The single-byte code that identifies the type of this marker, e.g. ``'\xe0'`` for start of image (SOI).""" return self._marker_code @property def name(self): # pragma: no cover return JPEG_MARKER_CODE.marker_names[self._marker_code] @property def offset(self): # pragma: no cover return self._offset @property def segment_length(self): """The length in bytes of this marker's segment.""" return self._segment_length
_Marker
python
scipy__scipy
benchmarks/benchmarks/go_benchmark_functions/go_funcs_B.py
{ "start": 7260, "end": 8649 }
class ____(Benchmark): r""" Bird objective function. The Bird global optimization problem is a multimodal minimization problem defined as follows .. math:: f_{\text{Bird}}(x) = \left(x_1 - x_2\right)^{2} + e^{\left[1 - \sin\left(x_1\right) \right]^{2}} \cos\left(x_2\right) + e^{\left[1 - \cos\left(x_2\right)\right]^{2}} \sin\left(x_1\right) with :math:`x_i \in [-2\pi, 2\pi]` *Global optimum*: :math:`f(x) = -106.7645367198034` for :math:`x = [4.701055751981055, 3.152946019601391]` or :math:`x = [-1.582142172055011, -3.130246799635430]` .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-2.0 * pi] * self.N, [2.0 * pi] * self.N)) self.global_optimum = [[4.701055751981055, 3.152946019601391], [-1.582142172055011, -3.130246799635430]] self.fglob = -106.7645367198034 def fun(self, x, *args): self.nfev += 1 return (sin(x[0]) * exp((1 - cos(x[1])) ** 2) + cos(x[1]) * exp((1 - sin(x[0])) ** 2) + (x[0] - x[1]) ** 2)
Bird
python
ray-project__ray
python/ray/tests/test_autoscaler_azure.py
{ "start": 5698, "end": 16561 }
class ____(unittest.TestCase): """Test cases for Azure availability zone precedence logic.""" def setUp(self): """Set up test fixtures.""" self.base_provider_config = { "resource_group": "test-rg", "location": "westus2", "subscription_id": "test-sub-id", } self.cluster_name = "test-cluster" def _create_mock_provider(self, provider_config=None): """Create a mock Azure provider for testing.""" config = copy.deepcopy(self.base_provider_config) if provider_config: config.update(provider_config) with patch.object( AzureNodeProvider, "__init__", lambda self, provider_config, cluster_name: None, ): provider = AzureNodeProvider(config, self.cluster_name) provider.provider_config = config provider.cluster_name = self.cluster_name # Mock the validation method to avoid Azure API calls provider._validate_zones_for_node_pool = Mock( side_effect=lambda zones, location, vm_size: zones ) return provider def _extract_zone_logic(self, provider, node_config): """Extract zone determination logic similar to _create_node method.""" node_availability_zone = node_config.get("azure_arm_parameters", {}).get( "availability_zone" ) provider_availability_zone = provider.provider_config.get("availability_zone") if node_availability_zone is not None: return ( provider._parse_availability_zones(node_availability_zone), "node config availability_zone", ) elif provider_availability_zone is not None: return ( provider._parse_availability_zones(provider_availability_zone), "provider availability_zone", ) else: return ([], "default") def test_node_availability_zone_overrides_provider(self): """Test that node-level availability_zone overrides provider-level.""" provider = self._create_mock_provider({"availability_zone": "1,2"}) node_config = { "azure_arm_parameters": { "vmSize": "Standard_D2s_v3", "availability_zone": "3", } } zones, source = self._extract_zone_logic(provider, node_config) self.assertEqual(zones, ["3"]) self.assertEqual(source, "node config availability_zone") def 
test_provider_availability_zone_used_when_no_node_override(self): """Test that provider-level availability_zone is used when no node override.""" provider = self._create_mock_provider({"availability_zone": "1,2"}) node_config = {"azure_arm_parameters": {"vmSize": "Standard_D2s_v3"}} zones, source = self._extract_zone_logic(provider, node_config) self.assertEqual(zones, ["1", "2"]) self.assertEqual(source, "provider availability_zone") def test_none_disables_zones_at_node_level(self): """Test that 'none' at node level disables zones even with provider zones.""" provider = self._create_mock_provider({"availability_zone": "1,2"}) node_config = { "azure_arm_parameters": { "vmSize": "Standard_D2s_v3", "availability_zone": "none", } } zones, source = self._extract_zone_logic(provider, node_config) self.assertIsNone(zones) self.assertEqual(source, "node config availability_zone") def test_no_zones_when_neither_provider_nor_node_specify(self): """Test default behavior when neither provider nor node specify zones.""" provider = self._create_mock_provider() node_config = {"azure_arm_parameters": {"vmSize": "Standard_D2s_v3"}} zones, source = self._extract_zone_logic(provider, node_config) self.assertEqual(zones, []) self.assertEqual(source, "default") def test_node_empty_string_overrides_provider_zones(self): """Test that node empty string overrides provider zones (auto-selection).""" provider = self._create_mock_provider({"availability_zone": "1,2"}) node_config = { "azure_arm_parameters": { "vmSize": "Standard_D2s_v3", "availability_zone": "", } } zones, source = self._extract_zone_logic(provider, node_config) self.assertEqual(zones, []) self.assertEqual(source, "node config availability_zone") def test_node_auto_overrides_provider_zones(self): """Test that node 'auto' overrides provider zones (auto-selection).""" provider = self._create_mock_provider({"availability_zone": "1,2"}) node_config = { "azure_arm_parameters": { "vmSize": "Standard_D2s_v3", "availability_zone": 
"auto", } } zones, source = self._extract_zone_logic(provider, node_config) self.assertEqual(zones, []) self.assertEqual(source, "node config availability_zone") def test_provider_none_disables_zones(self): """Test that provider-level 'none' disables zones.""" provider = self._create_mock_provider({"availability_zone": "none"}) node_config = {"azure_arm_parameters": {"vmSize": "Standard_D2s_v3"}} zones, source = self._extract_zone_logic(provider, node_config) self.assertIsNone(zones) self.assertEqual(source, "provider availability_zone") def test_provider_empty_string_allows_auto_selection(self): """Test that provider-level empty string allows auto-selection.""" provider = self._create_mock_provider({"availability_zone": ""}) node_config = {"azure_arm_parameters": {"vmSize": "Standard_D2s_v3"}} zones, source = self._extract_zone_logic(provider, node_config) self.assertEqual(zones, []) self.assertEqual(source, "provider availability_zone") def test_provider_auto_allows_auto_selection(self): """Test that provider-level 'auto' allows auto-selection.""" provider = self._create_mock_provider({"availability_zone": "auto"}) node_config = {"azure_arm_parameters": {"vmSize": "Standard_D2s_v3"}} zones, source = self._extract_zone_logic(provider, node_config) self.assertEqual(zones, []) self.assertEqual(source, "provider availability_zone") def test_node_null_overrides_provider_zones(self): """Test that node-level 'null' overrides provider zones.""" provider = self._create_mock_provider({"availability_zone": "1,2,3"}) node_config = { "azure_arm_parameters": { "vmSize": "Standard_D2s_v3", "availability_zone": "null", } } zones, source = self._extract_zone_logic(provider, node_config) self.assertIsNone(zones) self.assertEqual(source, "node config availability_zone") def test_provider_null_disables_zones(self): """Test that provider-level 'null' disables zones.""" provider = self._create_mock_provider({"availability_zone": "NULL"}) node_config = {"azure_arm_parameters": 
{"vmSize": "Standard_D2s_v3"}} zones, source = self._extract_zone_logic(provider, node_config) self.assertIsNone(zones) self.assertEqual(source, "provider availability_zone") def test_complex_override_scenario(self): """Test complex scenario with multiple node types and different overrides.""" provider = self._create_mock_provider({"availability_zone": "1,2,3"}) # Test different node configurations test_cases = [ # Node with specific zone override { "config": { "azure_arm_parameters": { "vmSize": "Standard_D2s_v3", "availability_zone": "2", } }, "expected_zones": ["2"], "expected_source": "node config availability_zone", }, # Node with disabled zones { "config": { "azure_arm_parameters": { "vmSize": "Standard_D2s_v3", "availability_zone": "none", } }, "expected_zones": None, "expected_source": "node config availability_zone", }, # Node with auto-selection { "config": { "azure_arm_parameters": { "vmSize": "Standard_D2s_v3", "availability_zone": "", } }, "expected_zones": [], "expected_source": "node config availability_zone", }, # Node using provider default { "config": {"azure_arm_parameters": {"vmSize": "Standard_D2s_v3"}}, "expected_zones": ["1", "2", "3"], "expected_source": "provider availability_zone", }, ] for i, test_case in enumerate(test_cases): with self.subTest(case=i): zones, source = self._extract_zone_logic(provider, test_case["config"]) self.assertEqual(zones, test_case["expected_zones"]) self.assertEqual(source, test_case["expected_source"]) def test_mixed_case_precedence(self): """Test precedence with mixed case 'none' values.""" provider = self._create_mock_provider({"availability_zone": "None"}) node_config = { "azure_arm_parameters": { "vmSize": "Standard_D2s_v3", "availability_zone": "NONE", } } zones, source = self._extract_zone_logic(provider, node_config) # Both should be None (disabled), but node should take precedence self.assertIsNone(zones) self.assertEqual(source, "node config availability_zone") def 
test_whitespace_handling_in_precedence(self): """Test that whitespace is properly handled in precedence logic.""" provider = self._create_mock_provider({"availability_zone": " 1, 2, 3 "}) node_config = { "azure_arm_parameters": { "vmSize": "Standard_D2s_v3", "availability_zone": " 2 ", } } zones, source = self._extract_zone_logic(provider, node_config) self.assertEqual(zones, ["2"]) self.assertEqual(source, "node config availability_zone") if __name__ == "__main__": unittest.main()
TestAzureAvailabilityZonePrecedence
python
celery__celery
t/unit/worker/test_request.py
{ "start": 2977, "end": 3199 }
class ____: def test_retry_semipredicate(self): try: raise Exception('foo') except Exception as exc: ret = Retry('Retrying task', exc) assert ret.exc == exc
test_Retry
python
kamyu104__LeetCode-Solutions
Python/neither-minimum-nor-maximum.py
{ "start": 47, "end": 559 }
class ____(object): def findNonMinOrMax(self, nums): """ :type nums: List[int] :rtype: int """ mx, mn = float("-inf"), float("inf") result = -1 for x in nums: if mn < x < mx: return x if x < mn: result = mn mn = x if x > mx: result = mx mx = x return result if mn < result < mx else -1 # Time: O(n) # Space: O(1) # array
Solution
python
pytransitions__transitions
transitions/extensions/asyncio.py
{ "start": 25847, "end": 31533 }
class ____(HierarchicalMachine, AsyncMachine): """Asynchronous variant of transitions.extensions.nesting.HierarchicalMachine. An asynchronous hierarchical machine REQUIRES AsyncNestedStates, AsyncNestedEvent and AsyncNestedTransitions (or any subclass of it) to operate. """ state_cls = NestedAsyncState transition_cls = NestedAsyncTransition event_cls = NestedAsyncEvent async def trigger_event(self, model, trigger, *args, **kwargs): """Processes events recursively and forwards arguments if suitable events are found. This function is usually bound to models with model and trigger arguments already resolved as a partial. Execution will halt when a nested transition has been executed successfully. Args: model (object): targeted model trigger (str): event name *args: positional parameters passed to the event and its callbacks **kwargs: keyword arguments passed to the event and its callbacks Returns: bool: whether a transition has been executed successfully Raises: MachineError: When no suitable transition could be found and ignore_invalid_trigger is not True. Note that a transition which is not executed due to conditions is still considered valid. 
""" event_data = AsyncEventData(state=None, event=None, machine=self, model=model, args=args, kwargs=kwargs) event_data.result = None return await self.process_context(partial(self._trigger_event, event_data, trigger), model) async def _trigger_event(self, event_data, trigger): try: with self(): res = await self._trigger_event_nested(event_data, trigger, None) event_data.result = self._check_event_result(res, event_data.model, trigger) except BaseException as err: # pylint: disable=broad-except; Exception will be handled elsewhere event_data.error = err if self.on_exception: await self.callbacks(self.on_exception, event_data) else: raise finally: try: await self.callbacks(self.finalize_event, event_data) _LOGGER.debug("%sExecuted machine finalize callbacks", self.name) except BaseException as err: # pylint: disable=broad-except; Exception will be handled elsewhere _LOGGER.error("%sWhile executing finalize callbacks a %s occurred: %s.", self.name, type(err).__name__, str(err)) return event_data.result async def _trigger_event_nested(self, event_data, _trigger, _state_tree): model = event_data.model if _state_tree is None: _state_tree = self.build_state_tree(listify(getattr(model, self.model_attribute)), self.state_cls.separator) res = {} for key, value in _state_tree.items(): if value: with self(key): tmp = await self._trigger_event_nested(event_data, _trigger, value) if tmp is not None: res[key] = tmp if not res.get(key, None) and _trigger in self.events: tmp = await self.events[_trigger].trigger_nested(event_data) if tmp is not None: res[key] = tmp return None if not res or all(v is None for v in res.values()) else any(res.values()) async def _can_trigger(self, model, trigger, *args, **kwargs): state_tree = self.build_state_tree(getattr(model, self.model_attribute), self.state_cls.separator) ordered_states = resolve_order(state_tree) for state_path in ordered_states: with self(): return await self._can_trigger_nested(model, trigger, state_path, *args, **kwargs) 
async def _can_trigger_nested(self, model, trigger, path, *args, **kwargs): if trigger in self.events: source_path = copy.copy(path) while source_path: event_data = AsyncEventData(self.get_state(source_path), AsyncEvent(name=trigger, machine=self), self, model, args, kwargs) state_name = self.state_cls.separator.join(source_path) for transition in self.events[trigger].transitions.get(state_name, []): try: _ = self.get_state(transition.dest) if transition.dest is not None else transition.source except ValueError: continue event_data.transition = transition try: await self.callbacks(self.prepare_event, event_data) await self.callbacks(transition.prepare, event_data) if all(await self.await_all([partial(c.check, event_data) for c in transition.conditions])): return True except BaseException as err: event_data.error = err if self.on_exception: await self.callbacks(self.on_exception, event_data) else: raise source_path.pop(-1) if path: with self(path.pop(0)): return await self._can_trigger_nested(model, trigger, path, *args, **kwargs) return False
HierarchicalAsyncMachine
python
aio-libs__aiohttp
aiohttp/abc.py
{ "start": 6305, "end": 6814 }
class ____(ABC): """Abstract writer to access log.""" __slots__ = ("logger", "log_format") def __init__(self, logger: logging.Logger, log_format: str) -> None: self.logger = logger self.log_format = log_format @abstractmethod def log(self, request: BaseRequest, response: StreamResponse, time: float) -> None: """Emit log to logger.""" @property def enabled(self) -> bool: """Check if logger is enabled.""" return True
AbstractAccessLogger
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/util/typing.py
{ "start": 2282, "end": 2471 }
class ____(Protocol): """protocol for types that have ``__args__`` there's no public interface for this AFAIK """ __args__: Tuple[_AnnotationScanType, ...]
ArgsTypeProtocol
python
pytorch__pytorch
torch/testing/_internal/distributed/multi_threaded_pg.py
{ "start": 6565, "end": 7264 }
class ____: def __init__(self, src): self.src = src @torch.no_grad() def work(self, data): src_in_tensor_list = data[self.src][1] # Can't handle scatter with multiple input tensor list assert len(src_in_tensor_list) == 1 src_in_tensors = src_in_tensor_list[0] for rank, each_rank_data in enumerate(data): out_tensor_list = each_rank_data[0] # Can't handle scatter with multiple output tensor assert len(out_tensor_list) == 1 dest_tensor = out_tensor_list[0] # See Note [Hide collectives mutation from autograd] dest_tensor.detach().copy_(src_in_tensors[rank])
Scatter
python
apache__airflow
providers/common/sql/tests/unit/common/sql/operators/test_sql_execute.py
{ "start": 1497, "end": 1549 }
class ____(NamedTuple): id: str value: str
Row
python
ApeWorX__ape
src/ape/exceptions.py
{ "start": 5007, "end": 8581 }
class ____(ApeException): """ Raised when issues occur related to transactions. """ DEFAULT_MESSAGE = "Transaction failed." def __init__( self, message: Optional[str] = None, base_err: Optional[Exception] = None, code: Optional[int] = None, txn: Optional[FailedTxn] = None, trace: _TRACE_ARG = None, contract_address: Optional["AddressType"] = None, source_traceback: _SOURCE_TRACEBACK_ARG = None, project: Optional["ProjectManager"] = None, set_ape_traceback: bool = False, # Overridden in ContractLogicError ): message = message or (str(base_err) if base_err else self.DEFAULT_MESSAGE) self.message = message self.base_err = base_err self.code = code self.txn = txn self._trace = trace self.contract_address = contract_address self._source_traceback = source_traceback self._project = project ex_message = f"({code}) {message}" if code else message # Finalizes expected revert message. super().__init__(ex_message) self._attempted_source_traceback = False if set_ape_traceback: self.with_ape_traceback() @property def address(self) -> Optional["AddressType"]: if addr := self.contract_address: return addr receiver = getattr(self.txn, "receiver", None) if receiver in (None, "0x0000000000000000000000000000000000000000"): # Check if deploy if addr := getattr(self.txn, "contract_address", None): return addr return receiver @cached_property def contract_type(self) -> Optional["ContractType"]: if not (address := self.address): # Contract address not found. return None # Lazy import because of exceptions.py root nature. 
from ape.utils.basemodel import ManagerAccessMixin try: return ManagerAccessMixin.chain_manager.contracts.get(address) except (RecursionError, ProviderNotConnectedError): return None @property def trace(self) -> Optional["TraceAPI"]: tr = self._trace if callable(tr): result = tr() self._trace = result return result return tr @trace.setter def trace(self, value): self._trace = value @property def source_traceback(self) -> Optional["SourceTraceback"]: tb = self._source_traceback result: Optional[SourceTraceback] if not self._attempted_source_traceback and tb is None and self.txn is not None: result = _get_ape_traceback_from_tx(self.txn) # Prevent re-trying. self._attempted_source_traceback = True elif callable(tb): result = tb() else: result = tb self._source_traceback = result return result @source_traceback.setter def source_traceback(self, value): self._source_traceback = value def _get_ape_traceback(self) -> Optional[TracebackType]: if src_tb := self.source_traceback: # Create a custom Pythonic traceback using lines from the sources # found from analyzing the trace of the transaction. if py_tb := _get_custom_python_traceback(self, src_tb, project=self._project): return py_tb return None def with_ape_traceback(self): return self.with_traceback(self._get_ape_traceback())
TransactionError
python
django__django
tests/forms_tests/tests/test_widgets.py
{ "start": 219, "end": 922 }
class ____(AdminSeleniumTestCase): available_apps = ["forms_tests"] + AdminSeleniumTestCase.available_apps def test_textarea_trailing_newlines(self): """ A roundtrip on a ModelForm doesn't alter the TextField value """ from selenium.webdriver.common.by import By article = Article.objects.create(content="\nTst\n") self.selenium.get( self.live_server_url + reverse("article_form", args=[article.pk]) ) with self.wait_page_loaded(): self.selenium.find_element(By.ID, "submit").click() article = Article.objects.get(pk=article.pk) self.assertEqual(article.content, "\r\nTst\r\n")
LiveWidgetTests
python
giampaolo__psutil
tests/test_aix.py
{ "start": 417, "end": 4358 }
class ____(PsutilTestCase): def test_virtual_memory(self): out = sh('/usr/bin/svmon -O unit=KB') re_pattern = r"memory\s*" for field in [ "size", "inuse", "free", "pin", "virtual", "available", "mmode", ]: re_pattern += rf"(?P<{field}>\S+)\s+" matchobj = re.search(re_pattern, out) assert matchobj is not None KB = 1024 total = int(matchobj.group("size")) * KB available = int(matchobj.group("available")) * KB used = int(matchobj.group("inuse")) * KB free = int(matchobj.group("free")) * KB psutil_result = psutil.virtual_memory() # TOLERANCE_SYS_MEM is not enough. For some reason we're seeing # differences of ~1.2 MB. 2 MB is still a good tolerance when # compared to GBs. TOLERANCE_SYS_MEM = 2 * KB * KB # 2 MB assert psutil_result.total == total assert abs(psutil_result.used - used) < TOLERANCE_SYS_MEM assert abs(psutil_result.available - available) < TOLERANCE_SYS_MEM assert abs(psutil_result.free - free) < TOLERANCE_SYS_MEM def test_swap_memory(self): out = sh('/usr/sbin/lsps -a') # From the man page, "The size is given in megabytes" so we assume # we'll always have 'MB' in the result # TODO maybe try to use "swap -l" to check "used" too, but its units # are not guaranteed to be "MB" so parsing may not be consistent matchobj = re.search( r"(?P<space>\S+)\s+" r"(?P<vol>\S+)\s+" r"(?P<vg>\S+)\s+" r"(?P<size>\d+)MB", out, ) assert matchobj is not None total_mb = int(matchobj.group("size")) MB = 1024**2 psutil_result = psutil.swap_memory() # we divide our result by MB instead of multiplying the lsps value by # MB because lsps may round down, so we round down too assert int(psutil_result.total / MB) == total_mb def test_cpu_stats(self): out = sh('/usr/bin/mpstat -a') re_pattern = r"ALL\s*" for field in [ "min", "maj", "mpcs", "mpcr", "dev", "soft", "dec", "ph", "cs", "ics", "bound", "rq", "push", "S3pull", "S3grd", "S0rd", "S1rd", "S2rd", "S3rd", "S4rd", "S5rd", "sysc", ]: re_pattern += rf"(?P<{field}>\S+)\s+" matchobj = re.search(re_pattern, out) assert matchobj is not 
None # numbers are usually in the millions so 1000 is ok for tolerance CPU_STATS_TOLERANCE = 1000 psutil_result = psutil.cpu_stats() assert ( abs(psutil_result.ctx_switches - int(matchobj.group("cs"))) < CPU_STATS_TOLERANCE ) assert ( abs(psutil_result.syscalls - int(matchobj.group("sysc"))) < CPU_STATS_TOLERANCE ) assert ( abs(psutil_result.interrupts - int(matchobj.group("dev"))) < CPU_STATS_TOLERANCE ) assert ( abs(psutil_result.soft_interrupts - int(matchobj.group("soft"))) < CPU_STATS_TOLERANCE ) def test_cpu_count_logical(self): out = sh('/usr/bin/mpstat -a') mpstat_lcpu = int(re.search(r"lcpu=(\d+)", out).group(1)) psutil_lcpu = psutil.cpu_count(logical=True) assert mpstat_lcpu == psutil_lcpu def test_net_if_addrs_names(self): out = sh('/etc/ifconfig -l') ifconfig_names = set(out.split()) psutil_names = set(psutil.net_if_addrs().keys()) assert ifconfig_names == psutil_names
AIXSpecificTestCase
python
dask__dask
dask/dataframe/dask_expr/_concat.py
{ "start": 11133, "end": 11807 }
class ____(StackPartition): def _divisions(self): return self._frames[0].divisions def _layer(self): dsk, i = {}, 0 kwargs = self._kwargs.copy() kwargs["ignore_order"] = self.ignore_order dfs = self._frames for i in range(self.npartitions): dsk[(self._name, i)] = ( apply, methods.concat, [ [(df._name, i) for df in dfs], self.axis, self.join, False, True, ], kwargs, ) return dsk
StackPartitionInterleaved
python
docker__docker-py
tests/integration/api_image_test.py
{ "start": 12227, "end": 12533 }
class ____(BaseAPIIntegrationTest): def test_inspect_distribution(self): data = self.client.inspect_distribution('busybox:latest') assert data is not None assert 'Platforms' in data assert {'os': 'linux', 'architecture': 'amd64'} in data['Platforms']
InspectDistributionTest
python
wandb__wandb
wandb/vendor/pygments/lexers/templates.py
{ "start": 32603, "end": 33402 }
class ____(DelegatingLexer): """ A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and `kid <http://kid-templating.org/>`_ kid XML templates. """ name = 'Genshi' aliases = ['genshi', 'kid', 'xml+genshi', 'xml+kid'] filenames = ['*.kid'] alias_filenames = ['*.xml'] mimetypes = ['application/x-genshi', 'application/x-kid'] def __init__(self, **options): super(GenshiLexer, self).__init__(XmlLexer, GenshiMarkupLexer, **options) def analyse_text(text): rv = 0.0 if re.search('\$\{.*?\}', text) is not None: rv += 0.2 if re.search('py:(.*?)=["\']', text) is not None: rv += 0.2 return rv + XmlLexer.analyse_text(text) - 0.01
GenshiLexer
python
walkccc__LeetCode
solutions/860. Lemonade Change/860.py
{ "start": 0, "end": 401 }
class ____: def lemonadeChange(self, bills: list[int]) -> bool: fives = 0 tens = 0 for bill in bills: if bill == 5: fives += 1 elif bill == 10: fives -= 1 tens += 1 else: # bill == 20 if tens > 0: tens -= 1 fives -= 1 else: fives -= 3 if fives < 0: return False return True
Solution
python
huggingface__transformers
src/transformers/models/m2m_100/modeling_m2m_100.py
{ "start": 22192, "end": 22622 }
class ____(PreTrainedModel): config: M2M100Config base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["M2M100EncoderLayer", "M2M100DecoderLayer"] _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True # Doesn't support `compile` (dynamic control flow). Can be fixed but low usage model _can_compile_fullgraph = False
M2M100PreTrainedModel
python
huggingface__transformers
src/transformers/models/patchtst/modeling_patchtst.py
{ "start": 78193, "end": 84726 }
class ____(PatchTSTPreTrainedModel): def __init__(self, config: PatchTSTConfig): super().__init__(config) # Turn off masking if config.do_mask_input: logger.warning("Setting `do_mask_input` parameter to False.") config.do_mask_input = False self.model = PatchTSTModel(config) if config.loss == "mse": self.distribution_output = None else: if config.distribution_output == "student_t": self.distribution_output = StudentTOutput(dim=config.num_targets) elif config.distribution_output == "normal": self.distribution_output = NormalOutput(dim=config.num_targets) elif config.distribution_output == "negative_binomial": self.distribution_output = NegativeBinomialOutput(dim=config.num_targets) else: raise ValueError(f"Unknown distribution output {config.distribution_output}") self.head = PatchTSTRegressionHead(config, self.distribution_output) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, past_values: torch.Tensor, target_values: Optional[torch.Tensor] = None, past_observed_mask: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, PatchTSTForRegressionOutput]: r""" past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*): Input sequence to the model target_values (`torch.Tensor` of shape `(bs, num_input_channels)`): Target values associates with the `past_values` past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). Whether or not to return a `ModelOutput` instead of a plain tuple. 
Examples: ```python >>> from transformers import PatchTSTConfig, PatchTSTForRegression >>> # Regression task with 6 input channels and regress 2 targets >>> model = PatchTSTForRegression.from_pretrained("namctin/patchtst_etth1_regression") >>> # during inference, one only provides past values, the model outputs future values >>> past_values = torch.randn(20, 512, 6) >>> outputs = model(past_values=past_values) >>> regression_outputs = outputs.regression_outputs ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict model_output = self.model( past_values=past_values, past_observed_mask=past_observed_mask, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=True, ) # get output head. y_hat is of shape [bs x num_targets] or tuple of this shape y_hat = self.head(model_output.last_hidden_state) loss = None if target_values is not None: if self.distribution_output: distribution = self.distribution_output.distribution(y_hat) # y_hat should be a 2-tuple, each with dimension [bs, num_targets] y_hat = tuple(item.view(-1, self.config.num_targets) for item in y_hat) loss = nll(distribution, target_values) # take average of the loss loss = weighted_average(loss) else: loss = nn.MSELoss(reduction="mean") loss = loss(y_hat, target_values) if not return_dict: # hidden_states, attentions, mask outputs = (y_hat,) + model_output[1:-3] outputs = (loss,) + outputs if loss is not None else outputs return outputs return PatchTSTForRegressionOutput( loss=loss, regression_outputs=y_hat, hidden_states=model_output.hidden_states, attentions=model_output.attentions, ) @torch.no_grad() def generate( self, past_values: torch.Tensor, past_observed_mask: Optional[torch.Tensor] = None, ) -> SamplePatchTSTOutput: """ Generate sequences of sample predictions from a model with a probability distribution head. 
Parameters: past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Past values of the time series that serves as context in order to predict the future. past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). Return: [`SamplePatchTSTOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of samples, num_targets)`. """ # get number of samples num_parallel_samples = self.config.num_parallel_samples # get model output outputs = self( past_values=past_values, target_values=None, past_observed_mask=past_observed_mask, output_hidden_states=False, ) # get distribution distribution = self.distribution_output.distribution(outputs.regression_outputs) # get samples: list of [bs x num_targets] samples = [distribution.sample() for _ in range(num_parallel_samples)] # samples: [bs x num_samples x num_targets] samples = torch.stack(samples, dim=1).view(-1, num_parallel_samples, self.config.num_targets) return SamplePatchTSTOutput(sequences=samples) __all__ = [ "PatchTSTModel", "PatchTSTPreTrainedModel", "PatchTSTForPrediction", "PatchTSTForPretraining", "PatchTSTForRegression", "PatchTSTForClassification", ]
PatchTSTForRegression
python
python-poetry__poetry
tests/types.py
{ "start": 3983, "end": 4084 }
class ____(Protocol): def __call__(self, name: str) -> DistributionHash: ...
DistributionHashGetter
python
pytorch__pytorch
torch/fx/graph.py
{ "start": 43846, "end": 44970 }
class ____: """ Side table for the graph for the purpose of doing fast queries """ def __init__(self): self.table: dict[tuple[str, Optional[Target]], dict[Node, None]] = defaultdict( dict ) def _key(self, node) -> tuple[str, Optional[Target]]: return (node.op, node.target if node.op == "call_function" else None) def __contains__(self, node) -> bool: return node in self.table[self._key(node)] def insert(self, node: Node) -> None: self.table[self._key(node)][node] = None def remove(self, node: Node) -> None: self.table[self._key(node)].pop(node) def find_nodes(self, *, op: str, target: Optional["Target"] = None): if op == "call_function": assert target is not None return [*self.table[(op, target)].keys()] if target is None: return [*self.table[(op, None)].keys()] # op is call_method, get_attr, call_module return [node for node in self.table[(op, None)] if node.target == target] @compatibility(is_backward_compatible=True)
_FindNodesLookupTable
python
scipy__scipy
benchmarks/benchmarks/fft_basic.py
{ "start": 1682, "end": 2405 }
class ____(Benchmark): params = [ [100, 256, 313, 512, 1000, 1024, 2048, 2048*2, 2048*4], ['real', 'cmplx'], ['scipy.fftpack', 'scipy.fft', 'numpy.fft'] ] param_names = ['size', 'type', 'module'] def setup(self, size, cmplx, module): if cmplx == 'cmplx': self.x = random([size]).astype(cdouble)+random([size]).astype(cdouble)*1j else: self.x = random([size]).astype(double) module = get_module(module) self.fft = getattr(module, 'fft') self.ifft = getattr(module, 'ifft') def time_fft(self, size, cmplx, module): self.fft(self.x) def time_ifft(self, size, cmplx, module): self.ifft(self.x)
Fft
python
python-openxml__python-docx
src/docx/oxml/table.py
{ "start": 9686, "end": 12568 }
class ____(BaseOxmlElement): """``<w:tblPr>`` element, child of ``<w:tbl>``, holds child elements that define table properties such as style and borders.""" get_or_add_bidiVisual: Callable[[], CT_OnOff] get_or_add_jc: Callable[[], CT_Jc] get_or_add_tblLayout: Callable[[], CT_TblLayoutType] _add_tblStyle: Callable[[], CT_String] _remove_bidiVisual: Callable[[], None] _remove_jc: Callable[[], None] _remove_tblStyle: Callable[[], None] _tag_seq = ( "w:tblStyle", "w:tblpPr", "w:tblOverlap", "w:bidiVisual", "w:tblStyleRowBandSize", "w:tblStyleColBandSize", "w:tblW", "w:jc", "w:tblCellSpacing", "w:tblInd", "w:tblBorders", "w:shd", "w:tblLayout", "w:tblCellMar", "w:tblLook", "w:tblCaption", "w:tblDescription", "w:tblPrChange", ) tblStyle: CT_String | None = ZeroOrOne( # pyright: ignore[reportAssignmentType] "w:tblStyle", successors=_tag_seq[1:] ) bidiVisual: CT_OnOff | None = ZeroOrOne( # pyright: ignore[reportAssignmentType] "w:bidiVisual", successors=_tag_seq[4:] ) jc: CT_Jc | None = ZeroOrOne( # pyright: ignore[reportAssignmentType] "w:jc", successors=_tag_seq[8:] ) tblLayout: CT_TblLayoutType | None = ZeroOrOne( # pyright: ignore[reportAssignmentType] "w:tblLayout", successors=_tag_seq[13:] ) del _tag_seq @property def alignment(self) -> WD_TABLE_ALIGNMENT | None: """Horizontal alignment of table, |None| if `./w:jc` is not present.""" jc = self.jc if jc is None: return None return cast("WD_TABLE_ALIGNMENT | None", jc.val) @alignment.setter def alignment(self, value: WD_TABLE_ALIGNMENT | None): self._remove_jc() if value is None: return jc = self.get_or_add_jc() jc.val = cast("WD_ALIGN_PARAGRAPH", value) @property def autofit(self) -> bool: """|False| when there is a `w:tblLayout` child with `@w:type="fixed"`. Otherwise |True|. 
""" tblLayout = self.tblLayout return True if tblLayout is None else tblLayout.type != "fixed" @autofit.setter def autofit(self, value: bool): tblLayout = self.get_or_add_tblLayout() tblLayout.type = "autofit" if value else "fixed" @property def style(self): """Return the value of the ``val`` attribute of the ``<w:tblStyle>`` child or |None| if not present.""" tblStyle = self.tblStyle if tblStyle is None: return None return tblStyle.val @style.setter def style(self, value: str | None): self._remove_tblStyle() if value is None: return self._add_tblStyle().val = value
CT_TblPr
python
ansible__ansible
lib/ansible/plugins/lookup/csvfile.py
{ "start": 4488, "end": 6487 }
class ____(LookupBase): def read_csv(self, filename, key, delimiter, encoding='utf-8', dflt=None, col=1, keycol=0): with open(filename, encoding=encoding) as f: for row in csv.reader(f, dialect=csv.excel, delimiter=delimiter): if row and row[keycol] == key: return row[col] return dflt def run(self, terms, variables=None, **kwargs): ret = [] self.set_options(var_options=variables, direct=kwargs) # populate options paramvals = self.get_options() if not terms: raise AnsibleError('Search key is required but was not found') for term in terms: kv = parse_kv(term) if '_raw_params' not in kv: raise AnsibleError('Search key is required but was not found') key = kv['_raw_params'] # parameters override per term using k/v reset_params = False for name, value in kv.items(): if name == '_raw_params': continue if name not in paramvals: raise ValueError(f'{name!r} is not a valid option') self._deprecate_inline_kv() self.set_option(name, value) reset_params = True if reset_params: paramvals = self.get_options() # default is just placeholder for real tab if paramvals['delimiter'] == 'TAB': paramvals['delimiter'] = "\t" lookupfile = self.find_file_in_search_path(variables, 'files', paramvals['file']) var = self.read_csv(lookupfile, key, paramvals['delimiter'], paramvals['encoding'], paramvals['default'], paramvals['col'], paramvals['keycol']) if var is not None: if isinstance(var, MutableSequence): ret.extend(var) else: ret.append(var) return ret
LookupModule
python
pytorch__pytorch
torch/_higher_order_ops/base_hop.py
{ "start": 10525, "end": 10686 }
class ____: def __init__(self, fn): self.fn = fn def __call__(self, *args, **kwargs): return self.fn(*args, **kwargs)
FunctionWithNoFreeVars