language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
scikit-learn__scikit-learn
sklearn/base.py
{ "start": 40992, "end": 42057 }
class ____: """Mixin class for all meta estimators in scikit-learn. This mixin is empty, and only exists to indicate that the estimator is a meta-estimator. .. versionchanged:: 1.6 The `_required_parameters` is now removed and is unnecessary since tests are refactored and don't use this anymore. Examples -------- >>> from sklearn.base import MetaEstimatorMixin >>> from sklearn.datasets import load_iris >>> from sklearn.linear_model import LogisticRegression >>> class MyEstimator(MetaEstimatorMixin): ... def __init__(self, *, estimator=None): ... self.estimator = estimator ... def fit(self, X, y=None): ... if self.estimator is None: ... self.estimator_ = LogisticRegression() ... else: ... self.estimator_ = self.estimator ... return self >>> X, y = load_iris(return_X_y=True) >>> estimator = MyEstimator().fit(X, y) >>> estimator.estimator_ LogisticRegression() """
MetaEstimatorMixin
python
dask__dask
dask/dataframe/dask_expr/_expr.py
{ "start": 51896, "end": 52057 }
class ____(Elemwise): _projection_passthrough = True _parameters = ["frame", "cond", "other"] _defaults = {"other": np.nan} operation = M.mask
Mask
python
pydantic__pydantic
pydantic-core/tests/benchmarks/test_micro_benchmarks.py
{ "start": 18159, "end": 20499 }
class ____: @pytest.fixture(scope='class') def core_validator(self): class CoreModel: __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__' return SchemaValidator( schema=core_schema.model_schema( cls=CoreModel, schema=core_schema.model_fields_schema( fields={'dt': core_schema.model_field(schema=core_schema.datetime_schema())} ), ) ) @pytest.fixture(scope='class') def datetime_raw(self): return datetime.now(timezone.utc) + timedelta(days=1) @pytest.fixture(scope='class') def datetime_str(self, datetime_raw): return str(datetime_raw) @pytest.fixture(scope='class') def python_data_dict(self, datetime_raw): return {'dt': datetime_raw} @pytest.fixture(scope='class') def json_dict_data(self, datetime_str): return json.dumps({'dt': datetime_str}) @pytest.mark.benchmark(group='datetime model - python') def test_core_python(self, core_validator, benchmark, python_data_dict): benchmark(core_validator.validate_python, python_data_dict) @pytest.mark.benchmark(group='datetime model - JSON') def test_model_core_json(self, core_validator, benchmark, json_dict_data): benchmark(core_validator.validate_json, json_dict_data) @pytest.mark.benchmark(group='datetime datetime') def test_core_raw(self, benchmark, datetime_raw): v = SchemaValidator(core_schema.datetime_schema()) benchmark(v.validate_python, datetime_raw) @pytest.mark.benchmark(group='datetime str') def test_core_str(self, benchmark, datetime_str): v = SchemaValidator(core_schema.datetime_schema()) benchmark(v.validate_python, datetime_str) @pytest.mark.benchmark(group='datetime future') def test_core_future(self, benchmark, datetime_raw): v = SchemaValidator(core_schema.datetime_schema(gt=datetime.now())) benchmark(v.validate_python, datetime_raw) @pytest.mark.benchmark(group='datetime future') def test_core_future_str(self, benchmark, datetime_str): v = SchemaValidator(core_schema.datetime_schema(gt=datetime.now())) benchmark(v.validate_python, datetime_str)
TestBenchmarkDateTime
python
pytorch__pytorch
torch/_functorch/_aot_autograd/descriptors.py
{ "start": 26398, "end": 26535 }
class ____(AOTOutput): idx: int def expr(self) -> str: return f"__saved_for_backwards_{self.idx}"
SavedForBackwardsAOTOutput
python
great-expectations__great_expectations
contrib/experimental/great_expectations_experimental/expectations/expect_column_kurtosis_to_be_between.py
{ "start": 560, "end": 3242 }
class ____(ColumnAggregateMetricProvider): """MetricProvider Class for Aggregate Mean MetricProvider""" metric_name = "column.custom.kurtosis" @column_aggregate_value(engine=PandasExecutionEngine) def _pandas(cls, column, **kwargs): return stats.kurtosis(column) # @metric_value(engine=SqlAlchemyExecutionEngine) # def _sqlalchemy( # cls, # execution_engine: "SqlAlchemyExecutionEngine", # metric_domain_kwargs: Dict, # metric_value_kwargs: Dict, # metrics: Dict[Tuple, Any], # runtime_configuration: Dict, # ): # ( # selectable, # compute_domain_kwargs, # accessor_domain_kwargs, # ) = execution_engine.get_compute_domain( # metric_domain_kwargs, MetricDomainTypes.COLUMN # ) # column_name = accessor_domain_kwargs["column"] # column = sa.column(column_name) # sqlalchemy_engine = execution_engine.engine # dialect = sqlalchemy_engine.dialect # # column_median = None # # # TODO: compute the value and return it # # return column_median @column_aggregate_partial(engine=SparkDFExecutionEngine) def _spark(cls, column, **kwargs): return F.kurtosis(column) # @classmethod # def _get_evaluation_dependencies( # cls, # metric: MetricConfiguration, # configuration: Optional[ExpectationConfiguration] = None, # execution_engine: Optional[ExecutionEngine] = None, # runtime_configuration: Optional[dict] = None, # ): # """This should return a dictionary: # # { # "dependency_name": MetricConfiguration, # ... 
# } # """ # # dependencies = super()._get_evaluation_dependencies( # metric=metric, # configuration=configuration, # execution_engine=execution_engine, # runtime_configuration=runtime_configuration, # ) # # table_domain_kwargs = { # k: v for k, v in metric.metric_domain_kwargs.items() if k != "column" # } # # dependencies.update( # { # "table.row_count": MetricConfiguration( # "table.row_count", table_domain_kwargs # ) # } # ) # # if isinstance(execution_engine, SqlAlchemyExecutionEngine): # dependencies["column_values.nonnull.count"] = MetricConfiguration( # "column_values.nonnull.count", metric.metric_domain_kwargs # ) # # return dependencies
ColumnKurtosis
python
getsentry__sentry
src/sentry/api/authentication.py
{ "start": 15465, "end": 21603 }
class ____(StandardAuthentication): token_name = b"bearer" def _find_or_update_token_by_hash(self, token_str: str) -> ApiToken | ApiTokenReplica: """ Find token by hash or update token's hash value if only found via plaintext. 1. Hash provided plaintext token. 2. Perform lookup based on hashed value. 3. If found, return the token. 4. If not found, search for the token based on its plaintext value. 5. If found, update the token's hashed value and return the token. 6. If not found via hash or plaintext value, raise AuthenticationFailed Returns `ApiTokenReplica` if running in REGION silo or `ApiToken` if running in CONTROL silo. """ hashed_token = hashlib.sha256(token_str.encode()).hexdigest() if SiloMode.get_current_mode() == SiloMode.REGION: try: # Try to find the token by its hashed value first return ApiTokenReplica.objects.get(hashed_token=hashed_token) except ApiTokenReplica.DoesNotExist: try: # If we can't find it by hash, use the plaintext string return ApiTokenReplica.objects.get(token=token_str) except ApiTokenReplica.DoesNotExist: # If the token does not exist by plaintext either, it is not a valid token raise AuthenticationFailed("Invalid token") else: try: # Try to find the token by its hashed value first return ApiToken.objects.select_related("user", "application").get( hashed_token=hashed_token ) except ApiToken.DoesNotExist: try: # If we can't find it by hash, use the plaintext string api_token = ApiToken.objects.select_related("user", "application").get( token=token_str ) except ApiToken.DoesNotExist: # If the token does not exist by plaintext either, it is not a valid token raise AuthenticationFailed("Invalid token") else: # Update it with the hashed value if found by plaintext api_token.hashed_token = hashed_token api_token.save(update_fields=["hashed_token"]) return api_token def accepts_auth(self, auth: list[bytes]) -> bool: if not super().accepts_auth(auth): return False # Technically, this will not match if auth length is not 2 # However, we 
want to run into `authenticate()` in this case, as this throws a more helpful error message if len(auth) != 2: return True token_str = force_str(auth[1]) return not token_str.startswith(SENTRY_ORG_AUTH_TOKEN_PREFIX) def authenticate_token(self, request: Request, token_str: str) -> tuple[Any, Any]: user: AnonymousUser | User | RpcUser | None = AnonymousUser() token: SystemToken | ApiTokenReplica | ApiToken | None = SystemToken.from_request( request, token_str ) application_is_inactive = False if not token: token = self._find_or_update_token_by_hash(token_str) if isinstance(token, ApiTokenReplica): # we're running as a REGION silo user = user_service.get_user(user_id=token.user_id) application_is_inactive = not token.application_is_active else: # the token returned is an ApiToken from the CONTROL silo user = token.user application_is_inactive = ( token.application is not None and not token.application.is_active ) elif isinstance(token, SystemToken): user = token.user if not token: raise AuthenticationFailed("Invalid token") if token.is_expired(): raise AuthenticationFailed("Token expired") if not isinstance(token, SystemToken) and user and not user.is_active: raise AuthenticationFailed("User inactive or deleted") if application_is_inactive: raise AuthenticationFailed("UserApplication inactive or deleted") if token.scoping_organization_id: # We need to make sure the organization to which the token has access is the same as the one in the URL organization = None organization_context = organization_service.get_organization_by_id( id=token.organization_id, include_projects=False, include_teams=False ) if organization_context: organization = organization_context.organization if organization: resolved_url = resolve(request.path_info) target_org_id_or_slug = resolved_url.kwargs.get("organization_id_or_slug") if target_org_id_or_slug: if ( organization.slug != target_org_id_or_slug and organization.id != target_org_id_or_slug ): raise AuthenticationFailed("Unauthorized 
organization access.") # We want to limit org scoped tokens access to org level endpoints only # Except some none-org level endpoints that we added special treatments for elif resolved_url.url_name not in ["sentry-api-0-organizations"]: raise AuthenticationFailed( "This token access is limited to organization endpoints." ) else: raise AuthenticationFailed("Cannot resolve organization from token.") return self.transform_auth( user, token, "api_token", api_token_type=self.token_name, api_token_is_sentry_app=getattr(user, "is_sentry_app", False), ) @AuthenticationSiloLimit(SiloMode.CONTROL, SiloMode.REGION)
UserAuthTokenAuthentication
python
huggingface__transformers
src/transformers/models/auto/modeling_auto.py
{ "start": 90523, "end": 90773 }
class ____(_BaseAutoModelClass): _model_mapping = MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING AutoModelForSpeechSeq2Seq = auto_class_update( AutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling" )
AutoModelForSpeechSeq2Seq
python
openai__openai-python
src/openai/resources/realtime/realtime.py
{ "start": 8062, "end": 8525 }
class ____: def __init__(self, realtime: AsyncRealtime) -> None: self._realtime = realtime @cached_property def client_secrets(self) -> AsyncClientSecretsWithStreamingResponse: return AsyncClientSecretsWithStreamingResponse(self._realtime.client_secrets) @cached_property def calls(self) -> AsyncCallsWithStreamingResponse: return AsyncCallsWithStreamingResponse(self._realtime.calls)
AsyncRealtimeWithStreamingResponse
python
doocs__leetcode
solution/3700-3799/3708.Longest Fibonacci Subarray/Solution.py
{ "start": 0, "end": 315 }
class ____: def longestSubarray(self, nums: List[int]) -> int: n = len(nums) ans = f = 2 for i in range(2, n): if nums[i] == nums[i - 1] + nums[i - 2]: f = f + 1 ans = max(ans, f) else: f = 2 return ans
Solution
python
euske__pdfminer
pdfminer/layout.py
{ "start": 5107, "end": 5276 }
class ____(LTItem, LTText): def __init__(self, text): self._text = text return def get_text(self): return self._text ## LTChar ##
LTAnno
python
ray-project__ray
python/ray/dag/tests/experimental/test_torch_tensor_dag.py
{ "start": 1088, "end": 3948 }
class ____: def __init__(self): self.device = AcceleratorContext.get().get_accelerator_devices()[0] def init_distributed(self, world_size, rank): torch.distributed.init_process_group( backend="nccl", world_size=world_size, rank=rank ) def send(self, shape, dtype, value: int, send_tensor=True): if not send_tensor: return 1 return torch.ones(shape, dtype=dtype, device=self.device) * value def send_dict(self, entries): results = {} for key, entry in entries.items(): value, shape, dtype = entry results[key] = torch.ones(shape, dtype=dtype) * value return results def send_tensor(self, tensor): return tensor.to(self.device) def send_or_raise(self, shape, dtype, value: int, raise_exception=False): if raise_exception: raise RuntimeError() return torch.ones(shape, dtype=dtype, device=self.device) * value def send_int(self, value: int): return value def recv(self, tensor): return (tensor[0].item(), tensor.shape, tensor.dtype) def recv_on_gpu(self, tensor): assert tensor.device.type == "cuda" return (tensor[0].item(), tensor.shape, tensor.dtype) def recv_and_matmul(self, two_d_tensor): """ Receive the tensor and do some expensive computation (matmul). Args: two_d_tensor: a 2D tensor that has the same size for its dimensions """ # Check that tensor got loaded to the correct device. assert two_d_tensor.dim() == 2 assert two_d_tensor.size(0) == two_d_tensor.size(1) torch.matmul(two_d_tensor, two_d_tensor) return (two_d_tensor[0][0].item(), two_d_tensor.shape, two_d_tensor.dtype) def recv_dict(self, tensor_dict): vals = {} for i, tensor in tensor_dict.items(): vals[i] = self.recv(tensor) return vals def recv_dict_on_gpu(self, tensor_dict): """ Receive a dict of tensors and return a dict of tensors on GPU. It also verifies that the tensors are on GPU. 
""" vals = {} for i, tensor in tensor_dict.items(): vals[i] = self.recv_on_gpu(tensor) return vals def compute_with_tuple_args(self, args, i: int): shape, dtype, value = args[i] tensor = torch.ones(shape, dtype=dtype, device=self.device) * value return tensor def recv_tensor(self, tensor): return tensor def recv_tensors(self, *tensors): return tuple(tensors) def ping(self): return @ray.method(num_returns=2) def return_two_tensors( self, t1: torch.Tensor, t2: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: return t1, t2 @ray.remote(num_cpus=1)
TorchTensorWorker
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typeNarrowingLiteralMember1.py
{ "start": 3688, "end": 3963 }
class ____: event: None | Literal["e"] def func2(e: XD | XE) -> None: if e.event == None: reveal_type(e, expected_text="XE") if e.event == "e": reveal_type(e, expected_text="XE") if e.event == "d": reveal_type(e, expected_text="XD")
XE
python
huggingface__transformers
src/transformers/quantizers/quantizer_spqr.py
{ "start": 1039, "end": 3090 }
class ____(HfQuantizer): """ Quantizer of the SpQR method. Enables the loading of prequantized models. """ requires_calibration = True def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs): super().__init__(quantization_config, **kwargs) self.quantization_config = quantization_config def validate_environment(self, *args, **kwargs): if not torch.cuda.is_available(): raise RuntimeError("GPU is required to run SpQR quantized model.") if not is_accelerate_available(): raise ImportError("Using `spqr` quantization requires Accelerate: `pip install accelerate`") if not is_spqr_available(): raise ImportError("Using `spqr` quantization requires SpQR: `pip install spqr_quant[gpu]`") def update_dtype(self, dtype: "torch.dtype") -> "torch.dtype": if dtype is None: dtype = torch.float16 logger.info("Assuming SpQR inference on GPU and loading the model in `torch.float16`.") elif dtype != torch.float16: raise ValueError( "You cannot use any type other than torch.float16 for SpQR. Please either leave it None or set it to" "torch.float16 explicitly." ) return dtype def _process_model_before_weight_loading( self, model: "PreTrainedModel", keep_in_fp32_modules: list[str] | None = None, **kwargs, ): self.modules_to_not_convert = self.get_modules_to_not_convert( model, self.quantization_config.modules_to_not_convert, keep_in_fp32_modules ) replace_with_spqr_linear( model, quantization_config=self.quantization_config, modules_to_not_convert=self.modules_to_not_convert, ) model.config.quantization_config = self.quantization_config @property def is_trainable(self): return False def is_serializable(self, safe_serialization=None): return True
SpQRHfQuantizer
python
pydata__xarray
xarray/core/groupby.py
{ "start": 54188, "end": 62540 }
class ____(GroupBy["DataArray"], DataArrayGroupbyArithmetic): """GroupBy object specialized to grouping DataArray objects""" __slots__ = () _dims: tuple[Hashable, ...] | None @property def dims(self) -> tuple[Hashable, ...]: self._raise_if_by_is_chunked() if self._dims is None: index = self.encoded.group_indices[0] self._dims = self._obj.isel({self._group_dim: index}).dims return self._dims def _iter_grouped_shortcut(self): """Fast version of `_iter_grouped` that yields Variables without metadata """ self._raise_if_by_is_chunked() var = self._obj.variable for _idx, indices in enumerate(self.encoded.group_indices): if indices: yield var[{self._group_dim: indices}] def _concat_shortcut(self, applied, dim, positions=None): # nb. don't worry too much about maintaining this method -- it does # speed things up, but it's not very interpretable and there are much # faster alternatives (e.g., doing the grouped aggregation in a # compiled language) # TODO: benbovy - explicit indexes: this fast implementation doesn't # create an explicit index for the stacked dim coordinate stacked = Variable.concat(applied, dim, shortcut=True) reordered = _maybe_reorder(stacked, dim, positions, N=self.group1d.size) return self._obj._replace_maybe_drop_dims(reordered) def _restore_dim_order(self, stacked: DataArray) -> DataArray: def lookup_order(dimension): for grouper in self.groupers: if dimension == grouper.name and grouper.group.ndim == 1: (dimension,) = grouper.group.dims if dimension in self._obj.dims: axis = self._obj.get_axis_num(dimension) else: axis = 1e6 # some arbitrarily high value return axis new_order = sorted(stacked.dims, key=lookup_order) stacked = stacked.transpose( *new_order, transpose_coords=self._restore_coord_dims ) return stacked def map( self, func: Callable[..., DataArray], args: tuple[Any, ...] = (), shortcut: bool | None = None, **kwargs: Any, ) -> DataArray: """Apply a function to each array in the group and concatenate them together into a new array. 
`func` is called like `func(ar, *args, **kwargs)` for each array `ar` in this group. Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how to stack together the array. The rule is: 1. If the dimension along which the group coordinate is defined is still in the first grouped array after applying `func`, then stack over this dimension. 2. Otherwise, stack over the new dimension given by name of this grouping (the argument to the `groupby` function). Parameters ---------- func : callable Callable to apply to each array. shortcut : bool, optional Whether or not to shortcut evaluation under the assumptions that: (1) The action of `func` does not depend on any of the array metadata (attributes or coordinates) but only on the data and dimensions. (2) The action of `func` creates arrays with homogeneous metadata, that is, with the same dimensions and attributes. If these conditions are satisfied `shortcut` provides significant speedup. This should be the case for many common groupby operations (e.g., applying numpy ufuncs). *args : tuple, optional Positional arguments passed to `func`. **kwargs Used to call `func(ar, **kwargs)` for each array `ar`. Returns ------- applied : DataArray The result of splitting, applying and combining this array. """ grouped = self._iter_grouped_shortcut() if shortcut else self._iter_grouped() applied = (maybe_wrap_array(arr, func(arr, *args, **kwargs)) for arr in grouped) return self._combine(applied, shortcut=shortcut) def apply(self, func, shortcut=False, args=(), **kwargs): """ Backward compatible implementation of ``map`` See Also -------- DataArrayGroupBy.map """ warnings.warn( "GroupBy.apply may be deprecated in the future. 
Using GroupBy.map is encouraged", PendingDeprecationWarning, stacklevel=2, ) return self.map(func, shortcut=shortcut, args=args, **kwargs) def _combine(self, applied, shortcut=False): """Recombine the applied objects like the original.""" applied_example, applied = peek_at(applied) dim, positions = self._infer_concat_args(applied_example) if shortcut: combined = self._concat_shortcut(applied, dim, positions) else: combined = concat( applied, dim, data_vars="all", coords="different", compat="equals", join="outer", ) combined = _maybe_reorder(combined, dim, positions, N=self.group1d.size) if isinstance(combined, type(self._obj)): # only restore dimension order for arrays combined = self._restore_dim_order(combined) # assign coord and index when the applied function does not return that coord if dim not in applied_example.dims: combined = combined.assign_coords(self.encoded.coords) combined = self._maybe_unstack(combined) combined = self._maybe_reindex(combined) return combined def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, shortcut: bool = True, **kwargs: Any, ) -> DataArray: """Reduce the items in this group by applying `func` along some dimension(s). Parameters ---------- func : callable Function which can be called in the form `func(x, axis=axis, **kwargs)` to return the result of collapsing an np.ndarray over an integer valued axis. dim : "...", str, Iterable of Hashable or None, optional Dimension(s) over which to apply `func`. If None, apply over the groupby dimension, if "..." apply over all dimensions. axis : int or sequence of int, optional Axis(es) over which to apply `func`. Only one of the 'dimension' and 'axis' arguments can be supplied. If neither are supplied, then `func` is calculated over all dimension for each group item. 
keep_attrs : bool, optional If True, the datasets's attributes (`attrs`) will be copied from the original object to the new one. If False (default), the new object will be returned without attributes. **kwargs : dict Additional keyword arguments passed on to `func`. Returns ------- reduced : Array Array with summarized data and the indicated dimension(s) removed. """ if self._by_chunked: raise ValueError( "This method is not supported when lazily grouping by a chunked array. " "Try installing the `flox` package if you are using one of the standard " "reductions (e.g. `mean`). " ) if dim is None: dim = [self._group_dim] if keep_attrs is None: keep_attrs = _get_keep_attrs(default=True) def reduce_array(ar: DataArray) -> DataArray: return ar.reduce( func=func, dim=dim, axis=axis, keep_attrs=keep_attrs, keepdims=keepdims, **kwargs, ) check_reduce_dims(dim, self.dims) return self.map(reduce_array, shortcut=shortcut)
DataArrayGroupByBase
python
yaml__pyyaml
lib/yaml/cyaml.py
{ "start": 1096, "end": 1283 }
class ____(CParser, Constructor, Resolver): def __init__(self, stream): CParser.__init__(self, stream) Constructor.__init__(self) Resolver.__init__(self)
CLoader
python
huggingface__transformers
src/transformers/models/sam2/configuration_sam2.py
{ "start": 13323, "end": 16618 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`Sam2MaskDecoder`]. It is used to instantiate a SAM2 memory encoder according to the specified arguments, defining the model architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 256): Dimensionality of the hidden states. hidden_act (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function in the SAM2 mask decoder. mlp_dim (`int`, *optional*, defaults to 2048): The dimension of the MLP in the two-way transformer. num_hidden_layers (`int`, *optional*, defaults to 2): The number of hidden layers in the two-way transformer. num_attention_heads (`int`, *optional*, defaults to 8): The number of attention heads in the two-way transformer. attention_downsample_rate (`int`, *optional*, defaults to 2): The downsample rate for the attention layers. num_multimask_outputs (`int`, *optional*, defaults to 3): The number of multimask outputs. iou_head_depth (`int`, *optional*, defaults to 3): The depth of the IoU head. iou_head_hidden_dim (`int`, *optional*, defaults to 256): The hidden dimension of the IoU head. dynamic_multimask_via_stability (`bool`, *optional*, defaults to `True`): Whether to use dynamic multimask via stability. dynamic_multimask_stability_delta (`float`, *optional*, defaults to 0.05): The stability delta for the dynamic multimask. dynamic_multimask_stability_thresh (`float`, *optional*, defaults to 0.98): The stability threshold for the dynamic multimask. 
""" base_config_key = "mask_decoder_config" def __init__( self, hidden_size=256, hidden_act="gelu", mlp_dim=2048, num_hidden_layers=2, num_attention_heads=8, attention_downsample_rate=2, num_multimask_outputs=3, iou_head_depth=3, iou_head_hidden_dim=256, dynamic_multimask_via_stability=True, dynamic_multimask_stability_delta=0.05, dynamic_multimask_stability_thresh=0.98, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_multimask_outputs = num_multimask_outputs self.hidden_act = hidden_act self.iou_head_depth = iou_head_depth self.iou_head_hidden_dim = iou_head_hidden_dim self.dynamic_multimask_via_stability = dynamic_multimask_via_stability self.dynamic_multimask_stability_delta = dynamic_multimask_stability_delta self.dynamic_multimask_stability_thresh = dynamic_multimask_stability_thresh # TwoWayTransformer configuration self.num_hidden_layers = num_hidden_layers self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.mlp_dim = mlp_dim self.attention_downsample_rate = attention_downsample_rate
Sam2MaskDecoderConfig
python
pypa__hatch
tests/cli/status/test_status.py
{ "start": 3047, "end": 5517 }
class ____: def test_no_project(self, hatch, isolation, config_file, helpers): config_file.model.mode = "project" config_file.save() result = hatch("status") assert result.exit_code == 0, result.output assert result.output == helpers.dedent( f""" Mode is set to `project` but no project is set, defaulting to the current directory [Project] - <no project detected> [Location] - {isolation} [Config] - {config_file.path} """ ) def test_unknown_project(self, hatch, isolation, config_file, helpers): project = "foo" config_file.model.mode = "project" config_file.model.project = project config_file.save() result = hatch("status") assert result.exit_code == 0, result.output assert result.output == helpers.dedent( f""" Unable to locate project {project}, defaulting to the current directory [Project] - <no project detected> [Location] - {isolation} [Config] - {config_file.path} """ ) def test_not_a_project(self, hatch, temp_dir, config_file, helpers): project = "foo" config_file.model.mode = "project" config_file.model.project = project config_file.model.projects = {project: str(temp_dir)} config_file.save() result = hatch("status") assert result.exit_code == 0, result.output assert result.output == helpers.dedent( f""" [Project] - {project} (not a project) [Location] - {temp_dir} [Config] - {config_file.path} """ ) @pytest.mark.parametrize("file_name", ["pyproject.toml", "setup.py"]) def test_found_project(self, hatch, temp_dir, config_file, helpers, file_name): project_file = temp_dir / file_name project_file.touch() project = "foo" config_file.model.mode = "project" config_file.model.project = project config_file.model.projects = {project: str(temp_dir)} config_file.save() result = hatch("status") assert result.exit_code == 0, result.output assert result.output == helpers.dedent( f""" [Project] - {project} [Location] - {temp_dir} [Config] - {config_file.path} """ )
TestModeProject
python
getsentry__sentry
tests/sentry/replays/consumers/test_recording.py
{ "start": 1040, "end": 10406 }
class ____(TransactionTestCase): replay_id = uuid.uuid4().hex replay_recording_id = uuid.uuid4().hex force_synchronous = True def get_recording_data(self, segment_id: int) -> memoryview: result = storage_kv.get( _make_recording_filename( project_id=self.project.id, replay_id=self.replay_id, segment_id=segment_id, retention_days=30, ) ) assert result is not None, "Expecting non-None result here" return unpack(zlib.decompress(result))[1] def get_video_data(self, segment_id: int) -> None | tuple[None | memoryview, memoryview]: result = storage_kv.get( _make_recording_filename( project_id=self.project.id, replay_id=self.replay_id, segment_id=segment_id, retention_days=30, ) ) if result: return unpack(zlib.decompress(result))[0] return None def processing_factory(self) -> ProcessReplayRecordingStrategyFactory: return ProcessReplayRecordingStrategyFactory( input_block_size=1, max_batch_size=1, max_batch_time=1, num_processes=1, num_threads=1, output_block_size=1, force_synchronous=self.force_synchronous, ) def submit(self, messages: list[ReplayRecording]) -> None: strategy = self.processing_factory().create_with_partitions(lambda x, force=False: None, {}) for message in messages: strategy.submit( Message( BrokerValue( KafkaPayload(b"key", msgpack.packb(message), [("should_drop", b"1")]), Partition(Topic("ingest-replay-recordings"), 1), 1, datetime.now(), ) ) ) strategy.poll() strategy.join(1) strategy.terminate() def nonchunked_messages( self, message: bytes = b'[{"hello":"world"}]', segment_id: int = 0, compressed: bool = False, replay_event: bytes | None = None, replay_video: bytes | None = None, ) -> list[ReplayRecording]: message = zlib.compress(message) if compressed else message return [ { "type": "replay_recording_not_chunked", "replay_id": self.replay_id, "org_id": self.organization.id, "key_id": 123, "project_id": self.project.id, "received": int(time.time()), "retention_days": 30, "payload": f'{{"segment_id":{segment_id}}}\n'.encode() + message, # type: 
ignore[typeddict-item] "replay_event": replay_event, # type: ignore[typeddict-item] "replay_video": replay_video, # type: ignore[typeddict-item] "relay_snuba_publish_disabled": True, } ] @patch("sentry.models.OrganizationOnboardingTask.objects.record") @patch("sentry.analytics.record") @patch("sentry.replays.usecases.ingest.track_outcome") @patch("sentry.replays.usecases.ingest.report_hydration_error") @thread_leak_allowlist(reason="replays", issue=97033) def test_end_to_end_consumer_processing_old( self, report_hydration_issue: MagicMock, track_outcome: MagicMock, mock_record: MagicMock, mock_onboarding_task: MagicMock, ) -> None: data = [ { "type": 5, "data": { "tag": "breadcrumb", "payload": { "category": "replay.hydrate-error", "timestamp": 1.0, "data": {"url": "https://sentry.io"}, }, }, } ] segment_id = 0 self.submit( self.nonchunked_messages( message=json.dumps(data).encode(), segment_id=segment_id, compressed=True, replay_event=json.dumps( { "type": "replay_event", "replay_id": self.replay_id, "timestamp": int(time.time()), } ).encode(), replay_video=b"hello, world!", ) ) dat = self.get_recording_data(segment_id) assert json.loads(bytes(dat).decode("utf-8")) == data assert self.get_video_data(segment_id) == b"hello, world!" 
self.project.refresh_from_db() assert self.project.flags.has_replays mock_onboarding_task.assert_called_with( organization_id=self.project.organization_id, task=OnboardingTask.SESSION_REPLAY, status=OnboardingTaskStatus.COMPLETE, date_completed=ANY, ) assert_any_analytics_event( mock_record, FirstReplaySentEvent( organization_id=self.organization.id, project_id=self.project.id, platform=self.project.platform, user_id=self.organization.default_owner_id, ), ) assert track_outcome.called assert report_hydration_issue.called @patch("sentry.models.OrganizationOnboardingTask.objects.record") @patch("sentry.analytics.record") @patch("sentry.replays.usecases.ingest.track_outcome") @patch("sentry.replays.usecases.ingest.report_hydration_error") @patch( "sentry.replays.usecases.ingest.set_project_flag_and_signal", wraps=set_project_flag_and_signal, ) @thread_leak_allowlist(reason="replays", issue=97033) def test_end_to_end_consumer_processing_new( self, set_project_flag_and_signal: MagicMock, report_hydration_issue: MagicMock, track_outcome: MagicMock, mock_record: MagicMock, mock_onboarding_task: MagicMock, ) -> None: data = [ { "type": 5, "data": { "tag": "breadcrumb", "payload": { "category": "replay.hydrate-error", "timestamp": 1.0, "data": {"url": "https://sentry.io"}, }, }, } ] segment_id = 0 with self.options({"replay.consumer.enable_new_query_caching_system": True}): self.submit( [ *self.nonchunked_messages( message=json.dumps(data).encode(), segment_id=segment_id, compressed=True, replay_event=json.dumps( { "type": "replay_event", "replay_id": self.replay_id, "timestamp": int(time.time()), } ).encode(), replay_video=b"hello, world!", ), *self.nonchunked_messages( message=json.dumps(data).encode(), segment_id=segment_id, compressed=True, replay_event=json.dumps( { "type": "replay_event", "replay_id": self.replay_id, "timestamp": int(time.time()), } ).encode(), replay_video=b"hello, world!", ), ] ) dat = self.get_recording_data(segment_id) assert 
json.loads(bytes(dat).decode("utf-8")) == data assert self.get_video_data(segment_id) == b"hello, world!" self.project.refresh_from_db() assert bool(self.project.flags.has_replays) is True mock_onboarding_task.assert_called_with( organization_id=self.project.organization_id, task=OnboardingTask.SESSION_REPLAY, status=OnboardingTaskStatus.COMPLETE, date_completed=ANY, ) assert_any_analytics_event( mock_record, FirstReplaySentEvent( organization_id=self.organization.id, project_id=self.project.id, platform=self.project.platform, user_id=self.organization.default_owner_id, ), ) # We emit a new outcome because its a segment-0 event. We emit a hydration error because # thats our cached configuration but we don't emit an onboarding metric because this is # our second event. assert track_outcome.called assert track_outcome.call_count == 2 assert report_hydration_issue.called assert report_hydration_issue.call_count == 2 assert set_project_flag_and_signal.call_count == 1
RecordingTestCase
python
django__django
django/views/generic/dates.py
{ "start": 9613, "end": 12852 }
class ____(MultipleObjectMixin, DateMixin, View): """ Base class for date-based views displaying a list of objects. This requires subclassing to provide a response mixin. """ allow_empty = False date_list_period = "year" def get(self, request, *args, **kwargs): self.date_list, self.object_list, extra_context = self.get_dated_items() context = self.get_context_data( object_list=self.object_list, date_list=self.date_list, **extra_context ) return self.render_to_response(context) def get_dated_items(self): """Obtain the list of dates and items.""" raise NotImplementedError( "A DateView must provide an implementation of get_dated_items()" ) def get_ordering(self): """ Return the field or fields to use for ordering the queryset; use the date field by default. """ return "-%s" % self.get_date_field() if self.ordering is None else self.ordering def get_dated_queryset(self, **lookup): """ Get a queryset properly filtered according to `allow_future` and any extra lookup kwargs. """ qs = self.get_queryset().filter(**lookup) date_field = self.get_date_field() allow_future = self.get_allow_future() allow_empty = self.get_allow_empty() paginate_by = self.get_paginate_by(qs) if not allow_future: now = timezone.now() if self.uses_datetime_field else timezone_today() qs = qs.filter(**{"%s__lte" % date_field: now}) if not allow_empty: # When pagination is enabled, it's better to do a cheap query # than to load the unpaginated queryset in memory. is_empty = not qs if paginate_by is None else not qs.exists() if is_empty: raise Http404( _("No %(verbose_name_plural)s available") % { "verbose_name_plural": qs.model._meta.verbose_name_plural, } ) return qs def get_date_list_period(self): """ Get the aggregation period for the list of dates: 'year', 'month', or 'day'. """ return self.date_list_period def get_date_list(self, queryset, date_type=None, ordering="ASC"): """ Get a date list by calling `queryset.dates/datetimes()`, checking along the way for empty lists that aren't allowed. 
""" date_field = self.get_date_field() allow_empty = self.get_allow_empty() if date_type is None: date_type = self.get_date_list_period() if self.uses_datetime_field: date_list = queryset.datetimes(date_field, date_type, ordering) else: date_list = queryset.dates(date_field, date_type, ordering) if date_list is not None and not date_list and not allow_empty: raise Http404( _("No %(verbose_name_plural)s available") % { "verbose_name_plural": queryset.model._meta.verbose_name_plural, } ) return date_list
BaseDateListView
python
Textualize__rich
examples/log.py
{ "start": 198, "end": 1943 }
class ____(RegexHighlighter): base_style = "req." highlights = [ r"^(?P<protocol>\w+) (?P<method>\w+) (?P<path>\S+) (?P<result>\w+) (?P<stats>\[.+\])$", r"\/(?P<filename>\w+\..{3,4})", ] theme = Theme( { "req.protocol": Style.parse("dim bold green"), "req.method": Style.parse("bold cyan"), "req.path": Style.parse("magenta"), "req.filename": Style.parse("bright_magenta"), "req.result": Style.parse("yellow"), "req.stats": Style.parse("dim"), } ) console = Console(theme=theme) console.log("Server starting...") console.log("Serving on http://127.0.0.1:8000") time.sleep(1) request_highlighter = RequestHighlighter() console.log( request_highlighter("HTTP GET /foo/bar/baz/egg.html 200 [0.57, 127.0.0.1:59076]"), ) console.log( request_highlighter( "HTTP GET /foo/bar/baz/background.jpg 200 [0.57, 127.0.0.1:59076]" ), ) time.sleep(1) def test_locals(): foo = (1, 2, 3) movies = ["Deadpool", "Rise of the Skywalker"] console = Console() console.log( "[b]JSON[/b] RPC [i]batch[/i]", [ {"jsonrpc": "2.0", "method": "sum", "params": [1, 2, 4], "id": "1"}, {"jsonrpc": "2.0", "method": "notify_hello", "params": [7]}, {"jsonrpc": "2.0", "method": "subtract", "params": [42, 23], "id": "2"}, {"foo": "boo"}, { "jsonrpc": "2.0", "method": "foo.get", "params": {"name": "myself", "enable": False, "grommits": None}, "id": "5", }, {"jsonrpc": "2.0", "method": "get_data", "id": "9"}, ], log_locals=True, ) test_locals()
RequestHighlighter
python
django__django
django/core/management/base.py
{ "start": 569, "end": 1222 }
class ____(Exception): """ Exception class indicating a problem while executing a management command. If this exception is raised during the execution of a management command, it will be caught and turned into a nicely-printed error message to the appropriate output stream (i.e., stderr); as a result, raising this exception (with a sensible description of the error) is the preferred way to indicate that something has gone wrong in the execution of a command. """ def __init__(self, *args, returncode=1, **kwargs): self.returncode = returncode super().__init__(*args, **kwargs)
CommandError
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typedDictClosed5.py
{ "start": 127, "end": 192 }
class ____(TypedDict, extra_items=str): name: str
MovieExtraStr
python
PrefectHQ__prefect
tests/server/models/test_block_documents.py
{ "start": 25820, "end": 34385 }
class ____: @pytest.fixture(autouse=True) async def block_documents(self, session, block_schemas): block_documents = [] block_documents.append( await models.block_documents.create_block_document( session=session, block_document=schemas.actions.BlockDocumentCreate( block_schema_id=block_schemas[0].id, name="block-1", block_type_id=block_schemas[0].block_type_id, ), ) ) block_documents.append( await models.block_documents.create_block_document( session=session, block_document=schemas.actions.BlockDocumentCreate( block_schema_id=block_schemas[1].id, name="block-2", block_type_id=block_schemas[1].block_type_id, data={"x": 1}, ), ) ) block_documents.append( await models.block_documents.create_block_document( session=session, block_document=schemas.actions.BlockDocumentCreate( block_schema_id=block_schemas[2].id, name="block-3", block_type_id=block_schemas[2].block_type_id, data={"y": 2}, ), ) ) block_documents.append( await models.block_documents.create_block_document( session=session, block_document=schemas.actions.BlockDocumentCreate( block_schema_id=block_schemas[1].id, name="block-4", block_type_id=block_schemas[1].block_type_id, ), ) ) block_documents.append( await models.block_documents.create_block_document( session=session, block_document=schemas.actions.BlockDocumentCreate( block_schema_id=block_schemas[2].id, name="block-5", block_type_id=block_schemas[2].block_type_id, ), ) ) block_documents.append( await models.block_documents.create_block_document( session=session, block_document=schemas.actions.BlockDocumentCreate( block_schema_id=block_schemas[2].id, block_type_id=block_schemas[2].block_type_id, is_anonymous=True, ), ) ) block_documents.append( await models.block_documents.create_block_document( session=session, block_document=schemas.actions.BlockDocumentCreate( name="nested-block-1", block_schema_id=block_schemas[3].id, block_type_id=block_schemas[3].block_type_id, data={ "b": {"$ref": {"block_document_id": block_documents[1].id}}, "z": "index", }, ), ) 
) block_documents.append( await models.block_documents.create_block_document( session=session, block_document=schemas.actions.BlockDocumentCreate( name="nested-block-2", block_schema_id=block_schemas[4].id, block_type_id=block_schemas[4].block_type_id, data={ "c": {"$ref": {"block_document_id": block_documents[2].id}}, "d": {"$ref": {"block_document_id": block_documents[5].id}}, }, ), ) ) await session.commit() return sorted(block_documents, key=lambda b: b.name) async def test_read_block_documents(self, session, block_documents): read_blocks = await models.block_documents.read_block_documents(session=session) # by default, exclude anonymous block documents assert {b.id for b in read_blocks} == { b.id for b in block_documents if not b.is_anonymous } # sorted by block document name assert [rb.id for rb in read_blocks] == [ b.id for b in block_documents if not b.is_anonymous ] async def test_read_block_documents_sorts_by_block_type_name_name( self, session, block_documents ): sorted_blocks = sorted( block_documents, key=lambda b: (b.block_type.name, b.name) ) read_blocks = await models.block_documents.read_block_documents( session=session, sort=schemas.sorting.BlockDocumentSort.BLOCK_TYPE_AND_NAME_ASC, ) # by default, exclude anonymous block documents assert {b.id for b in read_blocks} == { b.id for b in sorted_blocks if not b.is_anonymous } # sorted by block document name assert [rb.id for rb in read_blocks] == [ b.id for b in sorted_blocks if not b.is_anonymous ] async def test_read_block_documents_with_is_anonymous_filter( self, session, block_documents ): non_anonymous_block_documents = ( await models.block_documents.read_block_documents( session=session, block_document_filter=schemas.filters.BlockDocumentFilter( is_anonymous=dict(eq_=False) ), ) ) anonymous_block_documents = await models.block_documents.read_block_documents( session=session, block_document_filter=schemas.filters.BlockDocumentFilter( is_anonymous=dict(eq_=True) ), ) all_block_documents = await 
models.block_documents.read_block_documents( session=session, block_document_filter=schemas.filters.BlockDocumentFilter( is_anonymous=None ), ) assert {b.id for b in non_anonymous_block_documents} == { b.id for b in block_documents if not b.is_anonymous } assert {b.id for b in anonymous_block_documents} == { b.id for b in block_documents if b.is_anonymous } assert {b.id for b in all_block_documents} == {b.id for b in block_documents} async def test_read_block_documents_limit_offset(self, session, block_documents): # sorted by block type name, block name read_block_documents = await models.block_documents.read_block_documents( session=session, limit=2 ) assert [b.id for b in read_block_documents] == [ block_documents[1].id, block_documents[2].id, ] read_block_documents = await models.block_documents.read_block_documents( session=session, limit=2, offset=2 ) assert [b.id for b in read_block_documents] == [ block_documents[3].id, block_documents[4].id, ] async def test_read_block_documents_filter_capabilities( self, session, block_documents ): fly_and_swim_block_documents = ( await models.block_documents.read_block_documents( session=session, block_schema_filter=schemas.filters.BlockSchemaFilter( block_capabilities=dict(all_=["fly", "swim"]) ), ) ) assert len(fly_and_swim_block_documents) == 1 assert [b.id for b in fly_and_swim_block_documents] == [block_documents[6].id] fly_block_documents = await models.block_documents.read_block_documents( session=session, block_schema_filter=schemas.filters.BlockSchemaFilter( block_capabilities=dict(all_=["fly"]) ), ) assert len(fly_block_documents) == 3 assert [b.id for b in fly_block_documents] == [ block_documents[2].id, block_documents[4].id, block_documents[6].id, ] swim_block_documents = await models.block_documents.read_block_documents( session=session, block_schema_filter=schemas.filters.BlockSchemaFilter( block_capabilities=dict(all_=["swim"]) ), ) assert len(swim_block_documents) == 1 assert [b.id for b in 
swim_block_documents] == [block_documents[6].id]
TestReadBlockDocuments
python
pytorch__pytorch
test/lazy/test_functionalization.py
{ "start": 278, "end": 2936 }
class ____(TestCase): def test_lazy_init_with_view(self): def f(device, reset_storage=False): torch.manual_seed(2023) if device == "lazy": metrics.reset() class Model(torch.nn.Module): def __init__(self) -> None: super().__init__() self.fc1 = torch.nn.Linear(4, 2, bias=False) def forward(self, x): return x @ self.fc1.weight.transpose(0, 1) with torch.device(device): model = Model() if device == "lazy": if reset_storage: torch._C._unsafe_reset_storage(model.fc1.weight) torch._lazy.mark_step() sync_tensors = metrics.counter_value("SyncedTensorsWithIR") if reset_storage: assert sync_tensors == 1 else: # There is an extra tensor being unnecessarily synced if # the functional storage is not reset. assert sync_tensors == 2 x = torch.ones(4) out = model(x) if device == "lazy": torch._lazy.mark_step() return out cpu_out = f("cpu") lazy_out_1 = f("lazy", reset_storage=False) lazy_out_2 = f("lazy", reset_storage=True) self.assertEqual(cpu_out, lazy_out_1.to("cpu")) self.assertEqual(cpu_out, lazy_out_2.to("cpu")) def test_data_assign(self): def text(lazyt): raw = torch._C._lazy._get_tensors_text([lazyt]) return NODE_TYPE_PATTERN.sub("", raw) origin = torch.rand(3, dtype=torch.float32) tensor = origin.to("lazy") self.assertExpectedInline( text(tensor), """\ IR { %0 = [Float[3]] lazy_tensors::device_data(), device=CPU0, ROOT=0 } """, ) # Modify the data-type of tensor, and assign it to 'data'. # This should update the inner tensor of FunctionalTensorWrapper, # changing the corresponding IR node. modified_tensor = tensor.to(torch.bfloat16) tensor.data = modified_tensor self.assertExpectedInline( text(tensor), """\ IR { %0 = [Float[3]] lazy_tensors::device_data(), device=CPU0 %1 = [BFloat16[3]] aten::_to_copy(%0), dtype=BFloat16, layout=null, device=null, pin_memory=null, non_blocking=0, memory_format=null, ROOT=0 } """, # noqa: B950 ) if __name__ == "__main__": run_tests()
LazyFuncionalizationTest
python
django__django
tests/backends/tests.py
{ "start": 10030, "end": 10806 }
class ____(TransactionTestCase): available_apps = [] # Unfortunately with sqlite3 the in-memory test database cannot be closed, # and so it cannot be re-opened during testing. @skipUnlessDBFeature("test_db_allows_multiple_connections") def test_signal(self): data = {} def receiver(sender, connection, **kwargs): data["connection"] = connection connection_created.connect(receiver) connection.close() with connection.cursor(): pass self.assertIs(data["connection"].connection, connection.connection) connection_created.disconnect(receiver) data.clear() with connection.cursor(): pass self.assertEqual(data, {})
ConnectionCreatedSignalTest
python
networkx__networkx
networkx/algorithms/isomorphism/tests/test_vf2pp_helpers.py
{ "start": 27201, "end": 46638 }
class ____: def test_const_covered_neighbors(self): G1 = nx.Graph([(0, 1), (1, 2), (3, 0), (3, 2)]) G2 = nx.Graph([("a", "b"), ("b", "c"), ("k", "a"), ("k", "c")]) gparams = _GraphParameters(G1, G2, None, None, None, None, None) sparams = _StateParameters( {0: "a", 1: "b", 2: "c"}, {"a": 0, "b": 1, "c": 2}, None, None, None, None, None, None, None, None, ) u, v = 3, "k" assert _consistent_PT(u, v, gparams, sparams) def test_const_no_covered_neighbors(self): G1 = nx.Graph([(0, 1), (1, 2), (3, 4), (3, 5)]) G2 = nx.Graph([("a", "b"), ("b", "c"), ("k", "w"), ("k", "z")]) gparams = _GraphParameters(G1, G2, None, None, None, None, None) sparams = _StateParameters( {0: "a", 1: "b", 2: "c"}, {"a": 0, "b": 1, "c": 2}, None, None, None, None, None, None, None, None, ) u, v = 3, "k" assert _consistent_PT(u, v, gparams, sparams) def test_const_mixed_covered_uncovered_neighbors(self): G1 = nx.Graph([(0, 1), (1, 2), (3, 0), (3, 2), (3, 4), (3, 5)]) G2 = nx.Graph( [("a", "b"), ("b", "c"), ("k", "a"), ("k", "c"), ("k", "w"), ("k", "z")] ) gparams = _GraphParameters(G1, G2, None, None, None, None, None) sparams = _StateParameters( {0: "a", 1: "b", 2: "c"}, {"a": 0, "b": 1, "c": 2}, None, None, None, None, None, None, None, None, ) u, v = 3, "k" assert _consistent_PT(u, v, gparams, sparams) def test_const_fail_cases(self): G1 = nx.Graph( [ (0, 1), (1, 2), (10, 0), (10, 3), (10, 4), (10, 5), (10, 6), (4, 1), (5, 3), ] ) G2 = nx.Graph( [ ("a", "b"), ("b", "c"), ("k", "a"), ("k", "d"), ("k", "e"), ("k", "f"), ("k", "g"), ("e", "b"), ("f", "d"), ] ) gparams = _GraphParameters(G1, G2, None, None, None, None, None) sparams = _StateParameters( {0: "a", 1: "b", 2: "c", 3: "d"}, {"a": 0, "b": 1, "c": 2, "d": 3}, None, None, None, None, None, None, None, None, ) u, v = 10, "k" assert _consistent_PT(u, v, gparams, sparams) # Delete one uncovered neighbor of u. Notice how it still passes the test. # Two reasons for this: # 1. 
If u, v had different degrees from the beginning, they wouldn't # be selected as candidates in the first place. # 2. Even if they are selected, consistency is basically 1-look-ahead, # meaning that we take into consideration the relation of the # candidates with their mapped neighbors. The node we deleted is # not a covered neighbor. # Such nodes will be checked by the cut_PT function, which is # basically the 2-look-ahead, checking the relation of the # candidates with T1, T2 (in which belongs the node we just deleted). G1.remove_node(6) assert _consistent_PT(u, v, gparams, sparams) # Add one more covered neighbor of u in G1 G1.add_edge(u, 2) assert not _consistent_PT(u, v, gparams, sparams) # Compensate in G2 G2.add_edge(v, "c") assert _consistent_PT(u, v, gparams, sparams) # Add one more covered neighbor of v in G2 G2.add_edge(v, "x") G1.add_node(7) sparams.mapping.update({7: "x"}) sparams.reverse_mapping.update({"x": 7}) assert not _consistent_PT(u, v, gparams, sparams) # Compendate in G1 G1.add_edge(u, 7) assert _consistent_PT(u, v, gparams, sparams) @pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph)) def test_cut_inconsistent_labels(self, graph_type): G1 = graph_type( [ (0, 1), (1, 2), (10, 0), (10, 3), (10, 4), (10, 5), (10, 6), (4, 1), (5, 3), ] ) G2 = graph_type( [ ("a", "b"), ("b", "c"), ("k", "a"), ("k", "d"), ("k", "e"), ("k", "f"), ("k", "g"), ("e", "b"), ("f", "d"), ] ) l1 = {n: "blue" for n in G1.nodes()} l2 = {n: "blue" for n in G2.nodes()} l1.update({6: "green"}) # Change the label of one neighbor of u gparams = _GraphParameters( G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None ) sparams = _StateParameters( {0: "a", 1: "b", 2: "c", 3: "d"}, {"a": 0, "b": 1, "c": 2, "d": 3}, None, None, None, None, None, None, None, None, ) u, v = 10, "k" assert _cut_PT(u, v, gparams, sparams) def test_cut_consistent_labels(self): G1 = nx.Graph( [ (0, 1), (1, 2), (10, 0), (10, 3), (10, 4), (10, 5), (10, 6), (4, 1), (5, 3), ] ) G2 = nx.Graph( 
[ ("a", "b"), ("b", "c"), ("k", "a"), ("k", "d"), ("k", "e"), ("k", "f"), ("k", "g"), ("e", "b"), ("f", "d"), ] ) l1 = {n: "blue" for n in G1.nodes()} l2 = {n: "blue" for n in G2.nodes()} gparams = _GraphParameters( G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None ) sparams = _StateParameters( {0: "a", 1: "b", 2: "c", 3: "d"}, {"a": 0, "b": 1, "c": 2, "d": 3}, {4, 5}, None, {6}, None, {"e", "f"}, None, {"g"}, None, ) u, v = 10, "k" assert not _cut_PT(u, v, gparams, sparams) def test_cut_same_labels(self): G1 = nx.Graph( [ (0, 1), (1, 2), (10, 0), (10, 3), (10, 4), (10, 5), (10, 6), (4, 1), (5, 3), ] ) mapped = {0: "a", 1: "b", 2: "c", 3: "d", 4: "e", 5: "f", 6: "g", 10: "k"} G2 = nx.relabel_nodes(G1, mapped) l1 = {n: "blue" for n in G1.nodes()} l2 = {n: "blue" for n in G2.nodes()} gparams = _GraphParameters( G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None ) sparams = _StateParameters( {0: "a", 1: "b", 2: "c", 3: "d"}, {"a": 0, "b": 1, "c": 2, "d": 3}, {4, 5}, None, {6}, None, {"e", "f"}, None, {"g"}, None, ) u, v = 10, "k" assert not _cut_PT(u, v, gparams, sparams) # Change intersection between G1[u] and T1, so it's not the same as the # one between G2[v] and T2 G1.remove_edge(u, 4) assert _cut_PT(u, v, gparams, sparams) # Compensate in G2 G2.remove_edge(v, mapped[4]) assert not _cut_PT(u, v, gparams, sparams) # Change intersection between G2[v] and T2_tilde, so it's not the same # as the one between G1[u] and T1_tilde G2.remove_edge(v, mapped[6]) assert _cut_PT(u, v, gparams, sparams) # Compensate in G1 G1.remove_edge(u, 6) assert not _cut_PT(u, v, gparams, sparams) # Add disconnected nodes, which will form the new Ti_out G1.add_nodes_from([6, 7, 8]) G2.add_nodes_from(["g", "y", "z"]) sparams.T1_tilde.update({6, 7, 8}) sparams.T2_tilde.update({"g", "y", "z"}) l1 = {n: "blue" for n in G1.nodes()} l2 = {n: "blue" for n in G2.nodes()} gparams = _GraphParameters( G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None ) assert not 
_cut_PT(u, v, gparams, sparams) # Add some new nodes to the mapping sparams.mapping.update({6: "g", 7: "y"}) sparams.reverse_mapping.update({"g": 6, "y": 7}) # Add more nodes to T1, T2. G1.add_edges_from([(6, 20), (7, 20), (6, 21)]) G2.add_edges_from([("g", "i"), ("g", "j"), ("y", "j")]) sparams.mapping.update({20: "j", 21: "i"}) sparams.reverse_mapping.update({"j": 20, "i": 21}) sparams.T1.update({20, 21}) sparams.T2.update({"i", "j"}) sparams.T1_tilde.difference_update({6, 7}) sparams.T2_tilde.difference_update({"g", "y"}) assert not _cut_PT(u, v, gparams, sparams) # Add nodes from the new T1 and T2, as neighbors of u and v respectively G1.add_edges_from([(u, 20), (u, 21)]) G2.add_edges_from([(v, "i"), (v, "j")]) l1 = {n: "blue" for n in G1.nodes()} l2 = {n: "blue" for n in G2.nodes()} gparams = _GraphParameters( G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None ) assert not _cut_PT(u, v, gparams, sparams) # Change the edges, maintaining the G1[u]-T1 intersection G1.remove_edge(u, 20) G1.add_edge(u, 4) assert not _cut_PT(u, v, gparams, sparams) # Connect u to 8 which is still in T1_tilde G1.add_edge(u, 8) assert _cut_PT(u, v, gparams, sparams) # Same for v and z, so that inters(G1[u], T1out) == inters(G2[v], T2out) G2.add_edge(v, "z") assert not _cut_PT(u, v, gparams, sparams) def test_cut_different_labels(self): G1 = nx.Graph( [ (0, 1), (1, 2), (1, 14), (0, 4), (1, 5), (2, 6), (3, 7), (3, 6), (4, 10), (4, 9), (6, 10), (20, 9), (20, 15), (20, 12), (20, 11), (12, 13), (11, 13), (20, 8), (20, 3), (20, 5), (20, 0), ] ) mapped = { 0: "a", 1: "b", 2: "c", 3: "d", 4: "e", 5: "f", 6: "g", 7: "h", 8: "i", 9: "j", 10: "k", 11: "l", 12: "m", 13: "n", 14: "o", 15: "p", 20: "x", } G2 = nx.relabel_nodes(G1, mapped) l1 = {n: "none" for n in G1.nodes()} l2 = {} l1.update( { 9: "blue", 15: "blue", 12: "blue", 11: "green", 3: "green", 8: "red", 0: "red", 5: "yellow", } ) l2.update({mapped[n]: l for n, l in l1.items()}) gparams = _GraphParameters( G1, G2, l1, l2, 
nx.utils.groups(l1), nx.utils.groups(l2), None ) sparams = _StateParameters( {0: "a", 1: "b", 2: "c", 3: "d"}, {"a": 0, "b": 1, "c": 2, "d": 3}, {4, 5, 6, 7, 14}, None, {9, 10, 15, 12, 11, 13, 8}, None, {"e", "f", "g", "h", "o"}, None, {"j", "k", "l", "m", "n", "i", "p"}, None, ) u, v = 20, "x" assert not _cut_PT(u, v, gparams, sparams) # Change the orientation of the labels on neighbors of u compared to # neighbors of v. Leave the structure intact l1.update({9: "red"}) assert _cut_PT(u, v, gparams, sparams) # compensate in G2 l2.update({mapped[9]: "red"}) assert not _cut_PT(u, v, gparams, sparams) # Change the intersection of G1[u] and T1 G1.add_edge(u, 4) assert _cut_PT(u, v, gparams, sparams) # Same for G2[v] and T2 G2.add_edge(v, mapped[4]) assert not _cut_PT(u, v, gparams, sparams) # Change the intersection of G2[v] and T2_tilde G2.remove_edge(v, mapped[8]) assert _cut_PT(u, v, gparams, sparams) # Same for G1[u] and T1_tilde G1.remove_edge(u, 8) assert not _cut_PT(u, v, gparams, sparams) # Place 8 and mapped[8] in T1 and T2 respectively, by connecting it to covered nodes G1.add_edge(8, 3) G2.add_edge(mapped[8], mapped[3]) sparams.T1.add(8) sparams.T2.add(mapped[8]) sparams.T1_tilde.remove(8) sparams.T2_tilde.remove(mapped[8]) assert not _cut_PT(u, v, gparams, sparams) # Remove neighbor of u from T1 G1.remove_node(5) l1.pop(5) sparams.T1.remove(5) assert _cut_PT(u, v, gparams, sparams) # Same in G2 G2.remove_node(mapped[5]) l2.pop(mapped[5]) sparams.T2.remove(mapped[5]) assert not _cut_PT(u, v, gparams, sparams) def test_feasibility_same_labels(self): G1 = nx.Graph( [ (0, 1), (1, 2), (1, 14), (0, 4), (1, 5), (2, 6), (3, 7), (3, 6), (4, 10), (4, 9), (6, 10), (20, 9), (20, 15), (20, 12), (20, 11), (12, 13), (11, 13), (20, 8), (20, 2), (20, 5), (20, 0), ] ) mapped = { 0: "a", 1: "b", 2: "c", 3: "d", 4: "e", 5: "f", 6: "g", 7: "h", 8: "i", 9: "j", 10: "k", 11: "l", 12: "m", 13: "n", 14: "o", 15: "p", 20: "x", } G2 = nx.relabel_nodes(G1, mapped) l1 = {n: "blue" for 
n in G1.nodes()} l2 = {mapped[n]: "blue" for n in G1.nodes()} gparams = _GraphParameters( G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None ) sparams = _StateParameters( {0: "a", 1: "b", 2: "c", 3: "d"}, {"a": 0, "b": 1, "c": 2, "d": 3}, {4, 5, 6, 7, 14}, None, {9, 10, 15, 12, 11, 13, 8}, None, {"e", "f", "g", "h", "o"}, None, {"j", "k", "l", "m", "n", "i", "p"}, None, ) u, v = 20, "x" assert not _cut_PT(u, v, gparams, sparams) # Change structure in G2 such that, ONLY consistency is harmed G2.remove_edge(mapped[20], mapped[2]) G2.add_edge(mapped[20], mapped[3]) # Consistency check fails, while the cutting rules are satisfied! assert not _cut_PT(u, v, gparams, sparams) assert not _consistent_PT(u, v, gparams, sparams) # Compensate in G1 and make it consistent G1.remove_edge(20, 2) G1.add_edge(20, 3) assert not _cut_PT(u, v, gparams, sparams) assert _consistent_PT(u, v, gparams, sparams) # ONLY fail the cutting check G2.add_edge(v, mapped[10]) assert _cut_PT(u, v, gparams, sparams) assert _consistent_PT(u, v, gparams, sparams) def test_feasibility_different_labels(self): G1 = nx.Graph( [ (0, 1), (1, 2), (1, 14), (0, 4), (1, 5), (2, 6), (3, 7), (3, 6), (4, 10), (4, 9), (6, 10), (20, 9), (20, 15), (20, 12), (20, 11), (12, 13), (11, 13), (20, 8), (20, 2), (20, 5), (20, 0), ] ) mapped = { 0: "a", 1: "b", 2: "c", 3: "d", 4: "e", 5: "f", 6: "g", 7: "h", 8: "i", 9: "j", 10: "k", 11: "l", 12: "m", 13: "n", 14: "o", 15: "p", 20: "x", } G2 = nx.relabel_nodes(G1, mapped) l1 = {n: "none" for n in G1.nodes()} l2 = {} l1.update( { 9: "blue", 15: "blue", 12: "blue", 11: "green", 2: "green", 8: "red", 0: "red", 5: "yellow", } ) l2.update({mapped[n]: l for n, l in l1.items()}) gparams = _GraphParameters( G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None ) sparams = _StateParameters( {0: "a", 1: "b", 2: "c", 3: "d"}, {"a": 0, "b": 1, "c": 2, "d": 3}, {4, 5, 6, 7, 14}, None, {9, 10, 15, 12, 11, 13, 8}, None, {"e", "f", "g", "h", "o"}, None, {"j", "k", "l", 
"m", "n", "i", "p"}, None, ) u, v = 20, "x" assert not _cut_PT(u, v, gparams, sparams) # Change structure in G2 such that, ONLY consistency is harmed G2.remove_edge(mapped[20], mapped[2]) G2.add_edge(mapped[20], mapped[3]) l2.update({mapped[3]: "green"}) # Consistency check fails, while the cutting rules are satisfied! assert not _cut_PT(u, v, gparams, sparams) assert not _consistent_PT(u, v, gparams, sparams) # Compensate in G1 and make it consistent G1.remove_edge(20, 2) G1.add_edge(20, 3) l1.update({3: "green"}) assert not _cut_PT(u, v, gparams, sparams) assert _consistent_PT(u, v, gparams, sparams) # ONLY fail the cutting check l1.update({5: "red"}) assert _cut_PT(u, v, gparams, sparams) assert _consistent_PT(u, v, gparams, sparams)
TestGraphISOFeasibility
python
apache__airflow
task-sdk/tests/task_sdk/definitions/test_dag.py
{ "start": 19899, "end": 24145 }
class ____: DEFAULT_ARGS = { "owner": "test", "depends_on_past": True, "start_date": datetime.now(tz=timezone.utc), "retries": 1, "retry_delay": timedelta(minutes=1), } VALUE = 42 def test_dag_decorator_without_args(self): """Test that @dag can be used without any arguments.""" @dag_decorator def noop_pipeline(): ... dag = noop_pipeline() assert dag.dag_id == "noop_pipeline" def test_fileloc(self): @dag_decorator(schedule=None, default_args=self.DEFAULT_ARGS) def noop_pipeline(): ... dag = noop_pipeline() assert isinstance(dag, DAG) assert dag.dag_id == "noop_pipeline" assert dag.fileloc == __file__ def test_set_dag_id(self): """Test that checks you can set dag_id from decorator.""" @dag_decorator("test", schedule=None, default_args=self.DEFAULT_ARGS) def noop_pipeline(): ... dag = noop_pipeline() assert isinstance(dag, DAG) assert dag.dag_id == "test" def test_default_dag_id(self): """Test that @dag uses function name as default dag id.""" @dag_decorator(schedule=None, default_args=self.DEFAULT_ARGS) def noop_pipeline(): ... dag = noop_pipeline() assert isinstance(dag, DAG) assert dag.dag_id == "noop_pipeline" @pytest.mark.parametrize( ("dag_doc_md", "expected_doc_md"), [ pytest.param("dag docs.", "dag docs.", id="use_dag_doc_md"), pytest.param(None, "Regular Dag documentation", id="use_dag_docstring"), ], ) def test_documentation_added(self, dag_doc_md, expected_doc_md): """Test that @dag uses function docs as doc_md for Dag object if doc_md is not explicitly set.""" @dag_decorator(schedule=None, default_args=self.DEFAULT_ARGS, doc_md=dag_doc_md) def noop_pipeline(): """Regular Dag documentation""" dag = noop_pipeline() assert isinstance(dag, DAG) assert dag.dag_id == "noop_pipeline" assert dag.doc_md == expected_doc_md def test_fails_if_arg_not_set(self): """Test that @dag decorated function fails if positional argument is not set""" @dag_decorator(schedule=None, default_args=self.DEFAULT_ARGS) def noop_pipeline(value): ... 
# Test that if arg is not passed it raises a type error as expected. with pytest.raises(TypeError): noop_pipeline() def test_documentation_template_rendered(self): """Test that @dag uses function docs as doc_md for Dag object""" @dag_decorator(schedule=None, default_args=self.DEFAULT_ARGS) def noop_pipeline(): """ {% if True %} Regular Dag documentation {% endif %} """ dag = noop_pipeline() assert dag.dag_id == "noop_pipeline" assert "Regular Dag documentation" in dag.doc_md def test_resolve_documentation_template_file_not_rendered(self, tmp_path): """Test that @dag uses function docs as doc_md for Dag object""" raw_content = """ {% if True %} External Markdown Dag documentation {% endif %} """ path = tmp_path / "testfile.md" path.write_text(raw_content) @dag_decorator("test-dag", schedule=None, start_date=DEFAULT_DATE, doc_md=str(path)) def markdown_docs(): ... dag = markdown_docs() assert dag.dag_id == "test-dag" assert dag.doc_md == raw_content def test_dag_param_resolves(self): """Test that dag param is correctly resolved by operator""" from airflow.decorators import task @dag_decorator(schedule=None, default_args=self.DEFAULT_ARGS) def xcom_pass_to_op(value=self.VALUE): @task def return_num(num): return num xcom_arg = return_num(value) self.operator = xcom_arg.operator xcom_pass_to_op() assert isinstance(self.operator.op_args[0], DagParam) self.operator.render_template_fields({}) assert self.operator.op_args[0] == 42
TestDagDecorator
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 1476406, "end": 1477232 }
class ____(sgqlc.types.Type, Node): """Represents an 'review_requested' event on a given pull request.""" __schema__ = github_schema __field_names__ = ("actor", "created_at", "pull_request", "requested_reviewer") actor = sgqlc.types.Field(Actor, graphql_name="actor") """Identifies the actor who performed the event.""" created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt") """Identifies the date and time when the object was created.""" pull_request = sgqlc.types.Field(sgqlc.types.non_null(PullRequest), graphql_name="pullRequest") """PullRequest referenced by event.""" requested_reviewer = sgqlc.types.Field("RequestedReviewer", graphql_name="requestedReviewer") """Identifies the reviewer whose review was requested."""
ReviewRequestedEvent
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 1119492, "end": 1120131 }
class ____(sgqlc.types.Type, Node): """Represents a 'converted_to_discussion' event on a given issue.""" __schema__ = github_schema __field_names__ = ("actor", "created_at", "discussion") actor = sgqlc.types.Field(Actor, graphql_name="actor") """Identifies the actor who performed the event.""" created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt") """Identifies the date and time when the object was created.""" discussion = sgqlc.types.Field("Discussion", graphql_name="discussion") """The discussion that the issue was converted into."""
ConvertedToDiscussionEvent
python
scipy__scipy
scipy/special/tests/test_basic.py
{ "start": 178888, "end": 179521 }
class ____: def test_round(self): rnd = list(map(int, (special.round(10.1), special.round(10.4), special.round(10.5), special.round(10.6)))) # Note: According to the documentation, scipy.special.round is # supposed to round to the nearest even number if the fractional # part is exactly 0.5. On some platforms, this does not appear # to work and thus this test may fail. However, this unit test is # correctly written. rndrl = (10,10,10,11) assert_array_equal(rnd,rndrl)
TestRound
python
facelessuser__pymdown-extensions
tests/test_extensions/test_snippets.py
{ "start": 30864, "end": 31841 }
class ____(util.MdCase): """Test snippet URL cases with missing URL and 'check paths'.""" extension = [ 'pymdownx.snippets', ] extension_configs = { 'pymdownx.snippets': { 'base_path': [os.path.join(BASE, '_snippets')], 'url_download': True, 'url_max_size': 0, 'check_paths': True } } @patch('urllib.request.urlopen') def test_missing(self, mock_urlopen): """Test missing URL.""" cm = MagicMock() cm.status = 404 cm.code = 404 cm.read.return_value = b'' cm.headers = {'content-length': '0'} cm.__enter__.return_value = cm mock_urlopen.return_value = cm with self.assertRaises(SnippetMissingError): self.check_markdown( R''' --8<-- "https://test.com/myfile.md" ''', '', True )
TestURLSnippetsMissing
python
ray-project__ray
python/ray/data/_internal/execution/interfaces/op_runtime_metrics.py
{ "start": 7175, "end": 37561 }
class ____(metaclass=OpRuntimesMetricsMeta): """Runtime metrics for a 'PhysicalOperator'. Metrics are updated dynamically during the execution of the Dataset. This class can be used for either observablity or scheduling purposes. DO NOT modify the fields of this class directly. Instead, use the provided callback methods. """ # TODO(hchen): Fields tagged with "map_only" currently only work for MapOperator. # We should make them work for all operators by unifying the task execution code. # === Inputs-related metrics === num_inputs_received: int = metric_field( default=0, description="Number of input blocks received by operator.", metrics_group=MetricsGroup.INPUTS, ) num_row_inputs_received: int = metric_field( default=0, description="Number of input rows received by operator.", metrics_group=MetricsGroup.INPUTS, ) bytes_inputs_received: int = metric_field( default=0, description="Byte size of input blocks received by operator.", metrics_group=MetricsGroup.INPUTS, ) num_task_inputs_processed: int = metric_field( default=0, description=( "Number of input blocks that operator's tasks have finished processing." ), metrics_group=MetricsGroup.INPUTS, ) bytes_task_inputs_processed: int = metric_field( default=0, description=( "Byte size of input blocks that operator's tasks have finished processing." 
), metrics_group=MetricsGroup.INPUTS, ) bytes_inputs_of_submitted_tasks: int = metric_field( default=0, description="Byte size of input blocks passed to submitted tasks.", metrics_group=MetricsGroup.INPUTS, ) rows_inputs_of_submitted_tasks: int = metric_field( default=0, description="Number of rows in the input blocks passed to submitted tasks.", metrics_group=MetricsGroup.INPUTS, ) # === Outputs-related metrics === num_task_outputs_generated: int = metric_field( default=0, description="Number of output blocks generated by tasks.", metrics_group=MetricsGroup.OUTPUTS, ) bytes_task_outputs_generated: int = metric_field( default=0, description="Byte size of output blocks generated by tasks.", metrics_group=MetricsGroup.OUTPUTS, ) rows_task_outputs_generated: int = metric_field( default=0, description="Number of output rows generated by tasks.", metrics_group=MetricsGroup.OUTPUTS, ) row_outputs_taken: int = metric_field( default=0, description="Number of rows that are already taken by downstream operators.", metrics_group=MetricsGroup.OUTPUTS, ) block_outputs_taken: int = metric_field( default=0, description="Number of blocks that are already taken by downstream operators.", metrics_group=MetricsGroup.OUTPUTS, ) num_outputs_taken: int = metric_field( default=0, description=( "Number of output blocks that are already taken by downstream operators." ), metrics_group=MetricsGroup.OUTPUTS, ) bytes_outputs_taken: int = metric_field( default=0, description=( "Byte size of output blocks that are already taken by downstream operators." ), metrics_group=MetricsGroup.OUTPUTS, ) num_outputs_of_finished_tasks: int = metric_field( default=0, description="Number of generated output blocks that are from finished tasks.", metrics_group=MetricsGroup.OUTPUTS, ) bytes_outputs_of_finished_tasks: int = metric_field( default=0, description=( "Total byte size of generated output blocks produced by finished tasks." 
), metrics_group=MetricsGroup.OUTPUTS, ) rows_outputs_of_finished_tasks: int = metric_field( default=0, description=("Number of rows generated by finished tasks."), metrics_group=MetricsGroup.OUTPUTS, ) num_external_inqueue_blocks: int = metric_field( default=0, description="Number of blocks in the external inqueue", metrics_group=MetricsGroup.OUTPUTS, ) num_external_inqueue_bytes: int = metric_field( default=0, description="Byte size of blocks in the external inqueue", metrics_group=MetricsGroup.OUTPUTS, ) num_external_outqueue_blocks: int = metric_field( default=0, description="Number of blocks in the external outqueue", metrics_group=MetricsGroup.OUTPUTS, ) num_external_outqueue_bytes: int = metric_field( default=0, description="Byte size of blocks in the external outqueue", metrics_group=MetricsGroup.OUTPUTS, ) # === Tasks-related metrics === num_tasks_submitted: int = metric_field( default=0, description="Number of submitted tasks.", metrics_group=MetricsGroup.TASKS, ) num_tasks_running: int = metric_field( default=0, description="Number of running tasks.", metrics_group=MetricsGroup.TASKS, ) num_tasks_have_outputs: int = metric_field( default=0, description="Number of tasks that already have output.", metrics_group=MetricsGroup.TASKS, ) num_tasks_finished: int = metric_field( default=0, description="Number of finished tasks.", metrics_group=MetricsGroup.TASKS, ) num_tasks_failed: int = metric_field( default=0, description="Number of failed tasks.", metrics_group=MetricsGroup.TASKS, ) block_generation_time: float = metric_field( default=0, description="Time spent generating blocks in tasks.", metrics_group=MetricsGroup.TASKS, ) task_submission_backpressure_time: float = metric_field( default=0, description="Time spent in task submission backpressure.", metrics_group=MetricsGroup.TASKS, ) task_output_backpressure_time: float = metric_field( default=0, description="Time spent in task output backpressure.", metrics_group=MetricsGroup.TASKS, ) 
task_completion_time_total_s: float = metric_field( default=0, description="Time spent running tasks to completion. This is a sum of all tasks' completion times.", metrics_group=MetricsGroup.TASKS, ) task_completion_time: RuntimeMetricsHistogram = metric_field( default_factory=lambda: RuntimeMetricsHistogram(histogram_buckets_s), description="Time spent per task running those tasks to completion.", metrics_group=MetricsGroup.TASKS, metrics_type=MetricsType.Histogram, metrics_args={"boundaries": histogram_buckets_s}, ) block_completion_time: RuntimeMetricsHistogram = metric_field( default_factory=lambda: RuntimeMetricsHistogram(histogram_buckets_s), description="Time spent running a single block to completion. If multiple blocks are generated per task, this is approximated by assuming each block took an equal amount of time to process.", metrics_group=MetricsGroup.TASKS, metrics_type=MetricsType.Histogram, metrics_args={"boundaries": histogram_buckets_s}, ) task_completion_time_excl_backpressure_s: float = metric_field( default=0, description="Time spent running tasks to completion without backpressure.", metrics_group=MetricsGroup.TASKS, ) block_size_bytes: RuntimeMetricsHistogram = metric_field( default_factory=lambda: RuntimeMetricsHistogram(histogram_buckets_bytes), description="Size of blocks generated by tasks.", metrics_group=MetricsGroup.TASKS, metrics_type=MetricsType.Histogram, metrics_args={"boundaries": histogram_buckets_bytes}, ) block_size_rows: RuntimeMetricsHistogram = metric_field( default_factory=lambda: RuntimeMetricsHistogram(histogram_bucket_rows), description="Number of rows in blocks generated by tasks.", metrics_group=MetricsGroup.TASKS, metrics_type=MetricsType.Histogram, metrics_args={"boundaries": histogram_bucket_rows}, ) # === Actor-related metrics === num_alive_actors: int = metric_field( default=0, description="Number of alive actors.", metrics_group=MetricsGroup.ACTORS, ) num_restarting_actors: int = metric_field( default=0, 
description="Number of restarting actors.", metrics_group=MetricsGroup.ACTORS, ) num_pending_actors: int = metric_field( default=0, description="Number of pending actors.", metrics_group=MetricsGroup.ACTORS, ) # === Object store memory metrics === obj_store_mem_internal_inqueue_blocks: int = metric_field( default=0, description="Number of blocks in operator's internal input queue.", metrics_group=MetricsGroup.OBJECT_STORE_MEMORY, ) obj_store_mem_internal_outqueue_blocks: int = metric_field( default=0, description="Number of blocks in the operator's internal output queue.", metrics_group=MetricsGroup.OBJECT_STORE_MEMORY, ) obj_store_mem_freed: int = metric_field( default=0, description="Byte size of freed memory in object store.", metrics_group=MetricsGroup.OBJECT_STORE_MEMORY, ) obj_store_mem_spilled: int = metric_field( default=0, description="Byte size of spilled memory in object store.", metrics_group=MetricsGroup.OBJECT_STORE_MEMORY, ) obj_store_mem_used: int = metric_field( default=0, description="Byte size of used memory in object store.", metrics_group=MetricsGroup.OBJECT_STORE_MEMORY, ) # === Miscellaneous metrics === # Use "metrics_group: "misc" in the metadata for new metrics in this section. 
def __init__(self, op: "PhysicalOperator"): from ray.data._internal.execution.operators.map_operator import MapOperator self._op = op self._is_map = isinstance(op, MapOperator) self._running_tasks: Dict[int, RunningTaskInfo] = {} self._extra_metrics: Dict[str, Any] = {} # Start time of current pause due to task submission backpressure self._task_submission_backpressure_start_time = -1 # Start time of current pause due to task output backpressure self._task_output_backpressure_start_time = -1 self._internal_inqueue = create_bundle_queue() self._internal_outqueue = create_bundle_queue() self._pending_task_inputs = create_bundle_queue() self._op_task_duration_stats = TaskDurationStats() self._per_node_metrics: Dict[str, NodeMetrics] = defaultdict(NodeMetrics) self._per_node_metrics_enabled: bool = op.data_context.enable_per_node_metrics self._cum_max_uss_bytes: Optional[int] = None self._issue_detector_hanging = 0 self._issue_detector_high_memory = 0 # Initialize the histogram metrics self.task_completion_time = RuntimeMetricsHistogram(histogram_buckets_s) self.block_completion_time = RuntimeMetricsHistogram(histogram_buckets_s) self.block_size_bytes = RuntimeMetricsHistogram(histogram_buckets_bytes) self.block_size_rows = RuntimeMetricsHistogram(histogram_bucket_rows) @property def extra_metrics(self) -> Dict[str, Any]: """Return a dict of extra metrics.""" return self._extra_metrics @classmethod def get_metrics(self) -> List[MetricDefinition]: return list(_METRICS) def as_dict( self, skip_internal_metrics: bool = False, ) -> Dict[str, Any]: """ Return a dict representation of the metrics. Args: skip_internal_metrics: Whether to skip internal metrics. Returns: A dict representation of the metrics. 
""" result = [] for metric in self.get_metrics(): if not self._is_map and metric.map_only: continue if skip_internal_metrics and metric.internal_only: continue value = getattr(self, metric.name) result.append((metric.name, value)) # TODO: record resource usage in OpRuntimeMetrics, # avoid calling self._op.current_processor_usage() resource_usage = self._op.current_processor_usage() result.extend( [ ("cpu_usage", resource_usage.cpu or 0), ("gpu_usage", resource_usage.gpu or 0), ] ) result.extend(self._extra_metrics.items()) return dict(result) @metric_property( description="Average number of blocks generated per task.", metrics_group=MetricsGroup.OUTPUTS, ) def average_num_outputs_per_task(self) -> Optional[float]: """Average number of output blocks per task, or None if no task has finished.""" if self.num_tasks_finished == 0: return None else: return self.num_outputs_of_finished_tasks / self.num_tasks_finished @metric_property( description="Average number of blocks generated per task.", metrics_group=MetricsGroup.INPUTS, ) def average_num_inputs_per_task(self) -> Optional[float]: """Average number of input blocks per task, or None if no task has finished.""" if self.num_tasks_finished == 0: return None else: return self.num_task_inputs_processed / self.num_tasks_finished @metric_property( description="Average number of output blocks per task per second.", metrics_group=MetricsGroup.OUTPUTS, ) def num_output_blocks_per_task_s(self) -> Optional[float]: """Average number of output blocks per task per second. If the operator hasn't produced any output yet, this metric returns `None`. 
""" if self.block_generation_time == 0: return None else: return self.num_task_outputs_generated / self.block_generation_time @metric_property( description="Average task's completion time in seconds (including throttling).", metrics_group=MetricsGroup.TASKS, ) def average_total_task_completion_time_s(self) -> Optional[float]: """Average task's completion time in seconds (including throttling)""" if self.num_tasks_finished == 0: return None else: return self.task_completion_time_total_s / self.num_tasks_finished @metric_property( description="Average task's completion time in seconds (excluding throttling).", metrics_group=MetricsGroup.TASKS, ) def average_task_completion_excl_backpressure_time_s(self) -> Optional[float]: """Average task's completion time in seconds (excluding throttling)""" if self.num_tasks_finished == 0: return None else: return ( self.task_completion_time_excl_backpressure_s / self.num_tasks_finished ) @metric_property( description="Average size of task output in bytes.", metrics_group=MetricsGroup.OUTPUTS, ) def average_bytes_per_output(self) -> Optional[float]: """Average size in bytes of output blocks.""" if self.num_task_outputs_generated == 0: return None else: return self.bytes_task_outputs_generated / self.num_task_outputs_generated @metric_property( description="Byte size of input blocks in the operator's internal input queue.", metrics_group=MetricsGroup.OBJECT_STORE_MEMORY, ) def obj_store_mem_internal_inqueue(self) -> int: return self._internal_inqueue.estimate_size_bytes() @metric_property( description=( "Byte size of output blocks in the operator's internal output queue." 
), metrics_group=MetricsGroup.OBJECT_STORE_MEMORY, ) def obj_store_mem_internal_outqueue(self) -> int: return self._internal_outqueue.estimate_size_bytes() @metric_property( description="Byte size of input blocks used by pending tasks.", metrics_group=MetricsGroup.OBJECT_STORE_MEMORY, ) def obj_store_mem_pending_task_inputs(self) -> int: return self._pending_task_inputs.estimate_size_bytes() @property def obj_store_mem_pending_task_outputs(self) -> Optional[float]: """Estimated size in bytes of output blocks in Ray generator buffers. If an estimate isn't available, this property returns ``None``. """ per_task_output = self.obj_store_mem_max_pending_output_per_task if per_task_output is None: return None # Ray Data launches multiple tasks per actor, but only one task runs at a # time per actor. So, the number of actually running tasks is capped by the # number of active actors. from ray.data._internal.execution.operators.actor_pool_map_operator import ( ActorPoolMapOperator, ) num_tasks_running = self.num_tasks_running if isinstance(self._op, ActorPoolMapOperator): num_tasks_running = min( num_tasks_running, self._op._actor_pool.num_active_actors() ) return num_tasks_running * per_task_output @property def obj_store_mem_max_pending_output_per_task(self) -> Optional[float]: """Estimated size in bytes of output blocks in a task's generator buffer.""" context = self._op.data_context if context._max_num_blocks_in_streaming_gen_buffer is None: return None bytes_per_output = self.average_bytes_per_output # If we don’t have a sample yet and the limit is “unlimited”, we can’t # estimate – just bail out. if bytes_per_output is None: if context.target_max_block_size is None: return None else: # Block size can be up to MAX_SAFE_BLOCK_SIZE_FACTOR larger before being sliced. 
bytes_per_output = ( context.target_max_block_size * MAX_SAFE_BLOCK_SIZE_FACTOR ) num_pending_outputs = context._max_num_blocks_in_streaming_gen_buffer if self.average_num_outputs_per_task is not None: num_pending_outputs = min( num_pending_outputs, self.average_num_outputs_per_task ) return bytes_per_output * num_pending_outputs @metric_property( description="Average size of task inputs in bytes.", metrics_group=MetricsGroup.INPUTS, ) def average_bytes_inputs_per_task(self) -> Optional[float]: """Average size in bytes of ref bundles passed to tasks, or ``None`` if no tasks have been submitted.""" if self.num_tasks_submitted == 0: return None else: return self.bytes_inputs_of_submitted_tasks / self.num_tasks_submitted @metric_property( description="Average number of rows passed in to the task.", metrics_group=MetricsGroup.INPUTS, ) def average_rows_inputs_per_task(self) -> Optional[float]: """Average number of rows in input blocks per task, or None if no task has been submitted.""" if self.num_tasks_submitted == 0: return None else: return self.rows_inputs_of_submitted_tasks / self.num_tasks_submitted @metric_property( description="Average total output size of task in bytes.", metrics_group=MetricsGroup.OUTPUTS, ) def average_bytes_outputs_per_task(self) -> Optional[float]: """Average size in bytes of output blocks per task, or None if no task has finished.""" if self.num_tasks_finished == 0: return None else: return self.bytes_outputs_of_finished_tasks / self.num_tasks_finished @metric_property( description="Average number of rows produced per task.", metrics_group=MetricsGroup.OUTPUTS, ) def average_rows_outputs_per_task(self) -> Optional[float]: """Average number of rows in output blocks per task, or None if no task has finished.""" if self.num_tasks_finished == 0: return None else: return self.rows_outputs_of_finished_tasks / self.num_tasks_finished @metric_property( description="Average USS usage of tasks.", metrics_group=MetricsGroup.TASKS, ) def 
average_max_uss_per_task(self) -> Optional[float]: """Average max USS usage of tasks.""" if self._cum_max_uss_bytes is None: return None else: assert self.num_task_outputs_generated > 0, self.num_task_outputs_generated return self._cum_max_uss_bytes / self.num_task_outputs_generated @metric_property( description="Indicates if the operator is hanging.", metrics_group=MetricsGroup.MISC, internal_only=True, ) def issue_detector_hanging(self) -> int: return self._issue_detector_hanging @metric_property( description="Indicates if the operator is using high memory.", metrics_group=MetricsGroup.MISC, internal_only=True, ) def issue_detector_high_memory(self) -> int: return self._issue_detector_high_memory def on_input_received(self, input: RefBundle): """Callback when the operator receives a new input.""" self.num_inputs_received += 1 self.num_row_inputs_received += input.num_rows() or 0 self.bytes_inputs_received += input.size_bytes() def on_input_queued(self, input: RefBundle): """Callback when the operator queues an input.""" self.obj_store_mem_internal_inqueue_blocks += len(input.blocks) self._internal_inqueue.add(input) def on_input_dequeued(self, input: RefBundle): """Callback when the operator dequeues an input.""" self.obj_store_mem_internal_inqueue_blocks -= len(input.blocks) input_size = input.size_bytes() self._internal_inqueue.remove(input) assert self.obj_store_mem_internal_inqueue >= 0, ( self._op, self.obj_store_mem_internal_inqueue, input_size, ) def on_output_queued(self, output: RefBundle): """Callback when an output is queued by the operator.""" self.obj_store_mem_internal_outqueue_blocks += len(output.blocks) self._internal_outqueue.add(output) def on_output_dequeued(self, output: RefBundle): """Callback when an output is dequeued by the operator.""" self.obj_store_mem_internal_outqueue_blocks -= len(output.blocks) output_size = output.size_bytes() self._internal_outqueue.remove(output) assert self.obj_store_mem_internal_outqueue >= 0, ( self._op, 
self.obj_store_mem_internal_outqueue, output_size, ) def on_toggle_task_submission_backpressure(self, in_backpressure): if in_backpressure and self._task_submission_backpressure_start_time == -1: # backpressure starting, start timer self._task_submission_backpressure_start_time = time.perf_counter() elif self._task_submission_backpressure_start_time != -1: # backpressure stopping, stop timer self.task_submission_backpressure_time += ( time.perf_counter() - self._task_submission_backpressure_start_time ) self._task_submission_backpressure_start_time = -1 def on_toggle_task_output_backpressure(self, in_backpressure): if in_backpressure and self._task_output_backpressure_start_time == -1: # backpressure starting, start timer self._task_output_backpressure_start_time = time.perf_counter() elif self._task_output_backpressure_start_time != -1: # backpressure stopping, stop timer delta = time.perf_counter() - self._task_output_backpressure_start_time self.task_output_backpressure_time += delta self._task_output_backpressure_start_time = -1 def on_output_taken(self, output: RefBundle): """Callback when an output is taken from the operator.""" self.num_outputs_taken += 1 self.block_outputs_taken += len(output) self.row_outputs_taken += output.num_rows() or 0 self.bytes_outputs_taken += output.size_bytes() def on_task_submitted(self, task_index: int, inputs: RefBundle): """Callback when the operator submits a task.""" self.num_tasks_submitted += 1 self.num_tasks_running += 1 self.bytes_inputs_of_submitted_tasks += inputs.size_bytes() self.rows_inputs_of_submitted_tasks += inputs.num_rows() or 0 self._pending_task_inputs.add(inputs) self._running_tasks[task_index] = RunningTaskInfo( inputs=inputs, num_outputs=0, bytes_outputs=0, num_rows_produced=0, start_time=time.perf_counter(), cum_block_gen_time=0, ) def on_task_output_generated(self, task_index: int, output: RefBundle): """Callback when a new task generates an output.""" num_outputs = len(output) output_bytes = 
output.size_bytes() num_rows_produced = output.num_rows() self.num_task_outputs_generated += num_outputs self.bytes_task_outputs_generated += output_bytes self.rows_task_outputs_generated += num_rows_produced for block in output.metadata: if block.size_bytes is not None: self.block_size_bytes.observe(block.size_bytes) if block.num_rows is not None: self.block_size_rows.observe(block.num_rows) task_info = self._running_tasks[task_index] if task_info.num_outputs == 0: self.num_tasks_have_outputs += 1 task_info.num_outputs += num_outputs task_info.bytes_outputs += output_bytes task_info.num_rows_produced += num_rows_produced for block_ref, meta in output.blocks: assert ( meta.exec_stats is not None and meta.exec_stats.wall_time_s is not None ) self.block_generation_time += meta.exec_stats.wall_time_s task_info.cum_block_gen_time += meta.exec_stats.wall_time_s assert meta.num_rows is not None trace_allocation(block_ref, "operator_output") if meta.exec_stats.max_uss_bytes is not None: if self._cum_max_uss_bytes is None: self._cum_max_uss_bytes = meta.exec_stats.max_uss_bytes else: self._cum_max_uss_bytes += meta.exec_stats.max_uss_bytes # Update per node metrics if self._per_node_metrics_enabled: for _, meta in output.blocks: node_id = node_id_from_block_metadata(meta) node_metrics = self._per_node_metrics[node_id] node_metrics.bytes_outputs_of_finished_tasks += meta.size_bytes node_metrics.blocks_outputs_of_finished_tasks += 1 def on_task_finished(self, task_index: int, exception: Optional[Exception]): """Callback when a task is finished.""" self.num_tasks_running -= 1 self.num_tasks_finished += 1 if exception is not None: self.num_tasks_failed += 1 task_info = self._running_tasks[task_index] self.num_outputs_of_finished_tasks += task_info.num_outputs self.bytes_outputs_of_finished_tasks += task_info.bytes_outputs self.rows_outputs_of_finished_tasks += task_info.num_rows_produced task_time_delta = time.perf_counter() - task_info.start_time 
self.task_completion_time_total_s += task_time_delta self.task_completion_time.observe(task_time_delta) assert task_info.cum_block_gen_time is not None if task_info.num_outputs > 0: # Calculate the average block generation time per block block_time_delta = task_info.cum_block_gen_time / task_info.num_outputs self.block_completion_time.observe( block_time_delta, num_observations=task_info.num_outputs ) # NOTE: This is used for Issue Detection self._op_task_duration_stats.add_duration(task_time_delta) self.task_completion_time_excl_backpressure_s += task_info.cum_block_gen_time inputs = self._running_tasks[task_index].inputs self.num_task_inputs_processed += len(inputs) total_input_size = inputs.size_bytes() self.bytes_task_inputs_processed += total_input_size input_size = inputs.size_bytes() self._pending_task_inputs.remove(inputs) assert self.obj_store_mem_pending_task_inputs >= 0, ( self._op, self.obj_store_mem_pending_task_inputs, input_size, ) ctx = self._op.data_context if ctx.enable_get_object_locations_for_metrics: locations = ray.experimental.get_object_locations(inputs.block_refs) for block, meta in inputs.blocks: if locations[block].get("did_spill", False): assert meta.size_bytes is not None self.obj_store_mem_spilled += meta.size_bytes self.obj_store_mem_freed += total_input_size # Update per node metrics if self._per_node_metrics_enabled: node_ids = set() for _, meta in inputs.blocks: node_id = node_id_from_block_metadata(meta) node_metrics = self._per_node_metrics[node_id] # Stats to update once per node id or if node id is unknown if node_id not in node_ids or node_id == NODE_UNKNOWN: node_metrics.num_tasks_finished += 1 # Keep track of node ids to ensure we don't double count node_ids.add(node_id) inputs.destroy_if_owned() del self._running_tasks[task_index]
OpRuntimeMetrics
python
sphinx-doc__sphinx
sphinx/util/cfamily.py
{ "start": 8647, "end": 16786 }
class ____: def __init__( self, definition: str, *, location: nodes.Node | tuple[str, int] | str, config: Config, ) -> None: self.definition = definition.strip() self.location = location # for warnings self.config = config self.pos = 0 self.end = len(self.definition) self.last_match: re.Match[str] | None = None self._previous_state: tuple[int, re.Match[str] | None] = (0, None) self.otherErrors: list[DefinitionError] = [] # in our tests the following is set to False to capture bad parsing self.allowFallbackExpressionParsing = True def _make_multi_error(self, errors: list[Any], header: str) -> DefinitionError: if len(errors) == 1: if len(header) > 0: return DefinitionError(header + '\n' + str(errors[0][0])) else: return DefinitionError(str(errors[0][0])) result = [header, '\n'] for e in errors: if len(e[1]) > 0: indent = ' ' result.extend((e[1], ':\n')) for line in str(e[0]).split('\n'): if len(line) == 0: continue result.extend((indent, line, '\n')) else: result.append(str(e[0])) return DefinitionError(''.join(result)) @property def language(self) -> str: raise NotImplementedError def status(self, msg: str) -> None: # for debugging indicator = '-' * self.pos + '^' logger.debug(f'{msg}\n{self.definition}\n{indicator}') # NoQA: G004 def fail(self, msg: str) -> NoReturn: errors = [] indicator = '-' * self.pos + '^' msg = ( f'Invalid {self.language} declaration: {msg} [error at {self.pos}]\n' f' {self.definition}\n' f' {indicator}' ) exc_main = DefinitionError(msg) errors.append((exc_main, 'Main error')) errors.extend((err, 'Potential other error') for err in self.otherErrors) self.otherErrors = [] raise self._make_multi_error(errors, '') def warn(self, msg: str) -> None: logger.warning(msg, location=self.location) def match(self, regex: re.Pattern[str]) -> bool: match = regex.match(self.definition, self.pos) if match is not None: self._previous_state = (self.pos, self.last_match) self.pos = match.end() self.last_match = match return True return False def 
skip_string(self, string: str) -> bool: strlen = len(string) if self.definition[self.pos : self.pos + strlen] == string: self.pos += strlen return True return False def skip_word(self, word: str) -> bool: return self.match(re.compile(r'\b%s\b' % re.escape(word))) def skip_ws(self) -> bool: return self.match(_whitespace_re) def skip_word_and_ws(self, word: str) -> bool: if self.skip_word(word): self.skip_ws() return True return False def skip_string_and_ws(self, string: str) -> bool: if self.skip_string(string): self.skip_ws() return True return False @property def eof(self) -> bool: return self.pos >= self.end @property def current_char(self) -> str: try: return self.definition[self.pos] except IndexError: return 'EOF' @property def matched_text(self) -> str: if self.last_match is not None: return self.last_match.group() return '' def read_rest(self) -> str: rv = self.definition[self.pos :] self.pos = self.end return rv def assert_end(self, *, allowSemicolon: bool = False) -> None: self.skip_ws() if allowSemicolon: if not self.eof and self.definition[self.pos :] != ';': self.fail('Expected end of definition or ;.') else: if not self.eof: self.fail('Expected end of definition.') ################################################################################ @property def id_attributes(self) -> Sequence[str]: raise NotImplementedError @property def paren_attributes(self) -> Sequence[str]: raise NotImplementedError def _parse_balanced_token_seq(self, end: list[str]) -> str: # TODO: add handling of string literals and similar brackets = {'(': ')', '[': ']', '{': '}'} start_pos = self.pos symbols: list[str] = [] while not self.eof: if len(symbols) == 0 and self.current_char in end: break if self.current_char in brackets: symbols.append(brackets[self.current_char]) elif len(symbols) > 0 and self.current_char == symbols[-1]: symbols.pop() elif self.current_char in ')]}': self.fail("Unexpected '%s' in balanced-token-seq." 
% self.current_char) self.pos += 1 if self.eof: self.fail( f'Could not find end of balanced-token-seq starting at {start_pos}.' ) return self.definition[start_pos : self.pos] def _parse_attribute(self) -> ASTAttribute | None: self.skip_ws() # try C++11 style start_pos = self.pos if self.skip_string_and_ws('['): if not self.skip_string('['): self.pos = start_pos else: # TODO: actually implement the correct grammar arg = self._parse_balanced_token_seq(end=[']']) if not self.skip_string_and_ws(']'): self.fail("Expected ']' in end of attribute.") if not self.skip_string_and_ws(']'): self.fail("Expected ']' in end of attribute after [[...]") return ASTCPPAttribute(arg) # try GNU style if self.skip_word_and_ws('__attribute__'): if not self.skip_string_and_ws('('): self.fail("Expected '(' after '__attribute__'.") if not self.skip_string_and_ws('('): self.fail("Expected '(' after '__attribute__('.") attrs = [] while 1: if self.match(identifier_re): name = self.matched_text exprs = self._parse_paren_expression_list() attrs.append(ASTGnuAttribute(name, exprs)) if self.skip_string_and_ws(','): continue if self.skip_string_and_ws(')'): break self.fail("Expected identifier, ')', or ',' in __attribute__.") if not self.skip_string_and_ws(')'): self.fail("Expected ')' after '__attribute__((...)'") return ASTGnuAttributeList(attrs) # try the simple id attributes defined by the user for id in self.id_attributes: if self.skip_word_and_ws(id): return ASTIdAttribute(id) # try the paren attributes defined by the user for id in self.paren_attributes: if not self.skip_string_and_ws(id): continue if not self.skip_string('('): self.fail("Expected '(' after user-defined paren-attribute.") arg = self._parse_balanced_token_seq(end=[')']) if not self.skip_string(')'): self.fail("Expected ')' to end user-defined paren-attribute.") return ASTParenAttribute(id, arg) return None def _parse_attribute_list(self) -> ASTAttributeList: res = [] while True: attr = self._parse_attribute() if attr is None: 
break res.append(attr) return ASTAttributeList(res) def _parse_paren_expression_list(self) -> ASTBaseParenExprList | None: raise NotImplementedError
BaseParser
python
Textualize__textual
docs/examples/guide/compound/byte02.py
{ "start": 1688, "end": 2421 }
class ____(Widget): DEFAULT_CSS = """ ByteEditor > Container { height: 1fr; align: center middle; } ByteEditor > Container.top { background: $boost; } ByteEditor Input { width: 16; } """ def compose(self) -> ComposeResult: with Container(classes="top"): yield Input(placeholder="byte") with Container(): yield ByteInput() def on_bit_switch_bit_changed(self, event: BitSwitch.BitChanged) -> None: """When a switch changes, update the value.""" value = 0 for switch in self.query(BitSwitch): value |= switch.value << switch.bit self.query_one(Input).value = str(value)
ByteEditor
python
pypa__pipenv
pipenv/vendor/pipdeptree/_models/package.py
{ "start": 542, "end": 737 }
class ____(ValueError): """ An invalid requirement string was found. When raising an exception, this should provide just the problem requirement string. """
InvalidRequirementError
python
getsentry__sentry
tests/sentry/core/endpoints/test_team_details.py
{ "start": 2606, "end": 7939 }
class ____(TeamDetailsTestBase): method = "put" def test_simple(self) -> None: team = self.team # force creation self.get_success_response( team.organization.slug, team.slug, name="hello world", slug="foobar" ) team = Team.objects.get(id=team.id) assert team.name == "hello world" assert team.slug == "foobar" def test_invalid_numeric_slug(self) -> None: response = self.get_error_response(self.organization.slug, self.team.slug, slug="1234") assert response.data["slug"][0] == DEFAULT_SLUG_ERROR_MESSAGE def test_member_without_team_role(self) -> None: user = self.create_user("foo@example.com") team = self.create_team() member = self.create_member(user=user, organization=self.organization, role="member") self.create_team_membership(team, member) self.login_as(user) self.get_error_response(team.organization.slug, team.slug, slug="foobar", status_code=403) @with_feature("organizations:team-roles") def test_member_with_team_role(self) -> None: user = self.create_user("foo@example.com") team = self.create_team() member = self.create_member(user=user, organization=self.organization, role="member") self.create_team_membership(team, member, role="admin") self.login_as(user) self.get_success_response(team.organization.slug, team.slug, name="foo", slug="bar") team = Team.objects.get(id=team.id) assert team.name == "foo" assert team.slug == "bar" def test_admin_with_team_membership_with_id(self) -> None: """Admins can modify their teams""" org = self.create_organization() team = self.create_team(organization=org) user = self.create_user(email="foo@example.com", is_superuser=False) self.create_member(organization=org, user=user, role="admin", teams=[team]) self.login_as(user) self.get_success_response(team.organization.slug, team.id, name="foo", slug="bar") team = Team.objects.get(id=team.id) assert team.name == "foo" assert team.slug == "bar" def test_admin_with_team_membership(self) -> None: """Admins can modify their teams""" org = self.create_organization() team = 
self.create_team(organization=org) user = self.create_user(email="foo@example.com", is_superuser=False) self.create_member(organization=org, user=user, role="admin", teams=[team]) self.login_as(user) self.get_success_response(team.organization.slug, team.slug, name="foo", slug="bar") team = Team.objects.get(id=team.id) assert team.name == "foo" assert team.slug == "bar" def test_admin_without_team_membership(self) -> None: """Admins can't modify teams of which they're not inside, unless open membership is on.""" # an org with closed membership (byproduct of flags=0) org = self.create_organization(owner=self.user, flags=0) team = self.create_team(organization=org) user = self.create_user(email="foo@example.com", is_superuser=False) self.create_member(organization=org, user=user, role="admin", teams=[]) self.login_as(user) # first, try deleting the team with open membership off self.get_error_response(team.organization.slug, team.slug, slug="foobar", status_code=403) curr_slug = team.slug team = Team.objects.get(id=team.id) assert team.slug == curr_slug # now, with open membership on org.flags.allow_joinleave = True org.save() self.get_success_response(team.organization.slug, team.slug, name="foo", slug="bar") team = Team.objects.get(id=team.id) assert team.name == "foo" assert team.slug == "bar" def test_manager_without_team_membership(self) -> None: org = self.create_organization() team = self.create_team(organization=org) user = self.create_user(email="foo@example.com", is_superuser=False) self.create_member(organization=org, user=user, role="manager") self.login_as(user) self.get_success_response(team.organization.slug, team.slug, name="foo", slug="bar") team = Team.objects.get(id=team.id) assert team.name == "foo" assert team.slug == "bar" def test_owner_without_team_membership(self) -> None: org = self.create_organization() team = self.create_team(organization=org) user = self.create_user(email="foo@example.com", is_superuser=False) 
self.create_member(organization=org, user=user, role="owner") self.login_as(user) self.get_success_response(team.organization.slug, team.slug, name="foo", slug="bar") team = Team.objects.get(id=team.id) assert team.name == "foo" assert team.slug == "bar" def test_cannot_modify_idp_provisioned_teams(self) -> None: org = self.create_organization(owner=self.user) idp_team = self.create_team(organization=org, idp_provisioned=True) self.login_as(self.user) self.get_error_response( idp_team.organization.slug, idp_team.slug, name="foo", slug="bar", status_code=403 )
TeamUpdateTest
python
great-expectations__great_expectations
great_expectations/expectations/core/expect_multicolumn_sum_to_equal.py
{ "start": 2299, "end": 14594 }
class ____(MulticolumnMapExpectation): __doc__ = f"""{EXPECTATION_SHORT_DESCRIPTION} ExpectMulticolumnSumToEqual is a \ Multicolumn Map Expectation. Multicolumn Map Expectations are evaluated for a set of columns and ask a yes/no question about the row-wise relationship between those columns. Based on the result, they then calculate the percentage of rows that gave a positive answer. If the percentage is high enough, the Expectation considers that data valid. Args: column_list (tuple or list): {COLUMN_LIST_DESCRIPTION} sum_total (int or float): {SUM_TOTAL_DESCRIPTION} Other Parameters: ignore_row_if (str): \ "both_values_are_missing", "either_value_is_missing", "neither" \ {IGNORE_ROW_IF_DESCRIPTION} Default "neither". mostly (None or a float between 0 and 1): \ {MOSTLY_DESCRIPTION} \ For more detail, see [mostly](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#mostly). Default 1. result_format (str or None): \ Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \ For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format). catch_exceptions (boolean or None): \ If True, then catch exceptions and include them as part of the result object. \ For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions). meta (dict or None): \ A JSON-serializable dictionary (nesting allowed) that will be included in the output without \ modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta). severity (str or None): \ {FAILURE_SEVERITY_DESCRIPTION} \ For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity). 
Returns: An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result) Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta. Supported Data Sources: [{SUPPORTED_DATA_SOURCES[0]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[1]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[2]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[3]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[4]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[5]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[6]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[7]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[8]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[9]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[10]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[11]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[12]}](https://docs.greatexpectations.io/docs/application_integration_support/) [{SUPPORTED_DATA_SOURCES[13]}](https://docs.greatexpectations.io/docs/application_integration_support/) Data Quality Issues: {DATA_QUALITY_ISSUES[0]} Example Data: test test2 test3 0 1 2 4 1 2 -2 7 2 4 4 -3 Code Examples: Passing Case: Input: ExpectMulticolumnSumToEqual( column_list=["test", "test2", "test3"], sum_total=7, mostly=0.66 ) Output: {{ "exception_info": {{ "raised_exception": false, "exception_traceback": null, 
"exception_message": null }}, "result": {{ "element_count": 3, "unexpected_count": 1, "unexpected_percent": 33.33333333333333, "partial_unexpected_list": [ {{ "test": 4, "test2": 4, "test3": -3 }} ], "missing_count": 0, "missing_percent": 0.0, "unexpected_percent_total": 33.33333333333333, "unexpected_percent_nonmissing": 33.33333333333333 }}, "meta": {{}}, "success": true }} Failing Case: Input: ExpectMulticolumnSumToEqual( column_list=["test", "test2", "test3"], sum_total=7 ) Output: {{ "exception_info": {{ "raised_exception": false, "exception_traceback": null, "exception_message": null }}, "result": {{ "element_count": 3, "unexpected_count": 1, "unexpected_percent": 33.33333333333333, "partial_unexpected_list": [ {{ "test": 4, "test2": 4, "test3": -3 }} ], "missing_count": 0, "missing_percent": 0.0, "unexpected_percent_total": 33.33333333333333, "unexpected_percent_nonmissing": 33.33333333333333 }}, "meta": {{}}, "success": false }} """ # noqa: E501 # FIXME CoP sum_total: Union[float, SuiteParameterDict] = pydantic.Field(description=SUM_TOTAL_DESCRIPTION) ignore_row_if: Union[ Literal["all_values_are_missing", "any_value_is_missing", "never"], SuiteParameterDict, ] = pydantic.Field( default="all_values_are_missing", description=IGNORE_ROW_IF_DESCRIPTION, ) # This dictionary contains metadata for display in the public gallery library_metadata: ClassVar[Dict[str, Union[str, list, bool]]] = { "maturity": "production", "tags": [ "core expectation", "multi-column expectation", ], "contributors": ["@great_expectations"], "requirements": [], "has_full_test_suite": True, "manually_reviewed_code": True, } _library_metadata = library_metadata map_metric = "multicolumn_sum.equal" success_keys = ("mostly", "sum_total") args_keys = ( "column_list", "sum_total", ) class Config: title = "Expect multicolumn sum to equal" @staticmethod def schema_extra(schema: Dict[str, Any], model: Type[ExpectMulticolumnSumToEqual]) -> None: 
MulticolumnMapExpectation.Config.schema_extra(schema, model) schema["properties"]["metadata"]["properties"].update( { "data_quality_issues": { "title": "Data Quality Issues", "type": "array", "const": DATA_QUALITY_ISSUES, }, "library_metadata": { "title": "Library Metadata", "type": "object", "const": model._library_metadata, }, "short_description": { "title": "Short Description", "type": "string", "const": EXPECTATION_SHORT_DESCRIPTION, }, "supported_data_sources": { "title": "Supported Data Sources", "type": "array", "const": SUPPORTED_DATA_SOURCES, }, } ) @classmethod def _prescriptive_template( cls, renderer_configuration: RendererConfiguration, ) -> RendererConfiguration: add_param_args: AddParamArgs = ( ("column_list", RendererValueType.ARRAY), ("sum_total", RendererValueType.NUMBER), ("mostly", RendererValueType.NUMBER), ("ignore_row_if", RendererValueType.STRING), ) for name, param_type in add_param_args: renderer_configuration.add_param(name=name, param_type=param_type) params = renderer_configuration.params template_str = "" if params.column_list: array_param_name = "column_list" param_prefix = "column_list_" renderer_configuration = cls._add_array_params( array_param_name=array_param_name, param_prefix=param_prefix, renderer_configuration=renderer_configuration, ) template_str += "Sum across columns " + cls._get_array_string( array_param_name=array_param_name, param_prefix=param_prefix, renderer_configuration=renderer_configuration, ) if params.mostly and params.mostly.value < 1.0: renderer_configuration = cls._add_mostly_pct_param( renderer_configuration=renderer_configuration ) template_str += " must be $sum_total, at least $mostly_pct % of the time." else: template_str += " must be $sum_total." 
renderer_configuration.template_str = template_str return renderer_configuration @classmethod @renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE) def _prescriptive_renderer( cls, configuration: Optional[ExpectationConfiguration] = None, result: Optional[ExpectationValidationResult] = None, runtime_configuration: Optional[dict] = None, **kwargs, ) -> List[RenderedStringTemplateContent]: runtime_configuration = runtime_configuration or {} styling = runtime_configuration.get("styling") params = substitute_none_for_missing( configuration.kwargs, ["column_list", "sum_total", "mostly"], ) if params["mostly"] is not None: params["mostly_pct"] = num_to_str(params["mostly"] * 100, no_scientific=True) mostly_str = "" if params.get("mostly") is None else ", at least $mostly_pct % of the time" sum_total = params.get("sum_total") # noqa: F841 # FIXME CoP column_list_str = "" for idx in range(len(params["column_list"]) - 1): column_list_str += f"$column_list_{idx!s}, " params[f"column_list_{idx!s}"] = params["column_list"][idx] last_idx = len(params["column_list"]) - 1 column_list_str += f"$column_list_{last_idx!s}" params[f"column_list_{last_idx!s}"] = params["column_list"][last_idx] template_str = f"Sum across columns {column_list_str} must be $sum_total{mostly_str}." return [ RenderedStringTemplateContent( **{ "content_block_type": "string_template", "string_template": { "template": template_str, "params": params, "styling": styling, }, } ) ]
ExpectMulticolumnSumToEqual
python
kamyu104__LeetCode-Solutions
Python/maximum-tastiness-of-candy-basket.py
{ "start": 84, "end": 865 }
class ____(object): def maximumTastiness(self, price, k): """ :type price: List[int] :type k: int :rtype: int """ def check(x): # max cnt if smallest absolute difference >= x cnt = prev = 0 for i in xrange(len(price)): if prev and price[i]-prev < x: continue cnt += 1 if cnt == k: break prev = price[i] return cnt >= k price.sort() left, right = 1, price[-1]-price[0] while left <= right: mid = left + (right-left)//2 if not check(mid): right = mid-1 else: left = mid+1 return right
Solution
python
pytorch__pytorch
torch/_utils.py
{ "start": 37790, "end": 40858 }
class ____(Generic[P]): def __init__(self, name: str): self.name = name self.callback_list: list[Callable[P, None]] = [] def add_callback(self, cb: Callable[P, None]) -> None: self.callback_list.append(cb) def fire_callbacks(self, *args: P.args, **kwargs: P.kwargs) -> None: for cb in self.callback_list: try: cb(*args, **kwargs) except Exception: logger.exception( "Exception in callback for %s registered with gpu trace", self.name ) def try_import(module_name: str) -> Optional[ModuleType]: # Implementation based on # https://docs.python.org/3/library/importlib.html#checking-if-a-module-can-be-imported if (module := sys.modules.get(module_name, None)) is not None: return module if (spec := importlib.util.find_spec(module_name)) is not None: module = importlib.util.module_from_spec(spec) sys.modules[module_name] = module # https://docs.python.org/3/library/importlib.html#importlib.machinery.ModuleSpec.loader # "The finder should always set this attribute" assert spec.loader is not None, "The loader attribute should always be set" spec.loader.exec_module(module) return module return None # IMPORT_MAPPING and NAME_MAPPING are adapted from https://github.com/python/cpython/blob/main/Lib/_compat_pickle.py # for use in the weights_only Unpickler. IMPORT_MAPPING = { "__builtin__": "builtins", "copy_reg": "copyreg", "Queue": "queue", "repr": "reprlib", "_abcoll": "collections.abc", # Non-mutual mappings. "UserDict": "collections", "UserList": "collections", "UserString": "collections", "whichdb": "dbm", "StringIO": "io", "cStringIO": "io", } # This contains rename rules that are easy to handle. We ignore the more # complex stuff (e.g. mapping the names in the urllib and types modules). # These rules should be run before import names are fixed. 
NAME_MAPPING = { ("__builtin__", "xrange"): ("builtins", "range"), ("__builtin__", "reduce"): ("functools", "reduce"), ("__builtin__", "intern"): ("sys", "intern"), ("__builtin__", "unichr"): ("builtins", "chr"), ("__builtin__", "unicode"): ("builtins", "str"), ("__builtin__", "long"): ("builtins", "int"), ("itertools", "izip"): ("builtins", "zip"), ("itertools", "imap"): ("builtins", "map"), ("itertools", "ifilter"): ("builtins", "filter"), ("itertools", "ifilterfalse"): ("itertools", "filterfalse"), ("itertools", "izip_longest"): ("itertools", "zip_longest"), ("UserDict", "IterableUserDict"): ("collections", "UserDict"), ("UserList", "UserList"): ("collections", "UserList"), ("UserString", "UserString"): ("collections", "UserString"), # Non-mutual mappings. ("__builtin__", "basestring"): ("builtins", "str"), ("exceptions", "StandardError"): ("builtins", "Exception"), ("UserDict", "UserDict"): ("collections", "UserDict"), }
CallbackRegistry
python
PyCQA__pylint
tests/functional/r/regression/regression_3091.py
{ "start": 102, "end": 239 }
class ____(): fun = lambda self, x: x * 2 def __init__(self): x = self.fun(1) # Crashes pylint 2.3.1 print(x)
MyClass
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/links/kubernetes_engine.py
{ "start": 2388, "end": 2603 }
class ____(BaseGoogleLink): """Helper class for constructing Kubernetes Engine Pod Link.""" name = "Kubernetes Pod" key = "kubernetes_pod_conf" format_str = KUBERNETES_POD_LINK
KubernetesEnginePodLink
python
airbytehq__airbyte
airbyte-integrations/connectors/source-monday/unit_tests/integrations/monday_requests/items_request_builder.py
{ "start": 209, "end": 1288 }
class ____(MondayBaseRequestBuilder): @classmethod def items_endpoint(cls, authenticator: Authenticator, board_ids: List[int] = None) -> "ItemsRequestBuilder": return cls().with_authenticator(authenticator).with_board_ids(board_ids) @property def request_body(self): params = super().query_params or {} if self._board_ids: board_ids = ", ".join(list(map(str, self._board_ids))) board_ids_str = f",ids:[{board_ids}]" else: board_ids_str = "" params["query"] = ( "{boards(limit:1%s){items_page(limit:20){cursor,items{id,name,assets{created_at,file_extension,file_size,id,name,original_geometry,public_url,uploaded_by{id},url,url_thumbnail},board{id,name},column_values{id,text,type,value,... on MirrorValue{display_value},... on BoardRelationValue{display_value},... on DependencyValue{display_value}},created_at,creator_id,group{id},parent_item{id},state,subscribers{id},updated_at,updates{id}}}}}" % board_ids_str ) return params
ItemsRequestBuilder
python
kamyu104__LeetCode-Solutions
Python/move-sub-tree-of-n-ary-tree.py
{ "start": 251, "end": 1880 }
class ____(object): def moveSubTree(self, root, p, q): """ :type root: Node :type p: Node :type q: Node :rtype: Node """ def iter_find_parents(node, parent, p, q, is_ancestor, lookup): stk = [(1, [node, None, False])] while stk: step, params = stk.pop() if step == 1: node, parent, is_ancestor = params if node in (p, q): lookup[node] = parent if len(lookup) == 2: return is_ancestor stk.append((2, [node, is_ancestor, reversed(node.children)])) else: node, is_ancestor, it = params child = next(it, None) if not child: continue stk.append((2, [node, is_ancestor, it])) stk.append((1, [child, node, is_ancestor or node == p])) assert(False) return False lookup = {} is_ancestor = iter_find_parents(root, None, p, q, False, lookup) if p in lookup and lookup[p] == q: return root q.children.append(p) if not is_ancestor: lookup[p].children.remove(p) else: lookup[q].children.remove(q) if p == root: root = q else: lookup[p].children[lookup[p].children.index(p)] = q return root # Time: O(n) # Space: O(h) # one pass solution with recursion (bad in deep tree)
Solution
python
PrefectHQ__prefect
src/prefect/server/schemas/core.py
{ "start": 12328, "end": 12597 }
class ____(PrefectBaseModel): """ Base class for classes that represent inputs to runs, which could include, constants, parameters, task runs or flow runs. """ model_config: ClassVar[ConfigDict] = ConfigDict(frozen=True) input_type: str
RunInput
python
ray-project__ray
python/ray/serve/_private/benchmarks/streaming/streaming_http_throughput.py
{ "start": 756, "end": 3701 }
class ____: def __init__(self, downstream: DeploymentHandle): logging.getLogger("ray.serve").setLevel(logging.WARNING) self._h = downstream.options(stream=True) async def stream(self): async for token in self._h.stream.remote(): yield token def __call__(self, *args): return StreamingResponse(self.stream()) async def _consume_single_stream(): async with aiohttp.ClientSession(raise_for_status=True) as session: async with session.get("http://localhost:8000") as r: async for line in r.content: pass async def run_benchmark( tokens_per_request: int, batch_size: int, num_trials: int, trial_runtime: float, ) -> Tuple[float, float]: async def _do_single_batch(): await asyncio.gather(*[_consume_single_stream() for _ in range(batch_size)]) return await run_throughput_benchmark( fn=_do_single_batch, multiplier=batch_size * tokens_per_request, num_trials=num_trials, trial_runtime=trial_runtime, ) @click.command(help="Benchmark streaming HTTP throughput.") @click.option( "--tokens-per-request", type=int, default=1000, help="Number of requests to send to downstream deployment in each trial.", ) @click.option( "--batch-size", type=int, default=10, help="Number of requests to send to downstream deployment in each trial.", ) @click.option( "--num-replicas", type=int, default=1, help="Number of replicas in the downstream deployment.", ) @click.option( "--num-trials", type=int, default=5, help="Number of trials of the benchmark to run.", ) @click.option( "--trial-runtime", type=int, default=1, help="Duration to run each trial of the benchmark for (seconds).", ) @click.option( "--use-intermediate-deployment", is_flag=True, default=False, help="Whether to run an intermediate deployment proxying the requests.", ) def main( tokens_per_request: int, batch_size: int, num_replicas: int, num_trials: int, trial_runtime: float, use_intermediate_deployment: bool, ): app = Downstream.options(num_replicas=num_replicas).bind(tokens_per_request) if use_intermediate_deployment: app = 
Intermediate.bind(app) serve.run(app) mean, stddev = asyncio.new_event_loop().run_until_complete( run_benchmark( tokens_per_request, batch_size, num_trials, trial_runtime, ) ) print( "HTTP streaming throughput {}: {} +- {} tokens/s".format( f"(num_replicas={num_replicas}, " f"tokens_per_request={tokens_per_request}, " f"batch_size={batch_size}, " f"use_intermediate_deployment={use_intermediate_deployment})", mean, stddev, ) ) if __name__ == "__main__": main()
Intermediate
python
weaviate__weaviate-python-client
weaviate/collections/classes/config.py
{ "start": 62810, "end": 63346 }
class ____(_VectorIndexConfig): cleanup_interval_seconds: int distance_metric: VectorDistances dynamic_ef_min: int dynamic_ef_max: int dynamic_ef_factor: int ef: int ef_construction: int filter_strategy: VectorFilterStrategy flat_search_cutoff: int max_connections: int skip: bool vector_cache_max_objects: int @staticmethod def vector_index_type() -> str: return VectorIndexType.HNSW.value VectorIndexConfigHNSW = _VectorIndexConfigHNSW @dataclass
_VectorIndexConfigHNSW
python
django__django
tests/migrations/test_state.py
{ "start": 60511, "end": 70904 }
class ____(SimpleTestCase): def test_custom_model_base(self): state = ModelState.from_model(ModelWithCustomBase) self.assertEqual(state.bases, (models.Model,)) def test_bound_field_sanity_check(self): field = models.CharField(max_length=1) field.model = models.Model with self.assertRaisesMessage( ValueError, 'ModelState.fields cannot be bound to a model - "field" is.' ): ModelState("app", "Model", [("field", field)]) def test_sanity_check_to(self): field = models.ForeignKey(UnicodeModel, models.CASCADE) with self.assertRaisesMessage( ValueError, 'Model fields in "ModelState.fields" cannot refer to a model class - ' '"app.Model.field.to" does. Use a string reference instead.', ): ModelState("app", "Model", [("field", field)]) def test_sanity_check_through(self): field = models.ManyToManyField("UnicodeModel") field.remote_field.through = UnicodeModel with self.assertRaisesMessage( ValueError, 'Model fields in "ModelState.fields" cannot refer to a model class - ' '"app.Model.field.through" does. Use a string reference instead.', ): ModelState("app", "Model", [("field", field)]) def test_sanity_index_name(self): field = models.IntegerField() options = {"indexes": [models.Index(fields=["field"])]} msg = ( "Indexes passed to ModelState require a name attribute. <Index: " "fields=['field']> doesn't have one." ) with self.assertRaisesMessage(ValueError, msg): ModelState("app", "Model", [("field", field)], options=options) def test_fields_immutability(self): """ Rendering a model state doesn't alter its internal fields. 
""" apps = Apps() field = models.CharField(max_length=1) state = ModelState("app", "Model", [("name", field)]) Model = state.render(apps) self.assertNotEqual(Model._meta.get_field("name"), field) def test_repr(self): field = models.CharField(max_length=1) state = ModelState( "app", "Model", [("name", field)], bases=["app.A", "app.B", "app.C"] ) self.assertEqual(repr(state), "<ModelState: 'app.Model'>") project_state = ProjectState() project_state.add_model(state) with self.assertRaisesMessage( InvalidBasesError, "Cannot resolve bases for [<ModelState: 'app.Model'>]" ): project_state.apps def test_fields_ordering_equality(self): state = ModelState( "migrations", "Tag", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=100)), ("hidden", models.BooleanField()), ], ) reordered_state = ModelState( "migrations", "Tag", [ ("id", models.AutoField(primary_key=True)), # Purposely re-ordered. ("hidden", models.BooleanField()), ("name", models.CharField(max_length=100)), ], ) self.assertEqual(state, reordered_state) @override_settings(TEST_SWAPPABLE_MODEL="migrations.SomeFakeModel") def test_create_swappable(self): """ Tests making a ProjectState from an Apps with a swappable model """ new_apps = Apps(["migrations"]) class Author(models.Model): name = models.CharField(max_length=255) bio = models.TextField() age = models.IntegerField(blank=True, null=True) class Meta: app_label = "migrations" apps = new_apps swappable = "TEST_SWAPPABLE_MODEL" author_state = ModelState.from_model(Author) self.assertEqual(author_state.app_label, "migrations") self.assertEqual(author_state.name, "Author") self.assertEqual(list(author_state.fields), ["id", "name", "bio", "age"]) self.assertEqual(author_state.fields["name"].max_length, 255) self.assertIs(author_state.fields["bio"].null, False) self.assertIs(author_state.fields["age"].null, True) self.assertEqual( author_state.options, {"swappable": "TEST_SWAPPABLE_MODEL", "indexes": [], "constraints": []}, ) 
self.assertEqual(author_state.bases, (models.Model,)) self.assertEqual(author_state.managers, []) @override_settings(TEST_SWAPPABLE_MODEL="migrations.SomeFakeModel") def test_create_swappable_from_abstract(self): """ A swappable model inheriting from a hierarchy: concrete -> abstract -> concrete. """ new_apps = Apps(["migrations"]) class SearchableLocation(models.Model): keywords = models.CharField(max_length=256) class Meta: app_label = "migrations" apps = new_apps class Station(SearchableLocation): name = models.CharField(max_length=128) class Meta: abstract = True class BusStation(Station): bus_routes = models.CharField(max_length=128) inbound = models.BooleanField(default=False) class Meta(Station.Meta): app_label = "migrations" apps = new_apps swappable = "TEST_SWAPPABLE_MODEL" station_state = ModelState.from_model(BusStation) self.assertEqual(station_state.app_label, "migrations") self.assertEqual(station_state.name, "BusStation") self.assertEqual( list(station_state.fields), ["searchablelocation_ptr", "name", "bus_routes", "inbound"], ) self.assertEqual(station_state.fields["name"].max_length, 128) self.assertIs(station_state.fields["bus_routes"].null, False) self.assertEqual( station_state.options, { "abstract": False, "swappable": "TEST_SWAPPABLE_MODEL", "indexes": [], "constraints": [], }, ) self.assertEqual(station_state.bases, ("migrations.searchablelocation",)) self.assertEqual(station_state.managers, []) @override_settings(TEST_SWAPPABLE_MODEL="migrations.SomeFakeModel") def test_custom_manager_swappable(self): """ Tests making a ProjectState from unused models with custom managers """ new_apps = Apps(["migrations"]) class Food(models.Model): food_mgr = FoodManager("a", "b") food_qs = FoodQuerySet.as_manager() food_no_mgr = NoMigrationFoodManager("x", "y") class Meta: app_label = "migrations" apps = new_apps swappable = "TEST_SWAPPABLE_MODEL" food_state = ModelState.from_model(Food) # The default manager is used in migrations self.assertEqual([name 
for name, mgr in food_state.managers], ["food_mgr"]) self.assertEqual(food_state.managers[0][1].args, ("a", "b", 1, 2)) @isolate_apps("migrations", "django.contrib.contenttypes") def test_order_with_respect_to_private_field(self): class PrivateFieldModel(models.Model): content_type = models.ForeignKey("contenttypes.ContentType", models.CASCADE) object_id = models.PositiveIntegerField() private = GenericForeignKey() class Meta: order_with_respect_to = "private" state = ModelState.from_model(PrivateFieldModel) self.assertNotIn("order_with_respect_to", state.options) @isolate_apps("migrations") def test_abstract_model_children_inherit_indexes(self): class Abstract(models.Model): name = models.CharField(max_length=50) class Meta: app_label = "migrations" abstract = True indexes = [models.Index(fields=["name"])] class Child1(Abstract): pass class Child2(Abstract): pass abstract_state = ModelState.from_model(Abstract) child1_state = ModelState.from_model(Child1) child2_state = ModelState.from_model(Child2) index_names = [index.name for index in abstract_state.options["indexes"]] self.assertEqual(index_names, ["migrations__name_ae16a4_idx"]) index_names = [index.name for index in child1_state.options["indexes"]] self.assertEqual(index_names, ["migrations__name_b0afd7_idx"]) index_names = [index.name for index in child2_state.options["indexes"]] self.assertEqual(index_names, ["migrations__name_016466_idx"]) # Modifying the state doesn't modify the index on the model. 
child1_state.options["indexes"][0].name = "bar" self.assertEqual(Child1._meta.indexes[0].name, "migrations__name_b0afd7_idx") @isolate_apps("migrations") def test_explicit_index_name(self): class TestModel(models.Model): name = models.CharField(max_length=50) class Meta: app_label = "migrations" indexes = [models.Index(fields=["name"], name="foo_idx")] model_state = ModelState.from_model(TestModel) index_names = [index.name for index in model_state.options["indexes"]] self.assertEqual(index_names, ["foo_idx"]) @isolate_apps("migrations") def test_from_model_constraints(self): class ModelWithConstraints(models.Model): size = models.IntegerField() class Meta: constraints = [ models.CheckConstraint( condition=models.Q(size__gt=1), name="size_gt_1" ) ] state = ModelState.from_model(ModelWithConstraints) model_constraints = ModelWithConstraints._meta.constraints state_constraints = state.options["constraints"] self.assertEqual(model_constraints, state_constraints) self.assertIsNot(model_constraints, state_constraints) self.assertIsNot(model_constraints[0], state_constraints[0])
ModelStateTests
python
pandas-dev__pandas
asv_bench/benchmarks/sparse.py
{ "start": 484, "end": 921 }
class ____: def setup(self): K = 50 N = 50001 rng = date_range("1/1/2000", periods=N, freq="min") self.series = {} for i in range(1, K): data = np.random.randn(N)[:-i] idx = rng[:-i] data[100:] = np.nan self.series[i] = Series(SparseArray(data), index=idx) def time_series_to_frame(self): pd.DataFrame(self.series)
SparseSeriesToFrame
python
viewflow__viewflow
viewflow/workflow/flow/views/detail.py
{ "start": 1387, "end": 1803 }
class ____(mixins.TaskViewTemplateNames, generic.TemplateView): """ Default detail view for the flow task. Get confirmation from user, assigns task and redirects to task pages. """ template_filename = "task_detail.html" def get_actions(self): activation = self.request.activation return activation.flow_task.get_available_actions(activation, self.request.user)
DetailTaskView
python
doocs__leetcode
solution/3100-3199/3122.Minimum Number of Operations to Satisfy Conditions/Solution.py
{ "start": 0, "end": 615 }
class ____: def minimumOperations(self, grid: List[List[int]]) -> int: m, n = len(grid), len(grid[0]) f = [[inf] * 10 for _ in range(n)] for i in range(n): cnt = [0] * 10 for j in range(m): cnt[grid[j][i]] += 1 if i == 0: for j in range(10): f[i][j] = m - cnt[j] else: for j in range(10): for k in range(10): if k != j: f[i][j] = min(f[i][j], f[i - 1][k] + m - cnt[j]) return min(f[-1])
Solution
python
tensorflow__tensorflow
tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py
{ "start": 2180, "end": 36945 }
class ____(quantize_model_test_base.QuantizedModelTest): @parameterized.parameters( testing.parameter_combinations([{ 'bias_fn': ( None, nn_ops.bias_add, ), 'activation_fn': ( None, nn_ops.relu, nn_ops.relu6, ), 'dim_sizes': ( # tf.MatMul cases. ([None, 1024], [1024, 3]), # dynamic batch dim. ([1, 1024], [1024, 3]), # tf.BatchMatMul cases. ([10, 1, 1024], [10, 1024, 3]), ([2, 3, 1, 1024], [2, 3, 1024, 3]), ), 'merge_fusion_with_dequantize': (False, True), }]) ) @test_util.run_in_graph_and_eager_modes def test_matmul_ptq_model( self, bias_fn: Optional[ops.Operation], activation_fn: Optional[ops.Operation], dim_sizes: Sequence[int], merge_fusion_with_dequantize: bool, ): lhs_dim_size, rhs_dim_size = dim_sizes input_shape = (*lhs_dim_size,) filter_shape = (*rhs_dim_size,) static_input_shape = [dim if dim is not None else 2 for dim in input_shape] model = self._create_matmul_model( input_shape, filter_shape, self._input_saved_model_path, bias_fn, activation_fn, ) rng = np.random.default_rng(seed=42) input_data = ops.convert_to_tensor( rng.uniform(low=0.0, high=1.0, size=static_input_shape).astype( np.float32 ) ) def data_gen() -> repr_dataset.RepresentativeDataset: for _ in range(100): yield { 'input_tensor': rng.uniform( low=0.0, high=1.0, size=static_input_shape ).astype(np.float32) } dataset_path = self.create_tempfile('tfrecord').full_path path_map = {'serving_default': dataset_path} repr_dataset.TfRecordRepresentativeDatasetSaver(path_map).save( {'serving_default': data_gen()} ) config = qc.QuantizationConfig( static_range_ptq_preset=qc.StaticRangePtqPreset( representative_datasets=[ qc.RepresentativeDatasetConfig( tf_record=qc.TfRecordFile(path=dataset_path) ) ] ), tf_saved_model=qc.TfSavedModelConfig(tags=[tag_constants.SERVING]), pipeline_config=qc.PipelineConfig( merge_fusion_with_dequantize=merge_fusion_with_dequantize ), ) quantization.quantize_saved_model( self._input_saved_model_path, self._output_saved_model_path, config, ) expected_outputs = 
model.matmul(input_data) root = load.load(self._output_saved_model_path) self.assertCountEqual(root.signatures.keys(), {'serving_default'}) new_outputs = root.signatures['serving_default']( input_tensor=ops.convert_to_tensor(input_data) ) module_str = self._extract_first_xla_call_module_op( self._output_saved_model_path ) self.assertTrue(re.search('stablehlo.dot_general.*xi8>', module_str)) if bias_fn: self.assertTrue(re.search('stablehlo.add.*xi32>', module_str)) # Consider if there is a way to check if activation fusion is properly # done in MLIR level. # Tests that the quantized graph outputs similar values. The rtol and atol # values are arbitrary. self.assertAllClose(new_outputs, expected_outputs, rtol=0.3, atol=0.2) # Due to other meta data, the compression is not exactly 1/4. self.assertLess( testing.get_size_ratio( self._output_saved_model_path, self._input_saved_model_path ), 0.65, ) if merge_fusion_with_dequantize: # Check activation functions are explicitly present. # If present the last op before return should be stablehlo.clamp for relu6 # and stablehlo.maximum for relu. if activation_fn is nn_ops.relu6: self.assertRegex(module_str, r'stablehlo.clamp.*\n.*return') elif activation_fn is nn_ops.relu: self.assertRegex(module_str, r'stablehlo.maximum.*\n.*return') else: # Check activation functions are implicit. 
self.assertNotRegex(module_str, r'stablehlo.clamp.*\n.*return') self.assertNotRegex(module_str, r'stablehlo.maximum.*\n.*return') @parameterized.parameters( testing.parameter_combinations([{ 'same_scale_op': ( 'concatenate', 'gather', 'max_pool', 'pad', 'reshape', 'select', 'slice', 'transpose', ), }]) ) @test_util.run_in_graph_and_eager_modes def test_matmul_and_same_scale_ptq_model( self, same_scale_op: str, ): input_shape = (2, 3, 1, 1024) filter_shape = (2, 3, 1024, 3) static_input_shape = [dim if dim is not None else 2 for dim in input_shape] model = self._create_matmul_and_same_scale_model( input_shape, filter_shape, self._input_saved_model_path, same_scale_op, ) rng = np.random.default_rng(seed=42) input_data = ops.convert_to_tensor( rng.uniform(low=0.0, high=1.0, size=static_input_shape).astype( np.float32 ) ) def data_gen() -> repr_dataset.RepresentativeDataset: for _ in range(100): yield { 'input_tensor': rng.uniform( low=0.0, high=1.0, size=static_input_shape ).astype(np.float32) } dataset_path = self.create_tempfile('tfrecord').full_path path_map = {'serving_default': dataset_path} repr_dataset.TfRecordRepresentativeDatasetSaver(path_map).save( {'serving_default': data_gen()} ) config = qc.QuantizationConfig( static_range_ptq_preset=qc.StaticRangePtqPreset( representative_datasets=[ qc.RepresentativeDatasetConfig( tf_record=qc.TfRecordFile(path=dataset_path) ) ] ), tf_saved_model=qc.TfSavedModelConfig(tags=[tag_constants.SERVING]), ) quantization.quantize_saved_model( self._input_saved_model_path, self._output_saved_model_path, config, ) expected_outputs = model.matmul_and_same_scale(input_data) root = load.load(self._output_saved_model_path) self.assertCountEqual(root.signatures.keys(), {'serving_default'}) new_outputs = root.signatures['serving_default']( input_tensor=ops.convert_to_tensor(input_data) ) # Tests that the quantized graph outputs similar values. The rtol and atol # values are arbitrary. 
self.assertAllClose(new_outputs, expected_outputs, rtol=0.03, atol=0.2) # Due to other meta data, the compression is not exactly 1/4. self.assertLess( testing.get_size_ratio( self._output_saved_model_path, self._input_saved_model_path ), 0.65, ) @parameterized.parameters( testing.parameter_combinations([{ 'same_scale_op': ( 'reshape', # This corresponds to stablehlo.dynamic_reshape 'slice', # This corresponds to stablehlo.dynamic_slice. # TODO: b/326242075 - Support other same-scale ops. ), 'dim_sizes': (([None, 1024], [1024, 3]),), }]) ) @test_util.run_in_graph_and_eager_modes def test_matmul_and_same_scale_ptq_model_dynamic( self, same_scale_op: str, dim_sizes: Sequence[int], ): input_dim_size, filter_dim_size = dim_sizes input_shape = (*input_dim_size,) filter_shape = (*filter_dim_size,) static_input_shape = [dim if dim is not None else 2 for dim in input_shape] model = self._create_matmul_and_same_scale_model( input_shape, filter_shape, self._input_saved_model_path, same_scale_op, ) rng = np.random.default_rng(seed=42) input_data = ops.convert_to_tensor( rng.uniform(low=0.0, high=1.0, size=static_input_shape).astype( np.float32 ) ) def data_gen() -> repr_dataset.RepresentativeDataset: for _ in range(100): yield { 'input_tensor': rng.uniform( low=0.0, high=1.0, size=static_input_shape ).astype(np.float32) } dataset_path = self.create_tempfile('tfrecord').full_path path_map = {'serving_default': dataset_path} repr_dataset.TfRecordRepresentativeDatasetSaver(path_map).save( {'serving_default': data_gen()} ) config = qc.QuantizationConfig( static_range_ptq_preset=qc.StaticRangePtqPreset( representative_datasets=[ qc.RepresentativeDatasetConfig( tf_record=qc.TfRecordFile(path=dataset_path) ) ] ), tf_saved_model=qc.TfSavedModelConfig(tags=[tag_constants.SERVING]), ) quantization.quantize_saved_model( self._input_saved_model_path, self._output_saved_model_path, config, ) expected_outputs = model.matmul_and_same_scale(input_data) root = 
load.load(self._output_saved_model_path) self.assertCountEqual(root.signatures.keys(), {'serving_default'}) new_outputs = root.signatures['serving_default']( input_tensor=ops.convert_to_tensor(input_data) ) # Tests that the quantized graph outputs similar values. The rtol and atol # values are arbitrary. self.assertAllClose(new_outputs, expected_outputs, rtol=0.03, atol=0.2) # Due to other meta data, the compression is not exactly 1/4. self.assertLess( testing.get_size_ratio( self._output_saved_model_path, self._input_saved_model_path ), 0.6, ) @parameterized.parameters( testing.parameter_combinations([{ 'bias_fn': ( None, nn_ops.bias_add, ), 'activation_fn': ( None, nn_ops.relu, nn_ops.relu6, ), 'has_batch_norm': (False, True), 'input_shape_dynamic': ( False, True, ), 'enable_per_channel_quantized_weight': ( False, True, ), 'merge_fusion_with_dequantize': (False, True), 'has_func_alias': (False, True), }]) ) @test_util.run_in_graph_and_eager_modes def test_conv_ptq_model( self, bias_fn: Optional[ops.Operation], activation_fn: Optional[ops.Operation], has_batch_norm: bool, input_shape_dynamic: bool, enable_per_channel_quantized_weight: bool, merge_fusion_with_dequantize: bool, dilations: Sequence[int] = None, has_func_alias: bool = False, ): input_shape = (None, 3, 4, 3) if input_shape_dynamic else (1, 3, 4, 3) filter_shape = (2, 3, 3, 2) strides = (1, 1, 1, 1) model = self._create_conv2d_model( input_shape, filter_shape, self._input_saved_model_path, bias_fn, activation_fn, has_batch_norm, strides, dilations, 'SAME', has_func_alias, ) # TODO: b/331809306 - Investigate why these test fail then re-enable. if has_batch_norm and (bias_fn or not input_shape_dynamic): return # TODO: b/331120943 - Re-enable this after correctly handling quantization # granularity per quantizable scope. if has_batch_norm and (not bias_fn and input_shape_dynamic): return # Generate model input data. 
rng = np.random.default_rng(seed=42) static_input_shape = [dim if dim is not None else 2 for dim in input_shape] input_data = ops.convert_to_tensor( rng.uniform(low=0.0, high=1.0, size=static_input_shape).astype( np.float32 ) ) def data_gen() -> repr_dataset.RepresentativeDataset: for _ in range(100): yield { 'input_tensor': rng.uniform( low=0.0, high=1.0, size=static_input_shape ).astype(np.float32) } dataset_path = self.create_tempfile('tfrecord').full_path path_map = {'serving_default': dataset_path} repr_dataset.TfRecordRepresentativeDatasetSaver(path_map).save( {'serving_default': data_gen()} ) config = qc.QuantizationConfig( static_range_ptq_preset=qc.StaticRangePtqPreset( representative_datasets=[ qc.RepresentativeDatasetConfig( tf_record=qc.TfRecordFile(path=dataset_path) ) ], enable_per_channel_quantized_weight=enable_per_channel_quantized_weight, ), tf_saved_model=qc.TfSavedModelConfig(tags=[tag_constants.SERVING]), pipeline_config=qc.PipelineConfig( merge_fusion_with_dequantize=merge_fusion_with_dequantize ), ) quantization.quantize_saved_model( self._input_saved_model_path, self._output_saved_model_path, config, ) expected_outputs = model.conv2d(input_data) root = load.load(self._output_saved_model_path) self.assertCountEqual(root.signatures.keys(), {'serving_default'}) new_outputs = root.signatures['serving_default']( input_tensor=ops.convert_to_tensor(input_data) ) module_str = self._extract_first_xla_call_module_op( self._output_saved_model_path ) self.assertTrue(re.search('stablehlo.convolution.*xi8>', module_str)) if bias_fn: self.assertTrue(re.search('stablehlo.add.*xi32>', module_str)) # Consider if there is a way to check if activation fusion is properly # done in MLIR level. # Tests that the quantized graph outputs similar values. The rtol and atol # values are arbitrary. self.assertAllClose(new_outputs, expected_outputs, rtol=0.02, atol=0.05) # Due to other meta data, the compression is not exactly 1/4. 
self.assertLess( testing.get_size_ratio( self._output_saved_model_path, self._input_saved_model_path ), 0.61, ) if merge_fusion_with_dequantize: # Check activation functions are explicitly present. # If present the last op before return should be stablehlo.clamp for relu6 # and stablehlo.maximum for relu. if activation_fn is nn_ops.relu6: self.assertRegex(module_str, r'stablehlo.clamp.*\n.*return') elif activation_fn is nn_ops.relu: self.assertRegex(module_str, r'stablehlo.maximum.*\n.*return') else: # Check activation functions are implicit. self.assertNotRegex(module_str, r'stablehlo.clamp.*\n.*return') self.assertNotRegex(module_str, r'stablehlo.maximum.*\n.*return') if has_func_alias: func_aliases = self._get_function_aliases( self._output_saved_model_path, [tag_constants.SERVING] ) self.assertCountEqual( func_aliases.values(), [quantize_model_test_base.FUNC_ALIAS] ) @parameterized.parameters( testing.parameter_combinations([{ 'equation': ( 'abc,cde->abde', 'abc,dce->abde', ), }]) ) def test_einsum_ptq_model( self, equation: str, ): _, y_shape, bias_shape, x_signature, y_signature = ( self._prepare_sample_einsum_datashapes(equation, use_bias=True) ) model = self._create_einsum_model( self._input_saved_model_path, equation, y_shape, x_signature, y_signature, bias_shape, ) # Generate model input data. 
rng = np.random.default_rng(seed=42) input_data = ops.convert_to_tensor( rng.uniform(low=0.0, high=1.0, size=x_signature).astype('f4') ) def data_gen() -> repr_dataset.RepresentativeDataset: for _ in range(100): yield { 'x': ops.convert_to_tensor( np.random.uniform(low=0.0, high=1.0, size=x_signature).astype( 'f4' ) ), } dataset_path = self.create_tempfile('tfrecord').full_path path_map = {'serving_default': dataset_path} repr_dataset.TfRecordRepresentativeDatasetSaver(path_map).save( {'serving_default': data_gen()} ) config = qc.QuantizationConfig( static_range_ptq_preset=qc.StaticRangePtqPreset( representative_datasets=[ qc.RepresentativeDatasetConfig( tf_record=qc.TfRecordFile(path=dataset_path) ) ] ), tf_saved_model=qc.TfSavedModelConfig(tags=[tag_constants.SERVING]), ) quantization.quantize_saved_model( self._input_saved_model_path, self._output_saved_model_path, config, ) expected_outputs = model.einsum_with_kernel(input_data) root = load.load(self._output_saved_model_path) self.assertCountEqual(root.signatures.keys(), {'serving_default'}) new_outputs = root.signatures['serving_default']( x=ops.convert_to_tensor(input_data) ) # Tests that the quantized graph outputs similar values. The rtol and atol # values are arbitrary. self.assertAllClose(new_outputs, expected_outputs, rtol=0.02, atol=0.04) # Due to other meta data, the compression is not exactly 1/4. self.assertLess( testing.get_size_ratio( self._output_saved_model_path, self._input_saved_model_path ), 0.65, ) def test_reuse_calibration_data(self): _, y_shape, bias_shape, x_signature, y_signature = ( self._prepare_sample_einsum_datashapes('abc,cde->abde', use_bias=True) ) self._create_einsum_model( self._input_saved_model_path, 'abc,cde->abde', y_shape, x_signature, y_signature, bias_shape, ) # Generate model input data. 
rng = np.random.default_rng(seed=42) input_data = ops.convert_to_tensor( rng.uniform(low=0.0, high=1.0, size=x_signature).astype('f4') ) def data_gen() -> repr_dataset.RepresentativeDataset: for _ in range(100): yield { 'x': ops.convert_to_tensor( np.random.uniform(low=0.0, high=1.0, size=x_signature).astype( 'f4' ) ), } dataset_path = self.create_tempfile('tfrecord').full_path path_map = {'serving_default': dataset_path} repr_dataset.TfRecordRepresentativeDatasetSaver(path_map).save( {'serving_default': data_gen()} ) calibration_data_dir = self.create_tempdir('calibration_data').full_path config = qc.QuantizationConfig( static_range_ptq_preset=qc.StaticRangePtqPreset( representative_datasets=[ qc.RepresentativeDatasetConfig( tf_record=qc.TfRecordFile(path=dataset_path) ) ] ), tf_saved_model=qc.TfSavedModelConfig(tags=[tag_constants.SERVING]), calibration_options=qc.CalibrationOptions( calibration_method=_CalibrationMethod.CALIBRATION_METHOD_MIN_MAX, calibration_data_dir=calibration_data_dir, ), ) # Run quantization the first time, calibration is expected to be run. with self.assertLogs(level='INFO') as info_logs: quantization.quantize_saved_model( self._input_saved_model_path, self._output_saved_model_path, config, ) self.assertTrue( self._any_log_contains( 'Calibration step is executed in graph mode.', info_logs.records, ) ) module_str = self._extract_first_xla_call_module_op( self._output_saved_model_path ) self.assertTrue( re.search('stablehlo.dot_general.*xi8>.*xi8>.*xi32>', module_str) ) # Run quantization the first time, calibration is expected to be skipped. 
output_saved_model_path_2 = self.create_tempdir('output2').full_path with self.assertLogs(level='INFO') as info_logs: quantization.quantize_saved_model( self._input_saved_model_path, output_saved_model_path_2, config, ) self.assertFalse( self._any_log_contains( 'Calibration step is executed in graph mode.', info_logs.records, ) ) module_str = self._extract_first_xla_call_module_op( output_saved_model_path_2 ) self.assertTrue( re.search('stablehlo.dot_general.*xi8>.*xi8>.*xi32>', module_str) ) # Expect both quantized model to produce the same results. root = load.load(self._output_saved_model_path) self.assertCountEqual(root.signatures.keys(), {'serving_default'}) new_outputs_1 = root.signatures['serving_default']( x=ops.convert_to_tensor(input_data) ) root = load.load(output_saved_model_path_2) self.assertCountEqual(root.signatures.keys(), {'serving_default'}) new_outputs_2 = root.signatures['serving_default']( x=ops.convert_to_tensor(input_data) ) self.assertAllClose(new_outputs_1, new_outputs_2) @parameterized.named_parameters( ('use_constant_with_int32_input', np.int32, False), ('use_variable_with_int32_input', np.int32, True), ('use_constant_with_int64_input', np.int64, False), ('use_variable_with_int64_input', np.int64, True), ) @test_util.run_v2_only def test_gather_model(self, input_type, use_variable): model = self._create_gather_model(input_type, use_variable) save.save(model, self._input_saved_model_path) rng = np.random.default_rng(seed=42) static_input_shape = [6] def data_gen() -> repr_dataset.RepresentativeDataset: for _ in range(100): yield { 'input_tensor': rng.uniform( low=0.0, high=10, size=static_input_shape ).astype(input_type) } dataset_path = self.create_tempfile('tfrecord').full_path path_map = {'serving_default': dataset_path} repr_dataset.TfRecordRepresentativeDatasetSaver(path_map).save( {'serving_default': data_gen()} ) config = qc.QuantizationConfig( static_range_ptq_preset=qc.StaticRangePtqPreset( representative_datasets=[ 
qc.RepresentativeDatasetConfig( tf_record=qc.TfRecordFile(path=dataset_path) ) ] ), tf_saved_model=qc.TfSavedModelConfig(tags=[tag_constants.SERVING]), ) quantization.quantize_saved_model( self._input_saved_model_path, self._output_saved_model_path, config, ) root = load.load(self._output_saved_model_path) self.assertCountEqual(root.signatures.keys(), {'serving_default'}) module_str = self._extract_first_xla_call_module_op( self._output_saved_model_path ) self.assertTrue(re.search('stablehlo.gather.*xi8>', module_str)) # Due to other meta data, the compression is not exactly 1/4. self.assertLess( testing.get_size_ratio( self._output_saved_model_path, self._input_saved_model_path ), 1 / 3, ) def test_when_preset_not_srq_raises_error(self): self._create_matmul_model( input_shape=(1, 1024), weight_shape=(1024, 3), saved_model_path=self._input_saved_model_path, ) config = qc.QuantizationConfig() with self.assertRaisesRegex(ValueError, 'only supports static-range PTQ'): quantization.quantize_saved_model( self._input_saved_model_path, self._output_saved_model_path, config, ) @test_util.run_in_graph_and_eager_modes def test_ptq_denylist_basic(self): """Tests that the op is not quantized when no quantization is enabled.""" input_shape = (1, 2) model = self._create_matmul_model( input_shape, weight_shape=(2, 3), saved_model_path=self._input_saved_model_path, ) rng = np.random.default_rng(1230) random_tensor_gen_fn = lambda: rng.uniform( low=0.0, high=1.0, size=input_shape ).astype(np.float32) def data_gen() -> repr_dataset.RepresentativeDataset: for _ in range(50): yield {'input_tensor': random_tensor_gen_fn()} dataset_path = self.create_tempfile('tfrecord').full_path path_map = {'serving_default': dataset_path} repr_dataset.TfRecordRepresentativeDatasetSaver(path_map).save( {'serving_default': data_gen()} ) config = qc.QuantizationConfig( static_range_ptq_preset=qc.StaticRangePtqPreset( representative_datasets=[ qc.RepresentativeDatasetConfig( 
tf_record=qc.TfRecordFile(path=dataset_path) ) ] ), tf_saved_model=qc.TfSavedModelConfig(tags=[tag_constants.SERVING]), # Disable quantization for the quantizable unit (lifted function) whose # function name starts with "composite_dot_general". specs=qc.QuantizationSpecs( specs=[ qc.QuantizationSpec( matcher=qc.MatcherSpec( function_name=qc.FunctionNameMatcherSpec( regex='composite_dot_general.*' ) ), method=qc.Method(no_quantization={}), ) ] ), ) quantization.quantize_saved_model( self._input_saved_model_path, self._output_saved_model_path, config, ) input_data = ops.convert_to_tensor(random_tensor_gen_fn()) expected_outputs = model.matmul(input_data) root = load.load(self._output_saved_model_path) self.assertCountEqual(root.signatures.keys(), {'serving_default'}) new_outputs = root.signatures['serving_default']( input_tensor=ops.convert_to_tensor(input_data) ) # Indirectly tests that the model is not quantized by asserting that there # are negligible numeric difference. self.assertAllClose(new_outputs, expected_outputs, rtol=0.000001) # Due to other meta data, the compression is not exactly 1/4. self.assertLess( testing.get_size_ratio( self._output_saved_model_path, self._input_saved_model_path ), 0.4, ) @test_util.run_in_graph_and_eager_modes def test_ptq_selective_denylist(self): """Tests that the op is not quantized when no quantization is enabled.""" rng = np.random.default_rng(1230) random_tensor_gen_fn = lambda shape: rng.uniform( low=-1.0, high=1.0, size=shape ).astype(np.float32) class TwoMatmulModel(module.Module): """A model with two matmul ops.""" @def_function.function def matmul(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]: """Performs a matrix multiplication. Args: input_tensor: Input tensor to matmul with the filter. 
Returns: A 'output' -> output tensor mapping """ out = math_ops.matmul(input_tensor, random_tensor_gen_fn((2, 3))) out = math_ops.matmul(out, random_tensor_gen_fn((3, 4))) return {'output': out} model = TwoMatmulModel() input_shape = (1, 2) save.save( model, self._input_saved_model_path, signatures=model.matmul.get_concrete_function( tensor_spec.TensorSpec( shape=input_shape, dtype=dtypes.float32, name='input_tensor' ) ), ) def data_gen() -> repr_dataset.RepresentativeDataset: for _ in range(50): yield {'input_tensor': random_tensor_gen_fn(input_shape)} dataset_path = self.create_tempfile('tfrecord').full_path path_map = {'serving_default': dataset_path} repr_dataset.TfRecordRepresentativeDatasetSaver(path_map).save( {'serving_default': data_gen()} ) config = qc.QuantizationConfig( static_range_ptq_preset=qc.StaticRangePtqPreset( representative_datasets=[ qc.RepresentativeDatasetConfig( tf_record=qc.TfRecordFile(path=dataset_path) ), ], ), tf_saved_model=qc.TfSavedModelConfig(tags=[tag_constants.SERVING]), # Disable quantization for the quantizable unit (lifted function) whose # function name matches "composite_dot_general_fn_1". # "composite_dot_general_fn_2" will be quantized. specs=qc.QuantizationSpecs( specs=[ qc.QuantizationSpec( matcher=qc.MatcherSpec( function_name=qc.FunctionNameMatcherSpec( regex='composite_dot_general_fn_1' ) ), method=qc.Method(no_quantization={}), ) ] ), ) quantization.quantize_saved_model( self._input_saved_model_path, self._output_saved_model_path, config, ) input_data = ops.convert_to_tensor(random_tensor_gen_fn(input_shape)) expected_outputs = model.matmul(input_data) root = load.load(self._output_saved_model_path) self.assertCountEqual(root.signatures.keys(), {'serving_default'}) new_outputs = root.signatures['serving_default']( input_tensor=ops.convert_to_tensor(input_data) ) # Indirectly tests that the model is only partially quantized. 
self.assertAllClose(new_outputs, expected_outputs, rtol=0.011) # Due to other meta data, the compression is not exactly 1/4. self.assertLess( testing.get_size_ratio( self._output_saved_model_path, self._input_saved_model_path ), 0.55, ) @test_util.run_in_graph_and_eager_modes def test_ptq_quantization_method_not_applied_when_matcher_mismatch(self): """Tests that quantization method is not applied to unmatched units.""" input_shape = (1, 2) model = self._create_matmul_model( input_shape, weight_shape=(2, 3), saved_model_path=self._input_saved_model_path, ) rng = np.random.default_rng(1230) random_tensor_gen_fn = lambda: rng.uniform( low=0.0, high=1.0, size=input_shape ).astype(np.float32) def data_gen() -> repr_dataset.RepresentativeDataset: for _ in range(50): yield {'input_tensor': random_tensor_gen_fn()} dataset_path = self.create_tempfile('tfrecord').full_path path_map = {'serving_default': dataset_path} repr_dataset.TfRecordRepresentativeDatasetSaver(path_map).save( {'serving_default': data_gen()} ) config = qc.QuantizationConfig( static_range_ptq_preset=qc.StaticRangePtqPreset( representative_datasets=[ qc.RepresentativeDatasetConfig( tf_record=qc.TfRecordFile(path=dataset_path) ) ] ), tf_saved_model=qc.TfSavedModelConfig(tags=[tag_constants.SERVING]), specs=qc.QuantizationSpecs( specs=[ qc.QuantizationSpec( # Provide a regex that wouldn't match any quantizable units. 
matcher=qc.MatcherSpec( function_name=qc.FunctionNameMatcherSpec( regex='.*invalid_function_name.*' ), ), method=qc.Method(no_quantization={}), ), ], ), ) quantization.quantize_saved_model( self._input_saved_model_path, self._output_saved_model_path, config, ) input_data = ops.convert_to_tensor(random_tensor_gen_fn()) expected_outputs = model.matmul(input_data) root = load.load(self._output_saved_model_path) self.assertCountEqual(root.signatures.keys(), {'serving_default'}) new_outputs = root.signatures['serving_default']( input_tensor=ops.convert_to_tensor(input_data) ) # Tests that the quantized graph outputs similar values. They also shouldn't # be exactly the same. Indirectly proves that the `FunctionNameMatcherSpec` # with regex '.*invalid_function_name.*' did not match the quantizable unit. self.assertAllClose(new_outputs, expected_outputs, rtol=0.04) self.assertNotAllClose(new_outputs, expected_outputs, 1e-7) # Due to other meta data, the compression is not exactly 1/4. self.assertLess( testing.get_size_ratio( self._output_saved_model_path, self._input_saved_model_path ), 0.4, ) def test_save_quantization_report_file(self): """Tests that the quantization report file is created. Also test that it is populated with textproto of `QuantizationResults`. 
""" input_shape = (1, 16) filter_shape = (16, 3) self._create_matmul_model( input_shape, filter_shape, self._input_saved_model_path, ) rng = np.random.default_rng(seed=42) def data_gen() -> repr_dataset.RepresentativeDataset: for _ in range(100): yield { 'input_tensor': rng.uniform( low=0.0, high=1.0, size=input_shape ).astype(np.float32) } dataset_path = self.create_tempfile('tfrecord').full_path path_map = {'serving_default': dataset_path} repr_dataset.TfRecordRepresentativeDatasetSaver(path_map).save( {'serving_default': data_gen()} ) report_file_path = self.create_tempfile('report.txtpb').full_path config = qc.QuantizationConfig( static_range_ptq_preset=qc.StaticRangePtqPreset( representative_datasets=[ qc.RepresentativeDatasetConfig( tf_record=qc.TfRecordFile(path=dataset_path) ) ] ), tf_saved_model=qc.TfSavedModelConfig(tags=[tag_constants.SERVING]), report_file_path=report_file_path, ) quantization.quantize_saved_model( self._input_saved_model_path, self._output_saved_model_path, config, ) # Test the contents of the report file, which is a textproto of # `QuantizationResults`. self.assertTrue(os.path.exists(report_file_path)) with open(report_file_path, 'r') as f: quantization_results_textpb = f.read() results = qc.QuantizationResults() text_format.Parse(quantization_results_textpb, results) self.assertProtoEquals( expected_message_maybe_ascii=r""" results { quantizable_unit { name: "composite_dot_general_fn_1" } method { static_range_ptq {} } } """, validate_message=results, ) @test_util.run_all_in_graph_and_eager_modes
StaticRangeQuantizationTest
python
great-expectations__great_expectations
contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_us_zipcode_within_mile_radius_of_given_zipcode.py
{ "start": 1153, "end": 3253 }
class ____(ColumnMapMetricProvider): """ Determines whether a US zip code is within a the given radius in miles of another given zip code. requirements: uszipcode """ # This is the id string that will be used to reference your metric. # Please see {some doc} for information on how to choose an id string for your Metric. condition_metric_name = "column_values.us_zipcode_within_radius_of_given_zipcode" condition_value_keys = ("central_zip", "radius_in_miles") # This method defines the business logic for evaluating your metric when using a PandasExecutionEngine @column_condition_partial(engine=PandasExecutionEngine) def _pandas(cls, column, central_zip=None, radius_in_miles=10, **kwargs): search = uszipcode.SearchEngine() center_zipcode_object = search.by_zipcode(central_zip) def _find_distance_between_zipcodes( center_lat, center_long, zipcode: int, search: uszipcode.search.SearchEngine ): zipcode_object = search.by_zipcode(zipcode) return zipcode_object.dist_from(lat=center_lat, lng=center_long) return column.apply( lambda loc: _find_distance_between_zipcodes( center_lat=center_zipcode_object.lat, center_long=center_zipcode_object.lng, zipcode=int(loc), search=search, ) <= radius_in_miles ) # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine # @column_condition_partial(engine=SqlAlchemyExecutionEngine) # def _sqlalchemy(cls, column, _dialect, **kwargs): # return column.in_([3]) # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine # @column_condition_partial(engine=SparkDFExecutionEngine) # def _spark(cls, column, **kwargs): # return column.isin([3]) # This class defines the Expectation itself # The main business logic for calculation lives here.
ColumnValuesAreUSZipcodeWithinMileRadiusOfGivenZipcode
python
redis__redis-py
tests/test_connection_pool.py
{ "start": 15172, "end": 16083 }
class ____: def test_extra_typed_querystring_options(self): pool = redis.BlockingConnectionPool.from_url( "redis://localhost/2?socket_timeout=20&socket_connect_timeout=10" "&socket_keepalive=&retry_on_timeout=Yes&max_connections=10&timeout=42" ) assert pool.connection_class == redis.Connection assert pool.connection_kwargs == { "host": "localhost", "db": 2, "socket_timeout": 20.0, "socket_connect_timeout": 10.0, "retry_on_timeout": True, } assert pool.max_connections == 10 assert pool.timeout == 42.0 def test_invalid_extra_typed_querystring_options(self): with pytest.raises(ValueError): redis.BlockingConnectionPool.from_url( "redis://localhost/2?timeout=_not_a_float_" )
TestBlockingConnectionPoolURLParsing
python
pytorch__pytorch
torch/_dynamo/source.py
{ "start": 20551, "end": 20963 }
class ____(ChainedSource): def __post_init__(self) -> None: assert self.base is not None def reconstruct(self, codegen: "PyCodegen") -> None: codegen(self.base) def guard_source(self) -> GuardSource: return self.base.guard_source() def name(self) -> str: return f"{self.base.name()}.__obj_flatten__()" @dataclasses.dataclass(frozen=True)
FlattenScriptObjectSource
python
tox-dev__tox
src/tox/tox_env/python/virtual_env/package/pyproject.py
{ "start": 17506, "end": 21532 }
class ____(Frontend): def __init__(self, root: Path, env: Pep517VenvPackager) -> None: super().__init__(*Frontend.create_args_from_folder(root)) self._tox_env = env self._backend_executor_: LocalSubProcessPep517Executor | None = None into: dict[str, Any] = {} for hook in chain( (f"get_requires_for_build_{build_type}" for build_type in ["editable", "wheel", "sdist"]), (f"prepare_metadata_for_build_{build_type}" for build_type in ["editable", "wheel"]), (f"build_{build_type}" for build_type in ["editable", "wheel", "sdist"]), ): # wrap build methods in a cache wrapper def key(*args: Any, bound_return: str = hook, **kwargs: Any) -> str: # noqa: ARG001 return bound_return setattr(self, hook, cached(into, key=key)(getattr(self, hook))) @property def backend_cmd(self) -> Sequence[str]: return ["python", *self.backend_args] def _send(self, cmd: str, **kwargs: Any) -> tuple[Any, str, str]: try: if self._can_skip_prepare(cmd): return None, "", "" # will need to build wheel either way, avoid prepare return super()._send(cmd, **kwargs) except BackendFailed as exception: raise exception if isinstance(exception, ToxBackendFailed) else ToxBackendFailed(exception) from exception def _can_skip_prepare(self, cmd: str) -> bool: # given we'll build a wheel we might skip the prepare step return cmd in {"prepare_metadata_for_build_wheel", "prepare_metadata_for_build_editable"} and ( "wheel" in self._tox_env.builds or "editable" in self._tox_env.builds ) @contextmanager def _send_msg( self, cmd: str, result_file: Path, # noqa: ARG002 msg: str, ) -> Iterator[ToxCmdStatus]: try: with self._tox_env.execute_async( cmd=self.backend_cmd, cwd=self._root, stdin=StdinSource.API, show=None, run_id=cmd, executor=self.backend_executor, ) as execute_status: execute_status.write_stdin(f"{msg}{os.linesep}") yield ToxCmdStatus(execute_status) outcome = execute_status.outcome if outcome is not None: # pragma: no branch outcome.assert_success() finally: if self._tox_env.conf["fresh_subprocess"]: 
self.backend_executor.close() def _unexpected_response( self, cmd: str, got: Any, expected_type: Any, out: str, err: str, ) -> NoReturn: try: super()._unexpected_response(cmd, got, expected_type, out, err) except BackendFailed as exception: raise exception if isinstance(exception, ToxBackendFailed) else ToxBackendFailed(exception) from exception @property def backend_executor(self) -> LocalSubProcessPep517Executor: if self._backend_executor_ is None: environment_variables = self._tox_env.environment_variables.copy() backend = os.pathsep.join(str(i) for i in self._backend_paths).strip() if backend: environment_variables["PYTHONPATH"] = backend self._backend_executor_ = LocalSubProcessPep517Executor( colored=self._tox_env.options.is_colored, cmd=self.backend_cmd, env=environment_variables, cwd=self._root, ) return self._backend_executor_ @contextmanager def _wheel_directory(self) -> Iterator[Path]: yield self._tox_env.pkg_dir # use our local wheel directory for building wheel @impl def tox_register_tox_env(register: ToxEnvRegister) -> None: register.add_package_env(Pep517VirtualEnvPackager) __all__ = [ "Pep517VenvPackager", "Pep517VirtualEnvPackager", ]
Pep517VirtualEnvFrontend
python
openai__openai-python
src/openai/types/fine_tuning/checkpoints/permission_create_params.py
{ "start": 252, "end": 407 }
class PermissionCreateParams(TypedDict, total=False):
    """Request body for creating a fine-tuned-checkpoint permission."""

    project_ids: Required[SequenceNotStr[str]]
    """The project identifiers to grant access to."""
PermissionCreateParams
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/properties.py
{ "start": 3723, "end": 4071 }
class PropertyCallable:
    """Taint-analysis fixture: a property whose getter both sinks instance
    state and returns a tainted callable-like object."""

    def __init__(self, z: str) -> None:
        self.z = z

    @property
    def attribute(self) -> PropertyCallableReturn:
        # Sink the stored value, then return a freshly tainted object.
        _test_sink(self.z)
        return PropertyCallableReturn(_test_source())


def test_property_callable():
    # Taint flows: source -> __init__ -> property sink, and the property's
    # tainted return is invoked with another source.
    obj = PropertyCallable(_test_source())
    return obj.attribute(_test_source())
PropertyCallable
python
django__django
django/db/models/functions/math.py
{ "start": 2875, "end": 2950 }
class Floor(Transform):
    """Database FLOOR() function: largest integer value not greater than
    the argument."""

    function = "FLOOR"
    lookup_name = "floor"
Floor
python
h5py__h5py
h5py/tests/test_attrs.py
{ "start": 856, "end": 1204 }
class TestRepr(TestCase):
    """Feature: AttributeManager instances render a helpful __repr__ string."""

    def test_repr(self):
        # repr() must yield a str for a live group and, equally, after the
        # underlying identifier has been closed.
        group = self.f.create_group(make_name())
        group.attrs.create('att', 1)
        text = repr(group.attrs)
        self.assertIsInstance(text, str)
        group.id.close()
        self.assertIsInstance(repr(group.attrs), str)
TestRepr
python
huggingface__transformers
src/transformers/models/dia/modeling_dia.py
{ "start": 2641, "end": 3937 }
class DiaMultiChannelEmbedding(nn.Module):
    """In order to efficiently compute the audio embedding from the 9 different channels,
    we vectorize the embedding process by using a single embedding layer and an offset.

    Example:
    - num_embeds = 4
    - vocab_size = 8
    - num_channels = 3

    We would have offsets = [0, 8, 16]
    If audio_codes = [0, 1, 2, 3], [1, 3, 4, 7], [5, 6, 7, 8],
    then tokens = audio_codes + offsets = [0, 1, 2, 3, 9, 11, 12, 15, 21, 22, 23, 24]

    This allows us to use a single embedding layer for all channels.
    """

    def __init__(self, config: DiaDecoderConfig):
        super().__init__()
        # One flat table covering all channels: channel c owns rows
        # [c * vocab_size, (c + 1) * vocab_size).
        self.embed = nn.Embedding(config.vocab_size * config.num_channels, config.hidden_size)
        self.hidden_size = config.hidden_size
        self.num_channels = config.num_channels

        # Per-channel additive offsets into the flat table; non-persistent so
        # they are recomputed rather than serialized in checkpoints.
        offsets = torch.arange(config.num_channels, dtype=torch.long) * config.vocab_size  # (C,)
        self.register_buffer("offsets", offsets, persistent=False)

    def forward(self, audio_codes: torch.Tensor) -> torch.Tensor:
        # Shift each channel's codes into its slice of the flat vocabulary.
        # NOTE(review): the squeeze(1) / view below assume audio_codes carries a
        # (batch, seq, channels)-style layout with a squeezable dim 1 — confirm
        # against the decoder's calling convention.
        tokens = (audio_codes + self.offsets.to(audio_codes.device)).squeeze(1)
        embeds = self.embed(tokens).view(tokens.shape[0], audio_codes.shape[1], -1, self.hidden_size)
        # Sum the per-channel embeddings into a single hidden vector per step.
        return embeds.sum(dim=2)
DiaMultiChannelEmbedding
python
pennersr__django-allauth
tests/apps/socialaccount/providers/eveonline/tests.py
{ "start": 246, "end": 799 }
class EveOnlineTests(OAuth2TestsMixin, TestCase):
    """Provider tests for EVE Online, driven by the shared OAuth2 mixin."""

    provider_id = EveOnlineProvider.id

    def get_mocked_response(self):
        # Canned token-verification payload mimicking the EVE Online API.
        return MockedResponse(
            HTTPStatus.OK,
            """
            { "CharacterID": 273042051,
              "CharacterName": "CCP illurkall",
              "ExpiresOn": "2014-05-23T15:01:15.182864Z",
              "Scopes": " ",
              "TokenType": "Character",
              "CharacterOwnerHash": "XM4D...FoY=" }""",
        )

    def get_expected_to_str(self):
        # str() of the resulting social account should be the character name.
        return "CCP illurkall"
EveOnlineTests
python
huggingface__transformers
src/transformers/models/sew_d/modeling_sew_d.py
{ "start": 20594, "end": 22194 }
class XDropout(torch.autograd.Function):
    """Optimized dropout function to save computation and memory by using mask operation instead of multiplication."""

    @staticmethod
    def forward(ctx, input, local_ctx):
        # get_mask yields the boolean drop-mask plus the effective dropout prob.
        mask, dropout = get_mask(input, local_ctx)
        # Inverted-dropout rescaling so expected activations are unchanged.
        ctx.scale = 1.0 / (1 - dropout)
        if dropout > 0:
            ctx.save_for_backward(mask)
            return input.masked_fill(mask, 0) * ctx.scale
        else:
            # No-op path: nothing saved, gradient passes straight through.
            return input

    @staticmethod
    def backward(ctx, grad_output):
        # scale > 1 iff dropout was actually applied in forward.
        if ctx.scale > 1:
            (mask,) = ctx.saved_tensors
            return grad_output.masked_fill(mask, 0) * ctx.scale, None
        else:
            return grad_output, None

    @staticmethod
    def symbolic(g: torch._C.Graph, input: torch._C.Value, local_ctx: Union[float, DropoutContext]) -> torch._C.Value:
        from torch.onnx import symbolic_opset12

        # Accept either a raw probability or a DropoutContext wrapper.
        dropout_p = local_ctx
        if isinstance(local_ctx, DropoutContext):
            dropout_p = local_ctx.dropout
        # StableDropout only calls this function when training.
        train = True
        # TODO: We should check if the opset_version being used to export
        # is > 12 here, but there's no good way to do that. As-is, if the
        # opset_version < 12, export will fail with a CheckerError.
        # Once https://github.com/pytorch/pytorch/issues/78391 is fixed, do something like:
        # if opset_version < 12:
        #     return torch.onnx.symbolic_opset9.dropout(g, input, dropout_p, train)
        return symbolic_opset12.dropout(g, input, dropout_p, train)
XDropout
python
google__pytype
pytype/rewrite/load_abstract.py
{ "start": 188, "end": 1434 }
class Constants:
  """Store of constants and singletons.

  Constants should be accessed via self[<raw value>], which creates the
  constant if it does not exist. Under the hood, constants are stored in
  self._consts.

  Singletons are stored in self.singles and should be accessed via
  self.singles[<name>]. For convenience, the Any singleton can also be
  accessed as self.Any.
  """

  _SINGLETONS = ('Any', '__build_class__', 'Never', 'NULL')

  def __init__(self, ctx: abstract.ContextType):
    self._ctx = ctx
    # Lazily-populated cache: raw Python value -> wrapped PythonConstant.
    self._consts: dict[Any, abstract.PythonConstant] = {}
    self.singles: dict[str, abstract.Singleton] = {}
    for single in self._SINGLETONS:
      self.singles[single] = abstract.Singleton(
          ctx, single, allow_direct_instantiation=True
      )
    # We use Any all the time, so alias it for convenience.
    self.Any = self.singles['Any']  # pylint: disable=invalid-name

  def __getitem__(self, const: Any):
    # Create-on-first-access so equal raw values share one wrapper object.
    if const not in self._consts:
      self._consts[const] = abstract.PythonConstant(
          self._ctx, const, allow_direct_instantiation=True
      )
    return self._consts[const]


# This is a workaround for a weird pytype crash caused by the use of 'Any' as an
# attribute name.
Constants: Any
Constants
python
joke2k__faker
faker/providers/color/es_CL/__init__.py
{ "start": 63, "end": 103 }
class ____(ColorProvider): pass
Provider
python
sympy__sympy
sympy/logic/algorithms/lra_theory.py
{ "start": 27955, "end": 30115 }
class Boundary:
    """
    Represents an upper or lower bound or an equality between a symbol
    and some constant.

    Attributes:
        var: the LRA variable being bounded.
        bound: the constant value of the bound.
        upper: True for an upper bound, False for a lower bound, None when
            the boundary is an equality.
        equality: True if this boundary is ``var == bound``.
        strict: True for a strict inequality (< / >), False otherwise.
    """

    def __init__(self, var, const, upper, equality, strict=None):
        # equality must be a real boolean, not merely truthy/falsy.
        assert equality in [True, False]
        self.var = var
        if isinstance(const, tuple):
            # const encodes the bound as (value, delta); a nonzero delta
            # marks the inequality as strict.
            s = const[1] != 0
            if strict:
                assert s == strict
            self.bound = const[0]
            # Derive strictness from the tuple. (Previously this value was
            # clobbered by the possibly-None ``strict`` argument, making the
            # tuple form unusable without an explicit ``strict``.)
            strict = s
        else:
            self.bound = const
        self.strict = strict
        # An equality has no upper/lower orientation.
        self.upper = upper if not equality else None
        self.equality = equality
        assert self.strict is not None

    @staticmethod
    def from_upper(var):
        """Build the boundary for ``var``'s upper bound; returns (boundary, sign)."""
        neg = -1 if var.upper_from_neg else 1
        b = Boundary(var, var.upper[0], True, var.upper_from_eq, var.upper[1] != 0)
        if neg < 0:
            b = b.get_negated()
        return b, neg

    @staticmethod
    def from_lower(var):
        """Build the boundary for ``var``'s lower bound; returns (boundary, sign)."""
        neg = -1 if var.lower_from_neg else 1
        b = Boundary(var, var.lower[0], False, var.lower_from_eq, var.lower[1] != 0)
        if neg < 0:
            b = b.get_negated()
        return b, neg

    def get_negated(self):
        """Return the logical negation: flips orientation and strictness."""
        return Boundary(self.var, self.bound, not self.upper, self.equality,
                        not self.strict)

    def get_inequality(self):
        """Render this boundary as a SymPy relational over ``var.var``."""
        if self.equality:
            return Eq(self.var.var, self.bound)
        elif self.upper and self.strict:
            return self.var.var < self.bound
        elif not self.upper and self.strict:
            return self.var.var > self.bound
        elif self.upper:
            return self.var.var <= self.bound
        else:
            return self.var.var >= self.bound

    def __repr__(self):
        # NOTE(review): the outer repr() wraps the result in quotes; looks
        # accidental but is preserved for output compatibility.
        return repr("Boundary(" + repr(self.get_inequality()) + ")")

    def __eq__(self, other):
        # Equality and hash agree on the same 5-tuple of attributes.
        other = (other.var, other.bound, other.strict, other.upper, other.equality)
        return (self.var, self.bound, self.strict, self.upper, self.equality) == other

    def __hash__(self):
        return hash((self.var, self.bound, self.strict, self.upper, self.equality))
Boundary
python
pytorch__pytorch
test/higher_order_ops/test_invoke_subgraph.py
{ "start": 64254, "end": 70641 }
class ____(torch.nn.Module): def forward(self, getitem_6: "f32[8, 8]", getitem_5: "f32[8, 8]", getitem_4: "f32[8, 8]", cos: "f32[8, 8]", tangents_1: "f32[8, 8]"): mul: "f32[8, 8]" = torch.ops.aten.mul.Tensor(tangents_1, cos); tangents_1 = cos = None partitioned_bw_subgraph_0_0 = self.partitioned_bw_subgraph_0_0 invoke_subgraph_3 = torch.ops.higher_order.invoke_subgraph(partitioned_bw_subgraph_0_0, 'partitioned_bw_subgraph_0_0', getitem_4, getitem_5, getitem_6, mul); partitioned_bw_subgraph_0_0 = getitem_4 = getitem_5 = getitem_6 = mul = None getitem_1: "f32[8, 8]" = invoke_subgraph_3[0] getitem_2: "f32[8, 8]" = invoke_subgraph_3[1]; invoke_subgraph_3 = None return (getitem_1, getitem_2) class partitioned_bw_subgraph_0_0(torch.nn.Module): def forward(self, mm: "f32[8, 8]", t: "f32[8, 8]", t_1: "f32[8, 8]", tangents_0: "f32[8, 8]"): cos: "f32[8, 8]" = torch.ops.aten.cos.default(mm); mm = None mul: "f32[8, 8]" = torch.ops.aten.mul.Tensor(tangents_0, cos); tangents_0 = cos = None mm_1: "f32[8, 8]" = torch.ops.aten.mm.default(t, mul); t = None mm_2: "f32[8, 8]" = torch.ops.aten.mm.default(mul, t_1); mul = t_1 = None return (mm_2, mm_1) """, ) def test_const_tensor(self): @nested_compile_region def gn(x): return torch.tensor(64, dtype=torch.float32) * x def fn(x): return gn(x) + gn(x) x = torch.randn(64, requires_grad=True) opt_fn = torch.compile(fn, backend="aot_eager", fullgraph=True) ref = fn(x) res = opt_fn(x) self.assertEqual(ref, res) def test_ac(self): def fn1(x): return torch.cos(x) @nested_compile_region def fn1_checkpoint(x): return torch.utils.checkpoint.checkpoint(fn1, x, use_reentrant=False) def fn2(x): return torch.sin(x) @nested_compile_region def fn2_checkpoint(x): return torch.utils.checkpoint.checkpoint(fn2, x, use_reentrant=False) def fn(x): return ( fn1_checkpoint(x) # repeat the same fn1_checkpoint to see that we dedupe + fn1_checkpoint(x) # Check that a new fn2_checkpoint goes through a different HOP + fn2_checkpoint(x) ) x = torch.randn(8, 
requires_grad=True) ref = fn(x) x_clone = x.clone().detach().requires_grad_(True) backend = AotEagerAndRecordGraphs() res = torch.compile(fn, backend=backend, fullgraph=True)(x_clone) # Run backward ref.sum().backward() res.sum().backward() self.assertEqual(ref, res) self.assertEqual(x.grad, x_clone.grad) # Check that the Dynamo and AOT graphs have just one subgraph module self.assertEqual(len(backend.graphs), 1) self.assertEqual(len(backend.fw_graphs), 1) self.assertEqual(len(backend.bw_graphs), 1) self.count_unique_get_attr_nodes(backend.graphs[0], [], 2) self.count_unique_get_attr_nodes(backend.fw_graphs[0], [], 2) self.count_unique_get_attr_nodes(backend.bw_graphs[0], [], 2) res = torch.compile(fn, backend="inductor", fullgraph=True)(x_clone) self.assertEqual(ref, res) @torch._inductor.config.patch(fallback_random=True) def test_ac_rng(self): def fn1(x): return torch.cos(torch.nn.functional.dropout(x, p=0.5)) @nested_compile_region def fn1_checkpoint(x): return torch.utils.checkpoint.checkpoint(fn1, x, use_reentrant=False) def fn(x): return fn1_checkpoint(x) + fn1_checkpoint(x) x = torch.randn(8, requires_grad=True) torch.manual_seed(0) ref = fn(x) ref.sum().backward() x_clone = x.clone().detach().requires_grad_(True) backend = AotEagerAndRecordGraphs() torch.manual_seed(0) res = torch.compile(fn, backend=backend, fullgraph=True)(x_clone) res.sum().backward() self.assertEqual(ref, res) self.assertEqual(x.grad, x_clone.grad) # Check that the Dynamo and AOT graphs have just one subgraph module self.assertEqual(len(backend.graphs), 1) self.assertEqual(len(backend.fw_graphs), 1) self.assertEqual(len(backend.bw_graphs), 1) torch.manual_seed(0) res = torch.compile(fn, backend="inductor", fullgraph=True)(x_clone) self.assertEqual(ref, res) res.sum().backward() @requires_gpu def test_ac_rng_cudagraphs(self): def fn1(q, k, v): return torch.nn.functional.scaled_dot_product_attention( q, k, v, attn_mask=None, dropout_p=0.5, is_causal=True ) @nested_compile_region def 
fn1_checkpoint(q, k, v): return torch.utils.checkpoint.checkpoint(fn1, q, k, v, use_reentrant=False) def fn(q, k, v): return fn1_checkpoint(q, k, v) + fn1_checkpoint(q.cos(), k, v) q = torch.randn( 1, 1, 32, 32, device=GPU_TYPE, dtype=torch.bfloat16, requires_grad=True ) k = torch.randn( 1, 1, 32, 32, device=GPU_TYPE, dtype=torch.bfloat16, requires_grad=True ) v = torch.randn( 1, 1, 32, 32, device=GPU_TYPE, dtype=torch.bfloat16, requires_grad=True ) res = torch.compile( fn, backend="inductor", fullgraph=True, mode="reduce-overhead" )(q, k, v) res.sum().backward() def test_fake_tensor_checking(self): @nested_compile_region def gn(x): return torch.sin(x) def fn(x, y): # x and y are different shapes, so we should use different graph return gn(x), gn(y) backend = AotEagerAndRecordGraphs() opt_fn = torch.compile(fn, backend=backend, fullgraph=True) x = torch.randn(8, 8, requires_grad=True) y = torch.randn(16, 16, requires_grad=True) ref = fn(x, y) res = opt_fn(x, y) self.assertEqual(ref, res) if not TEST_WITH_CROSSREF: self.assertExpectedInline( normalize_gm(backend.graphs[0].print_readable(print_output=False)), """\
GraphModule
python
apache__airflow
providers/google/tests/unit/google/cloud/operators/test_vertex_ai.py
{ "start": 37088, "end": 48943 }
class ____: @mock.patch(VERTEX_AI_PATH.format("custom_job.Dataset")) @mock.patch(VERTEX_AI_PATH.format("custom_job.CustomJobHook")) def test_execute(self, mock_hook, mock_dataset): mock_hook.return_value.create_custom_training_job.return_value = ( None, "training_id", "custom_job_id", ) op = CreateCustomTrainingJobOperator( task_id=TASK_ID, gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, staging_bucket=STAGING_BUCKET, display_name=DISPLAY_NAME, script_path=PYTHON_PACKAGE, args=PYTHON_PACKAGE_CMDARGS, container_uri=CONTAINER_URI, model_serving_container_image_uri=CONTAINER_URI, requirements=[], replica_count=1, region=GCP_LOCATION, project_id=GCP_PROJECT, dataset_id=TEST_DATASET_ID, parent_model=TEST_PARENT_MODEL, ) op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()}) mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN) mock_dataset.assert_called_once_with(name=TEST_DATASET_ID) mock_hook.return_value.create_custom_training_job.assert_called_once_with( staging_bucket=STAGING_BUCKET, display_name=DISPLAY_NAME, args=PYTHON_PACKAGE_CMDARGS, container_uri=CONTAINER_URI, model_serving_container_image_uri=CONTAINER_URI, script_path=PYTHON_PACKAGE, requirements=[], dataset=mock_dataset.return_value, model_display_name=None, replica_count=REPLICA_COUNT, machine_type=MACHINE_TYPE, accelerator_type=ACCELERATOR_TYPE, accelerator_count=ACCELERATOR_COUNT, training_fraction_split=None, validation_fraction_split=None, test_fraction_split=None, parent_model=TEST_PARENT_MODEL, region=GCP_LOCATION, project_id=GCP_PROJECT, model_serving_container_predict_route=None, model_serving_container_health_route=None, model_serving_container_command=None, model_serving_container_args=None, model_serving_container_environment_variables=None, model_serving_container_ports=None, model_description=None, model_instance_schema_uri=None, model_parameters_schema_uri=None, model_prediction_schema_uri=None, labels=None, 
training_encryption_spec_key_name=None, model_encryption_spec_key_name=None, # RUN annotation_schema_uri=None, model_labels=None, base_output_dir=None, service_account=None, network=None, bigquery_destination=None, environment_variables=None, boot_disk_type="pd-ssd", boot_disk_size_gb=100, training_filter_split=None, validation_filter_split=None, test_filter_split=None, predefined_split_column_name=None, timestamp_split_column_name=None, tensorboard=None, sync=True, is_default_version=None, model_version_aliases=None, model_version_description=None, psc_interface_config=None, ) @mock.patch(VERTEX_AI_PATH.format("custom_job.Dataset")) @mock.patch(VERTEX_AI_PATH.format("custom_job.CustomJobHook")) def test_execute__parent_model_version_index_is_removed(self, mock_hook, mock_dataset): mock_hook.return_value.create_custom_training_job.return_value = ( None, "training_id", "custom_job_id", ) op = CreateCustomTrainingJobOperator( task_id=TASK_ID, gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, staging_bucket=STAGING_BUCKET, display_name=DISPLAY_NAME, script_path=PYTHON_PACKAGE, args=PYTHON_PACKAGE_CMDARGS, container_uri=CONTAINER_URI, model_serving_container_image_uri=CONTAINER_URI, requirements=[], replica_count=1, region=GCP_LOCATION, project_id=GCP_PROJECT, dataset_id=TEST_DATASET_ID, parent_model=VERSIONED_TEST_PARENT_MODEL, ) op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()}) mock_hook.return_value.create_custom_training_job.assert_called_once_with( staging_bucket=STAGING_BUCKET, display_name=DISPLAY_NAME, args=PYTHON_PACKAGE_CMDARGS, container_uri=CONTAINER_URI, model_serving_container_image_uri=CONTAINER_URI, script_path=PYTHON_PACKAGE, requirements=[], dataset=mock_dataset.return_value, model_display_name=None, replica_count=REPLICA_COUNT, machine_type=MACHINE_TYPE, accelerator_type=ACCELERATOR_TYPE, accelerator_count=ACCELERATOR_COUNT, training_fraction_split=None, validation_fraction_split=None, test_fraction_split=None, 
parent_model=TEST_PARENT_MODEL, region=GCP_LOCATION, project_id=GCP_PROJECT, model_serving_container_predict_route=None, model_serving_container_health_route=None, model_serving_container_command=None, model_serving_container_args=None, model_serving_container_environment_variables=None, model_serving_container_ports=None, model_description=None, model_instance_schema_uri=None, model_parameters_schema_uri=None, model_prediction_schema_uri=None, labels=None, training_encryption_spec_key_name=None, model_encryption_spec_key_name=None, # RUN annotation_schema_uri=None, model_labels=None, base_output_dir=None, service_account=None, network=None, bigquery_destination=None, environment_variables=None, boot_disk_type="pd-ssd", boot_disk_size_gb=100, training_filter_split=None, validation_filter_split=None, test_filter_split=None, predefined_split_column_name=None, timestamp_split_column_name=None, tensorboard=None, sync=True, is_default_version=None, model_version_aliases=None, model_version_description=None, psc_interface_config=None, ) @mock.patch(VERTEX_AI_PATH.format("custom_job.CreateCustomTrainingJobOperator.hook")) def test_execute_enters_deferred_state(self, mock_hook): task = CreateCustomTrainingJobOperator( task_id=TASK_ID, gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, staging_bucket=STAGING_BUCKET, display_name=DISPLAY_NAME, script_path=PYTHON_PACKAGE, args=PYTHON_PACKAGE_CMDARGS, container_uri=CONTAINER_URI, model_serving_container_image_uri=CONTAINER_URI, requirements=[], replica_count=1, region=GCP_LOCATION, project_id=GCP_PROJECT, deferrable=True, ) mock_hook.return_value.exists.return_value = False with pytest.raises(TaskDeferred) as exc: task.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()}) assert isinstance(exc.value.trigger, CustomTrainingJobTrigger), ( "Trigger is not a CustomTrainingJobTrigger" ) @mock.patch(VERTEX_AI_LINKS_PATH.format("VertexAIModelLink.persist")) 
@mock.patch(VERTEX_AI_PATH.format("custom_job.CreateCustomTrainingJobOperator.hook.extract_model_id")) @mock.patch(VERTEX_AI_PATH.format("custom_job.CreateCustomTrainingJobOperator.hook")) def test_execute_complete_success( self, mock_hook, hook_extract_model_id, mock_link_persist, ): task = CreateCustomTrainingJobOperator( task_id=TASK_ID, gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, staging_bucket=STAGING_BUCKET, display_name=DISPLAY_NAME, script_path=PYTHON_PACKAGE, args=PYTHON_PACKAGE_CMDARGS, container_uri=CONTAINER_URI, model_serving_container_image_uri=CONTAINER_URI, requirements=[], replica_count=1, region=GCP_LOCATION, project_id=GCP_PROJECT, deferrable=True, ) expected_result = TEST_TRAINING_PIPELINE_DATA["model_to_upload"] hook_extract_model_id.return_value = "test-model" mock_ti = mock.MagicMock() actual_result = task.execute_complete( context={"ti": mock_ti}, event={ "status": "success", "message": "", "job": TEST_TRAINING_PIPELINE_DATA, }, ) mock_ti.xcom_push.assert_called_with(key="model_id", value="test-model") mock_link_persist.assert_called_once_with(context={"ti": mock_ti}, model_id="test-model") assert actual_result == expected_result def test_execute_complete_error_status_raises_exception(self): task = CreateCustomTrainingJobOperator( task_id=TASK_ID, gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, staging_bucket=STAGING_BUCKET, display_name=DISPLAY_NAME, script_path=PYTHON_PACKAGE, args=PYTHON_PACKAGE_CMDARGS, container_uri=CONTAINER_URI, model_serving_container_image_uri=CONTAINER_URI, replica_count=1, region=GCP_LOCATION, project_id=GCP_PROJECT, deferrable=True, ) with pytest.raises(AirflowException): task.execute_complete(context=None, event={"status": "error", "message": "test message"}) @mock.patch(VERTEX_AI_LINKS_PATH.format("VertexAIModelLink.persist")) @mock.patch(VERTEX_AI_PATH.format("custom_job.CreateCustomTrainingJobOperator.hook.extract_model_id")) 
@mock.patch(VERTEX_AI_PATH.format("custom_job.CreateCustomTrainingJobOperator.hook")) def test_execute_complete_no_model_produced( self, mock_hook, hook_extract_model_id, mock_link_persist, ): task = CreateCustomTrainingJobOperator( task_id=TASK_ID, gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, staging_bucket=STAGING_BUCKET, display_name=DISPLAY_NAME, script_path=PYTHON_PACKAGE, args=PYTHON_PACKAGE_CMDARGS, container_uri=CONTAINER_URI, requirements=[], replica_count=1, region=GCP_LOCATION, project_id=GCP_PROJECT, deferrable=True, ) expected_result = None hook_extract_model_id.return_value = None mock_ti = mock.MagicMock() mock_context = {"ti": mock_ti} actual_result = task.execute_complete( context=mock_context, event={"status": "success", "message": "", "job": {}} ) mock_ti.xcom_push.assert_called_once() mock_link_persist.assert_not_called() assert actual_result == expected_result
TestVertexAICreateCustomTrainingJobOperator
python
sphinx-doc__sphinx
sphinx/builders/latex/transforms.py
{ "start": 15455, "end": 16475 }
class BibliographyTransform(SphinxPostTransform):
    """Gather bibliography entries to tail of document.

    Before::

        <document>
            <paragraph>
                blah blah blah
            <citation>
                ...
            <paragraph>
                blah blah blah
            <citation>
                ...
            ...

    After::

        <document>
            <paragraph>
                blah blah blah
            <paragraph>
                blah blah blah
            ...
            <thebibliography>
                <citation>
                    ...
                <citation>
                    ...
    """

    default_priority = 750
    formats = ('latex',)

    def run(self, **kwargs: Any) -> None:
        citations = thebibliography()
        # Materialize the iterator first: we mutate the tree while walking it.
        for node in list(self.document.findall(nodes.citation)):
            node.parent.remove(node)
            citations += node

        # Only append the container when at least one citation was collected.
        if len(citations) > 0:
            self.document += citations  # type: ignore[attr-defined]
BibliographyTransform
python
facebookresearch__faiss
tests/test_contrib.py
{ "start": 2898, "end": 5635 }
class TestExhaustiveSearch(unittest.TestCase):
    """Checks the contrib exhaustive-search helpers against plain Flat indexes."""

    def test_knn_cpu(self):
        # knn() must reproduce IndexFlat search results for both metrics.
        xb = np.random.rand(200, 32).astype('float32')
        xq = np.random.rand(100, 32).astype('float32')

        index = faiss.IndexFlatL2(32)
        index.add(xb)
        Dref, Iref = index.search(xq, 10)

        Dnew, Inew = knn(xq, xb, 10)

        assert np.all(Inew == Iref)
        assert np.allclose(Dref, Dnew)

        index = faiss.IndexFlatIP(32)
        index.add(xb)
        Dref, Iref = index.search(xq, 10)

        Dnew, Inew = knn(xq, xb, 10, metric=faiss.METRIC_INNER_PRODUCT)

        assert np.all(Inew == Iref)
        assert np.allclose(Dref, Dnew)

    def do_test_range(self, metric):
        # Shared body for the range-search ground-truth tests below.
        ds = datasets.SyntheticDataset(32, 0, 1000, 10)
        xq = ds.get_queries()
        xb = ds.get_database()
        # Pick a threshold that yields ~10 results per query on average.
        D, I = faiss.knn(xq, xb, 10, metric=metric)
        threshold = float(D[:, -1].mean())

        index = faiss.IndexFlat(32, metric)
        index.add(xb)
        ref_lims, ref_D, ref_I = index.range_search(xq, threshold)

        # Ground truth computed from a batched database iterator must match.
        new_lims, new_D, new_I = range_ground_truth(
            xq, ds.database_iterator(bs=100), threshold, ngpu=0,
            metric_type=metric)

        evaluation.check_ref_range_results(
            ref_lims, ref_D, ref_I,
            new_lims, new_D, new_I
        )

    def test_range_L2(self):
        self.do_test_range(faiss.METRIC_L2)

    def test_range_IP(self):
        self.do_test_range(faiss.METRIC_INNER_PRODUCT)

    def test_query_iterator(self, metric=faiss.METRIC_L2):
        # range_search_max_results: exact reproduction with a huge budget,
        # then threshold tightening once the budget is exceeded.
        ds = datasets.SyntheticDataset(32, 0, 1000, 1000)
        xq = ds.get_queries()
        xb = ds.get_database()
        D, I = faiss.knn(xq, xb, 10, metric=metric)
        threshold = float(D[:, -1].mean())

        index = faiss.IndexFlat(32, metric)
        index.add(xb)
        ref_lims, ref_D, ref_I = index.range_search(xq, threshold)

        def matrix_iterator(xb, bs):
            for i0 in range(0, xb.shape[0], bs):
                yield xb[i0:i0 + bs]

        # check repro OK
        _, new_lims, new_D, new_I = range_search_max_results(
            index, matrix_iterator(xq, 100), threshold, max_results=1e10)

        evaluation.check_ref_range_results(
            ref_lims, ref_D, ref_I,
            new_lims, new_D, new_I
        )

        # Halve the budget; the helper should tighten the threshold to fit.
        max_res = ref_lims[-1] // 2

        new_threshold, new_lims, new_D, new_I = range_search_max_results(
            index, matrix_iterator(xq, 100), threshold, max_results=max_res)
        self.assertLessEqual(new_lims[-1], max_res)

        # The tightened threshold must reproduce a fresh exact range search.
        ref_lims, ref_D, ref_I = index.range_search(xq, new_threshold)

        evaluation.check_ref_range_results(
            ref_lims, ref_D, ref_I,
            new_lims, new_D, new_I
        )
TestExhaustiveSearch
python
huggingface__transformers
src/transformers/models/qwen3_next/modular_qwen3_next.py
{ "start": 38135, "end": 38424 }
class Qwen3NextForQuestionAnswering(LlamaForQuestionAnswering):
    """Qwen3-Next QA head; behavior is inherited unchanged from the Llama
    implementation (modular-transformers alias)."""

    pass


__all__ = [
    "Qwen3NextForCausalLM",
    "Qwen3NextForQuestionAnswering",
    "Qwen3NextModel",
    "Qwen3NextPreTrainedModel",
    "Qwen3NextForSequenceClassification",
    "Qwen3NextForTokenClassification",
]
Qwen3NextForQuestionAnswering
python
python-markdown__markdown
markdown/extensions/md_in_html.py
{ "start": 19056, "end": 19785 }
class MarkdownInHtmlExtension(Extension):
    """Add Markdown parsing in HTML to Markdown class."""

    def extendMarkdown(self, md):
        """ Register extension instances. """

        # Replace raw HTML preprocessor (same name/priority, new behavior).
        md.preprocessors.register(HtmlBlockPreprocessor(md), 'html_block', 20)
        # Add `blockprocessor` which handles the placeholders for `etree` elements
        md.parser.blockprocessors.register(
            MarkdownInHtmlProcessor(md.parser), 'markdown_block', 105
        )
        # Replace raw HTML postprocessor
        md.postprocessors.register(MarkdownInHTMLPostprocessor(md), 'raw_html', 30)


def makeExtension(**kwargs):  # pragma: no cover
    # Standard Markdown extension entry point.
    return MarkdownInHtmlExtension(**kwargs)
MarkdownInHtmlExtension
python
astropy__astropy
astropy/cosmology/_src/tests/io/test_json.py
{ "start": 2755, "end": 5607 }
class ____(ReadWriteTestMixinBase): """ Tests for a Cosmology[Read/Write] with ``format="json"``. This class will not be directly called by :mod:`pytest` since its name does not begin with ``Test``. To activate the contained tests this class must be inherited in a subclass. Subclasses must define a :func:`pytest.fixture` ``cosmo`` that returns/yields an instance of a |Cosmology|. See ``TestCosmology`` for an example. """ @pytest.fixture(scope="class", autouse=True) def register_and_unregister_json(self): """Setup & teardown for JSON read/write tests.""" # Register readwrite_registry.register_reader("json", Cosmology, read_json, force=True) readwrite_registry.register_writer("json", Cosmology, write_json, force=True) readwrite_registry.register_identifier( "json", Cosmology, json_identify, force=True ) yield # Run all tests in class # Unregister readwrite_registry.unregister_reader("json", Cosmology) readwrite_registry.unregister_writer("json", Cosmology) readwrite_registry.unregister_identifier("json", Cosmology) # ======================================================================== def test_readwrite_json_subclass_partial_info( self, cosmo_cls, cosmo, read, write, tmp_path, add_cu ): """ Test writing from an instance and reading from that class. This works with missing information. 
""" fp = tmp_path / "test_readwrite_json_subclass_partial_info.json" # test write cosmo.write(fp, format="json") # partial information with fp.open() as file: L = file.readline() L = ( L[: L.index('"cosmology":')] + L[L.index(", ") + 2 :] ) # remove cosmology : #203 i = L.index('"Tcmb0":') # delete Tcmb0 L = ( L[:i] + L[L.index(", ", L.index(", ", i) + 1) + 2 :] ) # second occurrence : #203 tempfname = tmp_path / f"{cosmo.name}_temp.json" tempfname.write_text("".join(L)) # read with the same class that wrote fills in the missing info with # the default value got = cosmo_cls.read(tempfname, format="json") got2 = read(tempfname, format="json", cosmology=cosmo_cls) got3 = read(tempfname, format="json", cosmology=cosmo_cls.__qualname__) assert (got == got2) and (got2 == got3) # internal consistency # not equal, because Tcmb0 is changed, which also changes m_nu assert got != cosmo assert got.Tcmb0 == cosmo_cls.parameters["Tcmb0"].default assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo # but the metadata is the same assert got.meta == cosmo.meta
ReadWriteJSONTestMixin
python
pypa__hatch
src/hatch/template/__init__.py
{ "start": 158, "end": 780 }
class ____: def __init__(self, path: Path | None, contents: str = ""): self.path = path self.contents = contents self.feature = None def write(self, root): if self.path is None: # no cov return path = root / self.path path.ensure_parent_dir_exists() path.write_text(self.contents, encoding="utf-8") def find_template_files(module): for name in dir(module): obj = getattr(module, name) if obj is File: continue with suppress(TypeError): if issubclass(obj, File): yield obj
File
python
facebook__pyre-check
stubs/integration_test/fixture_source/integration_test/cache.py
{ "start": 1276, "end": 1359 }
class ____(YetAnotherBase): def method(self, x): pass
YetAnotherOverride2
python
hyperopt__hyperopt
hyperopt/tests/test_base.py
{ "start": 1677, "end": 4153 }
class ____: """ Run some generic sanity-checks of a suggest algorithm to make sure that it respects the semantics expected by e.g. fmin. Use it like this: TestRand = Suggest_API.make_test_class(rand.suggest, 'TestRand') """ @classmethod def make_tst_class(cls, suggest, domain, name): class Tester(unittest.TestCase, cls): def suggest(self, *args, **kwargs): print(args, kwargs) return suggest(*args, **kwargs) def setUp(self): self.domain = domain Tester.__name__ = name return Tester seed_randomizes = True def idxs_vals_from_ids(self, ids, seed): docs = self.suggest(ids, self.domain, Trials(), seed) trials = trials_from_docs(docs) idxs, vals = miscs_to_idxs_vals(trials.miscs) return idxs, vals def test_arbitrary_ids(self): # -- suggest implementations should work for arbitrary ID # values (possibly assuming they are hashable), and the # ID values should have no effect on the return values. ids_1 = [-2, 0, 7, "a", "007", 66, "a3", "899", 23, 2333] ids_2 = ["a", "b", "c", "d", 1, 2, 3, 0.1, 0.2, 0.3] idxs_1, vals_1 = self.idxs_vals_from_ids(ids=ids_1, seed=45) idxs_2, vals_2 = self.idxs_vals_from_ids(ids=ids_2, seed=45) all_ids_1 = set() for var, ids in list(idxs_1.items()): all_ids_1.update(ids) all_ids_2 = set() for var, ids in list(idxs_2.items()): all_ids_2.update(ids) self.assertEqual(all_ids_1, set(ids_1)) self.assertEqual(all_ids_2, set(ids_2)) self.assertEqual(vals_1, vals_2) def test_seed_randomizes(self): # # suggest() algorithms can be either stochastic (e.g. random search) # or deterministic (e.g. grid search). If an suggest implementation # is stochastic, then changing the seed argument should change the # return value. # if not self.seed_randomizes: return # -- sample 20 points to make sure we get some differences even # for small search spaces (chance of false failure is 1/million). 
idxs_1, vals_1 = self.idxs_vals_from_ids(ids=list(range(20)), seed=45) idxs_2, vals_2 = self.idxs_vals_from_ids(ids=list(range(20)), seed=46) self.assertNotEqual((idxs_1, vals_1), (idxs_2, vals_2))
Suggest_API
python
pytorch__pytorch
torch/_inductor/ir.py
{ "start": 143293, "end": 143349 }
class ____(Enum): SYMM_MEM = "symm_mem"
CommBufferType
python
realpython__materials
tic-tac-toe-ai-python/source_code_bonus/tic-tac-toe/frontends/window/renderers.py
{ "start": 167, "end": 797 }
class ____(tk.Tk): def __init__(self, events: Queue) -> None: super().__init__() self.title("Tic-Tac-Toe") self.events = events self.buttons = [] for row in range(3): for col in range(3): button = ttk.Button(master=self, text="", width=5) self.buttons.append(button) button.bind("<ButtonPress-1>", self.on_button_click) button.grid(row=row, column=col, padx=5, pady=5) def on_button_click(self, event): clicked_button = event.widget self.events.put(self.buttons.index(clicked_button))
Window
python
PyCQA__pylint
tests/functional/a/assigning/assigning_non_slot.py
{ "start": 3483, "end": 3735 }
class ____(Unknown): __slots__ = ['yo'] def test(self): self.not_yo = 42 # pylint: disable=wrong-import-order, wrong-import-position from typing import ( Generic, TypeVar, ) TypeT = TypeVar('TypeT')
ClassHavingUnknownAncestors
python
huggingface__transformers
src/transformers/models/biogpt/modular_biogpt.py
{ "start": 1969, "end": 2038 }
class ____(BartScaledWordEmbedding): pass
BioGptScaledWordEmbedding
python
huggingface__transformers
src/transformers/models/csm/modeling_csm.py
{ "start": 19386, "end": 24379 }
class ____(CsmPreTrainedModel): config: CsmDepthDecoderConfig def __init__(self, config): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding((config.num_codebooks * config.vocab_size), config.backbone_hidden_size) self.layers = nn.ModuleList( [CsmDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.norm = CsmRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.rotary_emb = CsmRotaryEmbedding(config=config) self.gradient_checkpointing = False self.inputs_embeds_projector = nn.Linear(config.backbone_hidden_size, config.hidden_size, bias=False) # Initialize weights and apply final processing self.post_init() @check_model_inputs() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, backbone_last_hidden_state: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BaseModelOutputWithPast]: r""" backbone_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, backbone_hidden_size)`, *optional*): The last hidden state of the backbone model. Such input is required when the first codebook token (the one generated by the backbone model) is provided in the `input_ids` argument. """ if position_ids is not None and not is_torchdynamo_compiling(): logger.warning_once( "Custom `position_ids` were provided but will be ignored. CSM depth decoder automatically determines position_ids " "from `cache_position` and as it requires them to be identical across the batch, the provided position_ids will be ignored." 
) position_ids = None if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds.") if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 inputs_seq_length = inputs_embeds.shape[1] if inputs_embeds is not None else input_ids.shape[1] device = inputs_embeds.device if inputs_embeds is not None else input_ids.device cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_seq_length, device=device) if inputs_embeds is None: codebook_idxs = torch.clamp(cache_position - 1, min=0) offset = codebook_idxs * self.vocab_size inputs_embeds = self.embed_tokens(input_ids + offset) input_ids_are_first_codebook = cache_position[0] == 0 if backbone_last_hidden_state is not None: inputs_embeds[:, 0] = backbone_last_hidden_state else: if not is_torchdynamo_compiling() and input_ids_are_first_codebook: logger.warning( "When the first codebook token is provided, `backbone_last_hidden_state` should also be provided for correct inference." 
) inputs_embeds = self.inputs_embeds_projector(inputs_embeds) causal_mask = create_causal_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_ids = cache_position.unsqueeze(0) position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids) for decoder_layer in self.layers[: self.config.num_hidden_layers]: hidden_states = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.norm(hidden_states) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, )
CsmDepthDecoderModel
python
PrefectHQ__prefect
src/prefect/blocks/notifications.py
{ "start": 7089, "end": 12336 }
class ____(AbstractAppriseNotificationBlock): """ Enables sending notifications via a provided PagerDuty webhook. See [Apprise notify_pagerduty docs](https://github.com/caronc/apprise/wiki/Notify_pagerduty) for more info on formatting the URL. Examples: Load a saved PagerDuty webhook and send a message: ```python from prefect.blocks.notifications import PagerDutyWebHook pagerduty_webhook_block = PagerDutyWebHook.load("BLOCK_NAME") pagerduty_webhook_block.notify("Hello from Prefect!") ``` """ _description = "Enables sending notifications via a provided PagerDuty webhook." _block_type_name = "Pager Duty Webhook" _block_type_slug = "pager-duty-webhook" _logo_url = HttpUrl( "https://cdn.sanity.io/images/3ugk85nk/production/8dbf37d17089c1ce531708eac2e510801f7b3aee-250x250.png" ) _documentation_url = HttpUrl( "https://docs.prefect.io/latest/automate/events/automations-triggers#sending-notifications-with-automations" ) # PagerDuty requires a valid severity level from its PAGERDUTY_SEVERITY_MAP notify_type: Literal["info", "success", "warning", "failure"] = Field( # pyright: ignore[reportIncompatibleVariableOverride] default="info", description="The severity of the notification." ) integration_key: SecretStr = Field( default=..., description=( "This can be found on the Events API V2 " "integration's detail page, and is also referred to as a Routing Key. " "This must be provided alongside `api_key`, but will error if provided " "alongside `url`." ), ) api_key: SecretStr = Field( default=..., title="API Key", description=( "This can be found under Integrations. " "This must be provided alongside `integration_key`, but will error if " "provided alongside `url`." ), ) source: Optional[str] = Field( default="Prefect", description="The source string as part of the payload." 
) component: str = Field( default="Notification", description="The component string as part of the payload.", ) group: Optional[str] = Field( default=None, description="The group string as part of the payload." ) class_id: Optional[str] = Field( default=None, title="Class ID", description="The class string as part of the payload.", ) region_name: Literal["us", "eu"] = Field( default="us", description="The region name." ) clickable_url: Optional[AnyHttpUrl] = Field( default=None, title="Clickable URL", description="A clickable URL to associate with the notice.", ) include_image: bool = Field( default=True, description="Associate the notification status via a represented icon.", ) custom_details: Optional[dict[str, str]] = Field( default=None, description="Additional details to include as part of the payload.", examples=['{"disk_space_left": "145GB"}'], ) def block_initialization(self) -> None: try: # Try importing for apprise>=1.18.0 from apprise.plugins.pagerduty import NotifyPagerDuty except ImportError: # Fallback for versions apprise<1.18.0 from apprise.plugins.NotifyPagerDuty import ( # pyright: ignore[reportMissingImports] this is a fallback NotifyPagerDuty, # pyright: ignore[reportUnknownVariableType] incomplete type hints in apprise ) url = SecretStr( NotifyPagerDuty( apikey=self.api_key.get_secret_value(), integrationkey=self.integration_key.get_secret_value(), source=self.source, component=self.component, group=self.group, class_id=self.class_id, region_name=self.region_name, click=self.clickable_url, include_image=self.include_image, details=self.custom_details, ).url() # pyright: ignore[reportUnknownMemberType, reportUnknownArgumentType] incomplete type hints in apprise ) self._start_apprise_client(url) @sync_compatible async def notify( # pyright: ignore[reportIncompatibleMethodOverride] TODO: update to sync only once base class is updated self, body: str, subject: str | None = None, ): """ Apprise will combine subject and body by default, so we need to 
move the body into the custom_details field. custom_details is part of the webhook url, so we need to update the url and restart the client. """ if subject: self.custom_details = self.custom_details or {} self.custom_details.update( {"Prefect Notification Body": body.replace(" ", "%20")} ) body = " " self.block_initialization() await super().notify(body, subject) # pyright: ignore[reportGeneralTypeIssues] TODO: update to sync only once base class is updated
PagerDutyWebHook
python
tensorflow__tensorflow
tensorflow/python/trackable/resource.py
{ "start": 10112, "end": 10820 }
class ____(TrackableResource): """Restored SavedResource.""" def __init__(self, device=""): super().__init__(device=device) @classmethod def _deserialize_from_proto(cls, object_proto, dependencies, **unused_kwargs): obj = cls(device=object_proto.resource.device) resource_creator = dependencies.get("_create_resource") if resource_creator is not None: obj._create_resource = resource_creator # pylint: disable=protected-access return obj def _add_trackable_child(self, name, value): setattr(self, name, value) if (isinstance(value, base.Trackable) and not isinstance(value, def_function.Function)): self._track_trackable(value, name)
RestoredResource
python
apache__airflow
helm-tests/tests/helm_tests/airflow_core/test_worker.py
{ "start": 40149, "end": 43726 }
class ____: """Tests worker keda auto scaler.""" def test_should_add_component_specific_labels(self): docs = render_chart( values={ "executor": "CeleryExecutor", "workers": { "keda": {"enabled": True}, "labels": {"test_label": "test_label_value"}, }, }, show_only=["templates/workers/worker-kedaautoscaler.yaml"], ) assert "test_label" in jmespath.search("metadata.labels", docs[0]) assert jmespath.search("metadata.labels", docs[0])["test_label"] == "test_label_value" def test_should_remove_replicas_field(self): docs = render_chart( values={ "executor": "CeleryExecutor", "workers": { "keda": {"enabled": True}, }, }, show_only=["templates/workers/worker-deployment.yaml"], ) assert "replicas" not in jmespath.search("spec", docs[0]) @pytest.mark.parametrize( ("query", "executor", "expected_query"), [ # default query with CeleryExecutor ( None, "CeleryExecutor", "SELECT ceil(COUNT(*)::decimal / 16) FROM task_instance" " WHERE (state='running' OR state='queued')", ), # default query with CeleryKubernetesExecutor ( None, "CeleryKubernetesExecutor", "SELECT ceil(COUNT(*)::decimal / 16) FROM task_instance" " WHERE (state='running' OR state='queued') AND queue != 'kubernetes'", ), # test custom static query ( "SELECT ceil(COUNT(*)::decimal / 16) FROM task_instance", "CeleryKubernetesExecutor", "SELECT ceil(COUNT(*)::decimal / 16) FROM task_instance", ), # test custom template query ( "SELECT ceil(COUNT(*)::decimal / {{ mul .Values.config.celery.worker_concurrency 2 }})" " FROM task_instance", "CeleryKubernetesExecutor", "SELECT ceil(COUNT(*)::decimal / 32) FROM task_instance", ), ], ) def test_should_use_keda_query(self, query, executor, expected_query): docs = render_chart( values={ "executor": executor, "workers": { "keda": {"enabled": True, **({"query": query} if query else {})}, }, }, show_only=["templates/workers/worker-kedaautoscaler.yaml"], ) assert expected_query == jmespath.search("spec.triggers[0].metadata.query", docs[0]) def test_mysql_db_backend_keda_worker(self): 
docs = render_chart( values={ "data": {"metadataConnection": {"protocol": "mysql"}}, "workers": { "keda": {"enabled": True}, }, }, show_only=["templates/workers/worker-kedaautoscaler.yaml"], ) assert jmespath.search("spec.triggers[0].metadata.queryValue", docs[0]) == "1" assert jmespath.search("spec.triggers[0].metadata.targetQueryValue", docs[0]) is None assert jmespath.search("spec.triggers[0].metadata.connectionStringFromEnv", docs[0]) == "KEDA_DB_CONN" assert jmespath.search("spec.triggers[0].metadata.connectionFromEnv", docs[0]) is None
TestWorkerKedaAutoScaler
python
python-poetry__poetry
tests/inspection/test_lazy_wheel.py
{ "start": 1706, "end": 17146 }
class ____(IntEnum): # numbers must be negative to avoid conflicts with HTTP status codes as_positive = -1 # JFrog Artifactory bug (RTDEV-38572) one_more = -2 # JFrog Artifactory bug (one more byte than requested) def build_head_response( accept_ranges: str | None, content_length: int, response_headers: dict[str, str] ) -> HttpResponse: response_headers["Content-Length"] = str(content_length) if accept_ranges: response_headers["Accept-Ranges"] = accept_ranges return 200, response_headers, b"" def build_partial_response( rng: str, wheel_bytes: bytes, response_headers: dict[str, str], *, negative_offset_failure: NegativeOffsetFailure | None = None, ) -> HttpResponse: status_code = 206 response_headers["Accept-Ranges"] = "bytes" total_length = len(wheel_bytes) if rng.startswith("-"): # negative offset offset = int(rng) if negative_offset_failure == NegativeOffsetFailure.as_positive: # some servers interpret a negative offset like "-10" as "0-10" start = 0 end = min(-offset, total_length - 1) body = wheel_bytes[start : end + 1] elif negative_offset_failure == NegativeOffsetFailure.one_more: # https://github.com/python-poetry/poetry/issues/9056#issuecomment-1973273721 offset -= 1 # one more byte start = total_length + offset # negative start of content range possible! end = total_length - 1 body = wheel_bytes[offset:] response_headers["Content-Length"] = str(-offset) # just wrong... 
else: start = total_length + offset if start < 0: # wheel is smaller than initial chunk size response_headers["Content-Length"] = str(len(wheel_bytes)) return 200, response_headers, wheel_bytes end = total_length - 1 body = wheel_bytes[offset:] else: # range with start and end start, end = map(int, rng.split("-")) body = wheel_bytes[start : end + 1] response_headers["Content-Range"] = f"bytes {start}-{end}/{total_length}" if "Content-Length" not in response_headers: response_headers["Content-Length"] = str(len(body)) return status_code, response_headers, body @pytest.fixture def handle_request_factory( fixture_dir: FixtureDirGetter, package_distribution_lookup: PackageDistributionLookup, ) -> RequestCallbackFactory: def _factory( *, accept_ranges: str | None = "bytes", negative_offset_error: tuple[int, bytes] | None = None, ignore_accept_ranges: bool = False, ) -> HttpRequestCallback: def handle_request(request: PreparedRequest) -> HttpResponse: assert request.url name = Path(urlparse(request.url).path).name wheel = package_distribution_lookup(name) or package_distribution_lookup( "demo-0.1.0-py2.py3-none-any.whl" ) if not wheel: return 404, {}, b"Not Found" wheel_bytes = wheel.read_bytes() response_headers: dict[str, str] = {} if request.method == "HEAD": return build_head_response( accept_ranges, len(wheel_bytes), response_headers ) rng = request.headers.get("Range", "=").split("=")[1] negative_offset_failure = None if negative_offset_error and rng.startswith("-"): if negative_offset_error[0] == codes.requested_range_not_satisfiable: response_headers["Content-Range"] = f"bytes */{len(wheel_bytes)}" if negative_offset_error[0] == NegativeOffsetFailure.as_positive: negative_offset_failure = NegativeOffsetFailure.as_positive elif negative_offset_error[0] == NegativeOffsetFailure.one_more: negative_offset_failure = NegativeOffsetFailure.one_more else: response_headers["Content-Length"] = str( len(negative_offset_error[1]) ) return ( negative_offset_error[0], 
response_headers, negative_offset_error[1], ) if accept_ranges == "bytes" and rng and not ignore_accept_ranges: return build_partial_response( rng, wheel_bytes, response_headers, negative_offset_failure=negative_offset_failure, ) status_code = 200 body = wheel_bytes response_headers["Content-Length"] = str(len(body)) return status_code, response_headers, body return handle_request return _factory @pytest.fixture def assert_metadata_from_wheel_url( http: responses.RequestsMock, handle_request_factory: RequestCallbackFactory, ) -> AssertMetadataFromWheelUrl: def _assertion( *, accept_ranges: str | None = "bytes", negative_offset_error: tuple[int, bytes] | None = None, expected_requests: int = 3, request_callback_wrapper: HttpRequestCallbackWrapper | None = None, redirect: bool = False, ) -> None: http.reset() domain = ( f"lazy-wheel-{negative_offset_error[0] if negative_offset_error else 0}.com" ) uri_regex = re.compile(f"^https://{domain}/.*$") request_callback = handle_request_factory( accept_ranges=accept_ranges, negative_offset_error=negative_offset_error ) if request_callback_wrapper is not None: request_callback = request_callback_wrapper(request_callback) http.add_callback(responses.GET, uri_regex, callback=request_callback) http.add_callback(responses.HEAD, uri_regex, callback=request_callback) if redirect: http_setup_redirect(http, responses.GET, responses.HEAD) url_prefix = "redirect." 
if redirect else "" url = f"https://{url_prefix}{domain}/poetry_core-1.5.0-py3-none-any.whl" metadata = metadata_from_wheel_url("poetry-core", url, requests.Session()) assert metadata["name"] == "poetry-core" assert metadata["version"] == "1.5.0" assert metadata["author"] == "Sébastien Eustace" assert metadata["requires_dist"] == [ 'importlib-metadata (>=1.7.0) ; python_version < "3.8"' ] assert len(http.calls) == expected_requests return _assertion @pytest.mark.parametrize( "negative_offset_error", [ None, (codes.not_found, b"Not found"), # Nexus (codes.method_not_allowed, b"Method not allowed"), (codes.requested_range_not_satisfiable, b"Requested range not satisfiable"), (codes.internal_server_error, b"Internal server error"), # GAR (codes.not_implemented, b"Unsupported client range"), # PyPI (NegativeOffsetFailure.as_positive, b"handle negative offset as positive"), (NegativeOffsetFailure.one_more, b"one more byte than requested"), ], ) def test_metadata_from_wheel_url( assert_metadata_from_wheel_url: AssertMetadataFromWheelUrl, negative_offset_error: tuple[int, bytes] | None, ) -> None: # negative offsets supported: # 1. end of central directory # 2. whole central directory # 3. METADATA file # negative offsets not supported: # 1. failed range request # 2. HEAD request # 3.-5. see negative offsets 1.-3. 
expected_requests = 3 if negative_offset_error: if negative_offset_error[0] in { codes.requested_range_not_satisfiable, NegativeOffsetFailure.as_positive, NegativeOffsetFailure.one_more, }: expected_requests += 1 else: expected_requests += 2 assert_metadata_from_wheel_url( negative_offset_error=negative_offset_error, expected_requests=expected_requests ) # second wheel -> one less request if negative offsets are not supported expected_requests = min(expected_requests, 4) assert_metadata_from_wheel_url( negative_offset_error=negative_offset_error, expected_requests=expected_requests ) def test_metadata_from_wheel_url_416_missing_content_range( assert_metadata_from_wheel_url: AssertMetadataFromWheelUrl, ) -> None: def request_callback_wrapper( request_callback: HttpRequestCallback, ) -> HttpRequestCallback: def _wrapped(request: PreparedRequest) -> HttpResponse: status_code, response_headers, body = request_callback(request) return ( status_code, { header: response_headers[header] for header in response_headers if header.lower() != "content-range" }, body, ) return _wrapped assert_metadata_from_wheel_url( negative_offset_error=( codes.requested_range_not_satisfiable, b"Requested range not satisfiable", ), expected_requests=5, request_callback_wrapper=request_callback_wrapper, ) def test_metadata_from_wheel_url_with_redirect( assert_metadata_from_wheel_url: AssertMetadataFromWheelUrl, ) -> None: assert_metadata_from_wheel_url( negative_offset_error=None, expected_requests=6, redirect=True, ) def test_metadata_from_wheel_url_with_redirect_after_500( assert_metadata_from_wheel_url: AssertMetadataFromWheelUrl, ) -> None: assert_metadata_from_wheel_url( negative_offset_error=(codes.internal_server_error, b"Internal server error"), expected_requests=10, redirect=True, ) @pytest.mark.parametrize( ("negative_offset_failure", "expected_requests"), [ (None, 1), (NegativeOffsetFailure.as_positive, 1), (NegativeOffsetFailure.one_more, 2), ], ) def 
test_metadata_from_wheel_url_smaller_than_initial_chunk_size( http: responses.RequestsMock, handle_request_factory: RequestCallbackFactory, negative_offset_failure: NegativeOffsetFailure | None, expected_requests: int, ) -> None: domain = f"tiny-wheel-{str(negative_offset_failure).casefold()}.com" uri_regex = re.compile(f"^https://{domain}/.*$") request_callback = handle_request_factory( negative_offset_error=( (negative_offset_failure, b"") if negative_offset_failure else None ) ) http.add_callback(responses.GET, uri_regex, callback=request_callback) http.add_callback(responses.HEAD, uri_regex, callback=request_callback) url = f"https://{domain}/zipp-3.5.0-py3-none-any.whl" metadata = metadata_from_wheel_url("zipp", url, requests.Session()) assert metadata["name"] == "zipp" assert metadata["version"] == "3.5.0" assert metadata["author"] == "Jason R. Coombs" assert len(metadata["requires_dist"]) == 12 assert len(http.calls) == expected_requests @pytest.mark.parametrize("accept_ranges", [None, "none"]) def test_metadata_from_wheel_url_range_requests_not_supported_one_request( http: responses.RequestsMock, handle_request_factory: RequestCallbackFactory, accept_ranges: str | None, ) -> None: domain = "no-range-requests.com" uri_regex = re.compile(f"^https://{domain}/.*$") request_callback = handle_request_factory(accept_ranges=accept_ranges) http.add_callback(responses.GET, uri_regex, callback=request_callback) http.add_callback(responses.HEAD, uri_regex, callback=request_callback) url = f"https://{domain}/poetry_core-1.5.0-py3-none-any.whl" with pytest.raises(HTTPRangeRequestUnsupportedError): metadata_from_wheel_url("poetry-core", url, requests.Session()) assert len(http.calls) == 1 assert http.calls[0].request.method == "GET" @pytest.mark.parametrize( "negative_offset_error", [ (codes.method_not_allowed, b"Method not allowed"), (codes.not_implemented, b"Unsupported client range"), ], ) def test_metadata_from_wheel_url_range_requests_not_supported_two_requests( 
http: responses.RequestsMock, handle_request_factory: RequestCallbackFactory, negative_offset_error: tuple[int, bytes], ) -> None: domain = f"no-negative-offsets-{negative_offset_error[0]}.com" uri_regex = re.compile(f"^https://{domain}/.*$") request_callback = handle_request_factory( accept_ranges=None, negative_offset_error=negative_offset_error ) http.add_callback(responses.GET, uri_regex, callback=request_callback) http.add_callback(responses.HEAD, uri_regex, callback=request_callback) url = f"https://{domain}/poetry_core-1.5.0-py3-none-any.whl" with pytest.raises(HTTPRangeRequestUnsupportedError): metadata_from_wheel_url("poetry-core", url, requests.Session()) assert len(http.calls) == 2 assert http.calls[0].request.method == "GET" assert http.calls[1].request.method == "HEAD" def test_metadata_from_wheel_url_range_requests_supported_but_not_respected( http: responses.RequestsMock, handle_request_factory: RequestCallbackFactory, ) -> None: domain = "range-requests-not-respected.com" uri_regex = re.compile(f"^https://{domain}/.*$") request_callback = handle_request_factory( negative_offset_error=(codes.method_not_allowed, b"Method not allowed"), ignore_accept_ranges=True, ) http.add_callback(responses.GET, uri_regex, callback=request_callback) http.add_callback(responses.HEAD, uri_regex, callback=request_callback) url = f"https://{domain}/poetry_core-1.5.0-py3-none-any.whl" with pytest.raises(HTTPRangeRequestNotRespectedError): metadata_from_wheel_url("poetry-core", url, requests.Session()) assert len(http.calls) == 3 assert http.calls[0].request.method == "GET" assert http.calls[1].request.method == "HEAD" assert http.calls[2].request.method == "GET" def test_metadata_from_wheel_url_invalid_wheel( http: responses.RequestsMock, handle_request_factory: RequestCallbackFactory, ) -> None: domain = "invalid-wheel.com" uri_regex = re.compile(f"^https://{domain}/.*$") request_callback = handle_request_factory() http.add_callback(responses.GET, uri_regex, 
callback=request_callback) http.add_callback(responses.HEAD, uri_regex, callback=request_callback) url = f"https://{domain}/demo_missing_dist_info-0.1.0-py2.py3-none-any.whl" with pytest.raises(InvalidWheelError): metadata_from_wheel_url("demo-missing-dist-info", url, requests.Session()) assert len(http.calls) == 1 assert http.calls[0].request.method == "GET" def test_metadata_from_wheel_url_handles_unexpected_errors( mocker: MockerFixture, ) -> None: mocker.patch( "poetry.inspection.lazy_wheel.LazyWheelOverHTTP.read_metadata", side_effect=RuntimeError(), ) with pytest.raises(LazyWheelUnsupportedError): metadata_from_wheel_url( "demo-missing-dist-info", "https://runtime-error.com/demo_missing_dist_info-0.1.0-py2.py3-none-any.whl", requests.Session(), )
NegativeOffsetFailure
python
dagster-io__dagster
python_modules/libraries/dagster-databricks/dagster_databricks/components/databricks_asset_bundle/configs.py
{ "start": 14532, "end": 15687 }
class ____(DatabricksBaseTask): @property def task_type(self) -> str: return DATABRICKS_UNKNOWN_TASK_TYPE @property def task_config_metadata(self) -> Mapping[str, Any]: return {} @classmethod def from_job_task_config(cls, job_task_config: Mapping[str, Any]) -> "DatabricksUnknownTask": # We can't parse config and parameters of Databricks tasks of unknown type task_config = {} task_parameters = {} return DatabricksUnknownTask( task_key=job_task_config["task_key"], task_config=task_config, task_parameters=task_parameters, depends_on=parse_depends_on(job_task_config.get("depends_on", [])), job_name=job_task_config.get("job_name", "unknown"), libraries=job_task_config.get("libraries", []), ) @property def needs_cluster(self) -> bool: return False @property def submit_task_key(self) -> str: return DATABRICKS_UNKNOWN_TASK_TYPE def to_databricks_sdk_task(self) -> jobs.Task: return jobs.Task(task_key=self.task_key) @record_custom
DatabricksUnknownTask
python
sqlalchemy__sqlalchemy
test/orm/test_relationships.py
{ "start": 62900, "end": 65037 }
class ____(fixtures.MappedTest): """test a relationship with a non-column entity in the primary join, is not viewonly, and also has the non-column's clause mentioned in the foreign keys list. """ @classmethod def define_tables(cls, metadata): Table( "tags", metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), Column("data", String(50)), ) Table( "tag_foo", metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), Column("tagid", Integer), Column("data", String(50)), ) def test_basic(self): tag_foo, tags = self.tables.tag_foo, self.tables.tags class Tag(ComparableEntity): pass class TagInstance(ComparableEntity): pass self.mapper_registry.map_imperatively( Tag, tags, properties={ "foo": relationship( TagInstance, primaryjoin=sa.and_( tag_foo.c.data == "iplc_case", tag_foo.c.tagid == tags.c.id, ), foreign_keys=[tag_foo.c.tagid, tag_foo.c.data], ) }, ) self.mapper_registry.map_imperatively(TagInstance, tag_foo) sess = fixture_session() t1 = Tag(data="some tag") t1.foo.append(TagInstance(data="iplc_case")) t1.foo.append(TagInstance(data="not_iplc_case")) sess.add(t1) sess.flush() sess.expunge_all() # relationship works eq_( sess.query(Tag).all(), [Tag(data="some tag", foo=[TagInstance(data="iplc_case")])], ) # both TagInstances were persisted eq_( sess.query(TagInstance).order_by(TagInstance.data).all(), [TagInstance(data="iplc_case"), TagInstance(data="not_iplc_case")], )
FKEquatedToConstantTest
python
getsentry__sentry
src/sentry/snuba/entity_subscription.py
{ "start": 16696, "end": 17606 }
class ____(BaseMetricsEntitySubscription): query_type = SnubaQuery.Type.PERFORMANCE dataset = Dataset.PerformanceMetrics def get_snql_aggregations(self) -> list[str]: return [self.aggregate] def get_snql_extra_conditions(self) -> list[Condition]: return [] def aggregate_query_results( self, data: list[dict[str, Any]], alias: str | None = None ) -> list[dict[str, Any]]: return data def get_granularity(self) -> int: # Both time_window and granularity are in seconds # Time windows <= 1h -> Granularity 60s # Time windows > 1h and <= 24h -> Granularity 1 hour # Time windows > 24h -> Granularity 1 day if self.time_window <= 3600: return 60 elif 3600 < self.time_window <= 24 * 3600: return 3600 else: return 24 * 3600
PerformanceMetricsEntitySubscription
python
scipy__scipy
scipy/integrate/tests/test_integrate.py
{ "start": 7693, "end": 10217 }
class ____: # Check integrate.ode correctly handles solout for dopri5 and dop853 def _run_solout_test(self, integrator): # Check correct usage of solout ts = [] ys = [] t0 = 0.0 tend = 10.0 y0 = [1.0, 2.0] def solout(t, y): ts.append(t) ys.append(y.copy()) def rhs(t, y): return [y[0] + y[1], -y[1]**2] ig = ode(rhs).set_integrator(integrator) ig.set_solout(solout) ig.set_initial_value(y0, t0) ret = ig.integrate(tend) assert_array_equal(ys[0], y0) assert_array_equal(ys[-1], ret) assert_equal(ts[0], t0) assert_equal(ts[-1], tend) def test_solout(self): for integrator in ('dopri5', 'dop853'): self._run_solout_test(integrator) def _run_solout_after_initial_test(self, integrator): # Check if solout works even if it is set after the initial value. ts = [] ys = [] t0 = 0.0 tend = 10.0 y0 = [1.0, 2.0] def solout(t, y): ts.append(t) ys.append(y.copy()) def rhs(t, y): return [y[0] + y[1], -y[1]**2] ig = ode(rhs).set_integrator(integrator) ig.set_initial_value(y0, t0) ig.set_solout(solout) ret = ig.integrate(tend) assert_array_equal(ys[0], y0) assert_array_equal(ys[-1], ret) assert_equal(ts[0], t0) assert_equal(ts[-1], tend) def test_solout_after_initial(self): for integrator in ('dopri5', 'dop853'): self._run_solout_after_initial_test(integrator) def _run_solout_break_test(self, integrator): # Check correct usage of stopping via solout ts = [] ys = [] t0 = 0.0 tend = 10.0 y0 = [1.0, 2.0] def solout(t, y): ts.append(t) ys.append(y.copy()) if t > tend/2.0: return -1 def rhs(t, y): return [y[0] + y[1], -y[1]**2] ig = ode(rhs).set_integrator(integrator) ig.set_solout(solout) ig.set_initial_value(y0, t0) ret = ig.integrate(tend) assert_array_equal(ys[0], y0) assert_array_equal(ys[-1], ret) assert_equal(ts[0], t0) assert_(ts[-1] > tend/2.0) assert_(ts[-1] < tend) def test_solout_break(self): for integrator in ('dopri5', 'dop853'): self._run_solout_break_test(integrator)
TestSolout
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_type_checking/module/direct.py
{ "start": 105, "end": 147 }
class ____(MyBaseClass): foo: Sequence
Foo
python
getsentry__sentry
tests/sentry/models/test_grouprelease.py
{ "start": 257, "end": 1816 }
class ____(TestCase): def test_simple(self) -> None: project = self.create_project() group = self.create_group(project=project) release = Release.objects.create(version="abc", organization_id=project.organization_id) release.add_project(project) env = Environment.objects.create(organization_id=project.organization_id, name="prod") datetime = timezone.now() grouprelease = GroupRelease.get_or_create( group=group, release=release, environment=env, datetime=datetime ) assert grouprelease.project_id == project.id assert grouprelease.group_id == group.id assert grouprelease.release_id == release.id assert grouprelease.environment == "prod" assert grouprelease.first_seen == datetime assert grouprelease.last_seen == datetime datetime_new = timezone.now() + timedelta(days=1) grouprelease = GroupRelease.get_or_create( group=group, release=release, environment=env, datetime=datetime_new ) assert grouprelease.first_seen == datetime assert grouprelease.last_seen == datetime_new datetime_new2 = datetime_new + timedelta(seconds=1) # this should not update immediately as the window is too close grouprelease = GroupRelease.get_or_create( group=group, release=release, environment=env, datetime=datetime_new2 ) assert grouprelease.first_seen == datetime assert grouprelease.last_seen == datetime_new
GetOrCreateTest
python
networkx__networkx
networkx/algorithms/flow/tests/test_maxflow.py
{ "start": 17526, "end": 18940 }
class ____: def test_cutoff(self): k = 5 p = 1000 G = nx.DiGraph() for i in range(k): G.add_edge("s", (i, 0), capacity=2) nx.add_path(G, ((i, j) for j in range(p)), capacity=2) G.add_edge((i, p - 1), "t", capacity=2) R = shortest_augmenting_path(G, "s", "t", two_phase=True, cutoff=k) assert k <= R.graph["flow_value"] <= (2 * k) R = shortest_augmenting_path(G, "s", "t", two_phase=False, cutoff=k) assert k <= R.graph["flow_value"] <= (2 * k) R = edmonds_karp(G, "s", "t", cutoff=k) assert k <= R.graph["flow_value"] <= (2 * k) R = dinitz(G, "s", "t", cutoff=k) assert k <= R.graph["flow_value"] <= (2 * k) R = boykov_kolmogorov(G, "s", "t", cutoff=k) assert k <= R.graph["flow_value"] <= (2 * k) def test_complete_graph_cutoff(self): G = nx.complete_graph(5) nx.set_edge_attributes(G, {(u, v): 1 for u, v in G.edges()}, "capacity") for flow_func in [ shortest_augmenting_path, edmonds_karp, dinitz, boykov_kolmogorov, ]: for cutoff in [3, 2, 1]: result = nx.maximum_flow_value( G, 0, 4, flow_func=flow_func, cutoff=cutoff ) assert cutoff == result, f"cutoff error in {flow_func.__name__}"
TestCutoff
python
dagster-io__dagster
python_modules/dagster/dagster/_core/execution/context/logger.py
{ "start": 2170, "end": 3150 }
class ____(InitLoggerContext): """Logger initialization context outputted by ``build_init_logger_context``. Represents a context whose config has not yet been validated against a logger definition, hence the inability to access the `logger_def` attribute. When an instance of ``UnboundInitLoggerContext`` is passed to ``LoggerDefinition.initialize``, config is validated, and it is subsumed into an `InitLoggerContext`, which contains the logger_def validated against. """ def __init__(self, logger_config: Any, job_def: Optional[JobDefinition]): super().__init__(logger_config, logger_def=None, job_def=job_def, run_id=None) @property def logger_def(self) -> LoggerDefinition: raise DagsterInvariantViolationError( "UnboundInitLoggerContext has not been validated against a logger definition." ) @property def run_id(self) -> Optional[str]: return RUN_ID_PLACEHOLDER
UnboundInitLoggerContext
python
rapidsai__cudf
python/cudf_polars/cudf_polars/typing/__init__.py
{ "start": 6050, "end": 6345 }
class ____(TypedDict): """DataFrame serialization header.""" columns_kwargs: list[ColumnOptions] frame_count: int # Not public in polars yet RankMethod = Literal["ordinal", "dense", "min", "max", "average"] RoundMethod = Literal["half_away_from_zero", "half_to_even"]
DataFrameHeader
python
getsentry__sentry
src/sentry/apidocs/parameters.py
{ "start": 30690, "end": 31300 }
class ____: QUERY = OpenApiParameter( name="query", location="query", required=False, type=str, description="""The name of the Discover query you'd like to filter by.""", ) SORT = OpenApiParameter( name="sortBy", location="query", required=False, type=str, description="""The property to sort results by. If not specified, the results are sorted by query name. Available fields are: - `name` - `dateCreated` - `dateUpdated` - `mostPopular` - `recentlyViewed` - `myqueries` """, )
DiscoverSavedQueriesParams
python
django__django
tests/migrations/migrations_test_apps/with_generic_model/models.py
{ "start": 176, "end": 499 }
class ____[T](models.Model): """A model inheriting from typing.Generic via the PEP 695 syntax.""" # Example from Python docs: # https://typing.python.org/en/latest/spec/generics.html#arbitrary-generic-types-as-base-classes T1 = typing.TypeVar("T1") T2 = typing.TypeVar("T2") T3 = typing.TypeVar("T3")
GenericModelPEP695
python
GoogleCloudPlatform__python-docs-samples
appengine/standard/migration/ndb/redis_cache/main.py
{ "start": 1037, "end": 3037 }
class ____(ndb.Model): """Models an individual Guestbook entry with content and date.""" content = ndb.StringProperty() date = ndb.DateTimeProperty(auto_now_add=True) # [END gae_ndb_redis_cache_greeting] # [START gae_ndb_redis_cache_query] with client.context(global_cache=global_cache): @classmethod def query_book(cls, ancestor_key): return cls.query(ancestor=ancestor_key).order(-cls.date) @app.route("/", methods=["GET"]) def display_guestbook(): guestbook_name = request.args.get("guestbook_name", "") print("GET guestbook name is {}".format(guestbook_name)) with client.context(global_cache=global_cache): ancestor_key = ndb.Key("Book", guestbook_name or "*notitle*") greetings = Greeting.query_book(ancestor_key).fetch(20) # [END gae_ndb_redis_cache_query] greeting_blockquotes = [greeting.content for greeting in greetings] return render_template( "index.html", greeting_blockquotes=greeting_blockquotes, guestbook_name=guestbook_name, ) # [START gae_ndb_redis_cache_submit] @app.route("/sign", methods=["POST"]) def update_guestbook(): # We set the parent key on each 'Greeting' to ensure each guestbook's # greetings are in the same entity group. guestbook_name = request.form.get("guestbook_name", "") print("Guestbook name from the form: {}".format(guestbook_name)) with client.context(global_cache=global_cache): print("Guestbook name from the URL: {}".format(guestbook_name)) greeting = Greeting( parent=ndb.Key("Book", guestbook_name or "*notitle*"), content=request.form.get("content", None), ) greeting.put() # [END gae_ndb_redis_cache_submit] return redirect("/?" + urlencode({"guestbook_name": guestbook_name})) if __name__ == "__main__": # This is used when running locally. app.run(host="127.0.0.1", port=8080, debug=True) # [END gae_ndb_redis_cache]
Greeting
python
kamyu104__LeetCode-Solutions
Python/make-a-positive-array.py
{ "start": 50, "end": 597 }
class ____(object): def makeArrayPositive(self, nums): """ :type nums: List[int] :rtype: int """ MAX_VAL = 10**18 result = 0 prev1 = nums[0]+nums[1] prev2 = nums[0] max_prev3 = 0 for i in xrange(2, len(nums)): prefix = prev1+nums[i] if prefix-max_prev3 <= 0: prefix = prev1+MAX_VAL result += 1 max_prev3 = max(max_prev3, prev2) prev1, prev2 = prefix, prev1 return result
Solution
python
keras-team__keras
keras/src/trainers/data_adapters/grain_dataset_adapter_test.py
{ "start": 606, "end": 8260 }
class ____(testing.TestCase): def _get_dataset(self, dataset_type, worker_count=0, num_threads=0): x = np.random.normal(size=(34, 4)).astype("float32") y = np.random.normal(size=(34, 2)).astype("float32") class MySource(grain.sources.RandomAccessDataSource): def __init__(self, x, y): self.x = x self.y = y def __getitem__(self, idx): return self.x[idx], self.y[idx] def __len__(self): return len(self.x) if dataset_type == "map_dataset": dataset = grain.MapDataset.source(MySource(x, y)).batch( batch_size=16 ) elif dataset_type == "iter_dataset": dataset = ( grain.MapDataset.source(MySource(x, y)) .to_iter_dataset() .batch(batch_size=16) ) else: source = MySource(x, y) dataset = grain.DataLoader( data_source=source, operations=[grain.transforms.Batch(batch_size=16)], shard_options=grain.sharding.NoSharding(), sampler=grain.samplers.IndexSampler( num_records=len(source), num_epochs=1 ), worker_count=worker_count, read_options=grain.ReadOptions(num_threads=num_threads), ) return dataset @parameterized.named_parameters( named_product( dataset_type=["map_dataset", "iter_dataset", "data_loader"] ) ) def test_basic_flow(self, dataset_type): dataset = self._get_dataset(dataset_type) adapter = grain_dataset_adapter.GrainDatasetAdapter(dataset) self.assertEqual(adapter.num_batches, None) self.assertEqual(adapter.batch_size, 16) self.assertEqual(adapter.has_partial_batch, None) self.assertEqual(adapter.partial_batch_size, None) if backend.backend() == "tensorflow": it = adapter.get_tf_dataset() expected_class = tf.Tensor elif backend.backend() == "jax": it = adapter.get_jax_iterator() expected_class = np.ndarray elif backend.backend() == "torch": it = adapter.get_torch_dataloader() expected_class = torch.Tensor else: it = adapter.get_numpy_iterator() expected_class = np.ndarray for i, batch in enumerate(it): self.assertEqual(len(batch), 2) bx, by = batch self.assertIsInstance(bx, expected_class) self.assertIsInstance(by, expected_class) self.assertEqual(bx.dtype, by.dtype) 
self.assertContainsExactSubsequence(str(bx.dtype), "float32") if i < 2: self.assertEqual(bx.shape, (16, 4)) self.assertEqual(by.shape, (16, 2)) else: self.assertEqual(bx.shape, (2, 4)) self.assertEqual(by.shape, (2, 2)) @parameterized.named_parameters( named_product(data_type=["list", "dict", "nested_list", "nested_dict"]) ) def test_nested_data(self, data_type): if data_type not in ("list", "dict", "nested_list", "nested_dict"): raise ValueError( "data_type must be one of 'list', 'dict', 'nested_list' or " f"'nested_dict'. Received: {data_type}" ) class NestedSource(grain.sources.RandomAccessDataSource): def __init__(self, data_type): self.x = np.random.random((40, 4)).astype("float32") self.y = np.random.random((40, 2)).astype("float32") self.data_type = data_type def __len__(self): return len(self.x) def __getitem__(self, idx): x = self.x[idx] y = self.y[idx] if self.data_type == "list": return x, y elif self.data_type == "dict": return {"x": x, "y": y} elif self.data_type == "nested_list": return x, (x, y) elif self.data_type == "nested_dict": return {"data": {"x": x, "y": y}} dataset = grain.MapDataset.source(NestedSource(data_type)).batch( batch_size=4 ) adapter = grain_dataset_adapter.GrainDatasetAdapter(dataset) if backend.backend() == "tensorflow": it = adapter.get_tf_dataset() expected_class = tf.Tensor elif backend.backend() == "jax": it = adapter.get_jax_iterator() expected_class = np.ndarray elif backend.backend() == "torch": it = adapter.get_torch_dataloader() expected_class = torch.Tensor else: it = adapter.get_numpy_iterator() expected_class = np.ndarray for batch in it: if data_type == "list": self.assertEqual(len(batch), 2) bx, by = batch elif data_type == "dict": self.assertEqual(len(batch), 2) bx, by = batch["x"], batch["y"] elif data_type == "nested_list": self.assertEqual(len(batch), 2) bx, (_, by) = batch elif data_type == "nested_dict": self.assertEqual(len(batch["data"]), 2) bx, by = batch["data"]["x"], batch["data"]["y"] 
self.assertIsInstance(bx, expected_class) self.assertIsInstance(by, expected_class) self.assertEqual(bx.dtype, by.dtype) self.assertEqual(bx.shape, (4, 4)) self.assertEqual(by.shape, (4, 2)) def test_multiple_calling_on_iterators(self): dataset = self._get_dataset("iter_dataset") adapter = grain_dataset_adapter.GrainDatasetAdapter(dataset) numpy_it = adapter.get_numpy_iterator() jax_it = adapter.get_jax_iterator() tf_it = adapter.get_tf_dataset() torch_it = adapter.get_torch_dataloader() for it in (numpy_it, jax_it, tf_it, torch_it): for batch in it: self.assertEqual(len(batch), 2) bx, by = batch self.assertEqual(bx.dtype, by.dtype) def test_builtin_prefetch(self): dataset = grain.MapDataset.source(Range2DSource(0, 42)) adapter = grain_dataset_adapter.GrainDatasetAdapter(dataset) self.assertTrue(adapter.builtin_prefetch) def test_num_batches(self): dataset = grain.MapDataset.source(Range2DSource(0, 42)) adapter = grain_dataset_adapter.GrainDatasetAdapter(dataset) self.assertEqual(adapter.num_batches, None) # Test for Infinite Cardinality dataset = grain.MapDataset.source(Range2DSource(0, 42)) dataset = dataset.repeat() adapter = grain_dataset_adapter.GrainDatasetAdapter(dataset) self.assertIsNone(adapter.num_batches) # Test for Unknown Cardinality dataset = dataset.filter(lambda x: True) adapter = grain_dataset_adapter.GrainDatasetAdapter(dataset) self.assertIsNone(adapter.num_batches) def test_invalid_dataset_type(self): with self.assertRaisesRegex( ValueError, ( r"Expected `dataset` to be a grain.MapDataset, " r"grain.IterDataset or grain.DataLoader. " ), ): grain_dataset_adapter.GrainDatasetAdapter( "This is not a grain.Dataset" )
GrainDatasetAdapterTest
python
great-expectations__great_expectations
tests/integration/data_sources_and_expectations/test_misconfigured_expectations.py
{ "start": 1071, "end": 3609 }
class ____: # Currently bugs with the following (not raising misconfiguration errors at all): # - sqlite # - databricks # - mysql # - spark-filesystem-csv _DATA = pd.DataFrame({"a": ["b", "c"]}) _EXPECTATION = gxe.ExpectColumnStdevToBeBetween( column="a", min_value=0, max_value=1, strict_min=True, strict_max=True, ) @parameterize_batch_for_data_sources( data_source_configs=PANDAS_DATA_SOURCES, data=_DATA, ) def test_pandas(self, batch_for_datasource) -> None: self._assert_misconfiguration( batch_for_datasource=batch_for_datasource, exception_message="could not convert string to float", ) @parameterize_batch_for_data_sources( data_source_configs=[BigQueryDatasourceTestConfig()], data=_DATA, ) def test_bigquery(self, batch_for_datasource) -> None: self._assert_misconfiguration( batch_for_datasource=batch_for_datasource, exception_message="No matching signature for operator *", ) @parameterize_batch_for_data_sources( data_source_configs=[MSSQLDatasourceTestConfig()], data=_DATA, ) def test_mssql(self, batch_for_datasource) -> None: self._assert_misconfiguration( batch_for_datasource=batch_for_datasource, exception_message="Error converting data type varchar to float", ) @parameterize_batch_for_data_sources( data_source_configs=[PostgreSQLDatasourceTestConfig()], data=_DATA, ) def test_postgresql(self, batch_for_datasource) -> None: self._assert_misconfiguration( batch_for_datasource=batch_for_datasource, exception_message="operator does not exist: numeric * character varying", ) @parameterize_batch_for_data_sources( data_source_configs=[SnowflakeDatasourceTestConfig()], data=_DATA, ) def test_snowflake(self, batch_for_datasource) -> None: self._assert_misconfiguration( batch_for_datasource=batch_for_datasource, exception_message="Numeric value \\'b\\' is not recognized", ) def _assert_misconfiguration(self, batch_for_datasource: Batch, exception_message: str) -> None: result = batch_for_datasource.validate(self._EXPECTATION) assert not result.success assert 
exception_message in str(result.exception_info)
TestNumericExpectationAgainstStrDataMisconfiguration
python
networkx__networkx
networkx/readwrite/tests/test_graphml.py
{ "start": 155, "end": 12969 }
class ____: @classmethod def setup_class(cls): cls.simple_directed_data = """<?xml version="1.0" encoding="UTF-8"?> <!-- This file was written by the JAVA GraphML Library.--> <graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> <graph id="G" edgedefault="directed"> <node id="n0"/> <node id="n1"/> <node id="n2"/> <node id="n3"/> <node id="n4"/> <node id="n5"/> <node id="n6"/> <node id="n7"/> <node id="n8"/> <node id="n9"/> <node id="n10"/> <edge id="foo" source="n0" target="n2"/> <edge source="n1" target="n2"/> <edge source="n2" target="n3"/> <edge source="n3" target="n5"/> <edge source="n3" target="n4"/> <edge source="n4" target="n6"/> <edge source="n6" target="n5"/> <edge source="n5" target="n7"/> <edge source="n6" target="n8"/> <edge source="n8" target="n7"/> <edge source="n8" target="n9"/> </graph> </graphml>""" cls.simple_directed_graph = nx.DiGraph() cls.simple_directed_graph.add_node("n10") cls.simple_directed_graph.add_edge("n0", "n2", id="foo") cls.simple_directed_graph.add_edge("n0", "n2") cls.simple_directed_graph.add_edges_from( [ ("n1", "n2"), ("n2", "n3"), ("n3", "n5"), ("n3", "n4"), ("n4", "n6"), ("n6", "n5"), ("n5", "n7"), ("n6", "n8"), ("n8", "n7"), ("n8", "n9"), ] ) cls.simple_directed_fh = io.BytesIO(cls.simple_directed_data.encode("UTF-8")) cls.attribute_data = """<?xml version="1.0" encoding="UTF-8"?> <graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> <key id="d0" for="node" attr.name="color" attr.type="string"> <default>yellow</default> </key> <key id="d1" for="edge" attr.name="weight" attr.type="double"/> <graph id="G" edgedefault="directed"> <node id="n0"> <data key="d0">green</data> </node> <node id="n1"/> 
<node id="n2"> <data key="d0">blue</data> </node> <node id="n3"> <data key="d0">red</data> </node> <node id="n4"/> <node id="n5"> <data key="d0">turquoise</data> </node> <edge id="e0" source="n0" target="n2"> <data key="d1">1.0</data> </edge> <edge id="e1" source="n0" target="n1"> <data key="d1">1.0</data> </edge> <edge id="e2" source="n1" target="n3"> <data key="d1">2.0</data> </edge> <edge id="e3" source="n3" target="n2"/> <edge id="e4" source="n2" target="n4"/> <edge id="e5" source="n3" target="n5"/> <edge id="e6" source="n5" target="n4"> <data key="d1">1.1</data> </edge> </graph> </graphml> """ cls.attribute_graph = nx.DiGraph(id="G") cls.attribute_graph.graph["node_default"] = {"color": "yellow"} cls.attribute_graph.add_node("n0", color="green") cls.attribute_graph.add_node("n2", color="blue") cls.attribute_graph.add_node("n3", color="red") cls.attribute_graph.add_node("n4") cls.attribute_graph.add_node("n5", color="turquoise") cls.attribute_graph.add_edge("n0", "n2", id="e0", weight=1.0) cls.attribute_graph.add_edge("n0", "n1", id="e1", weight=1.0) cls.attribute_graph.add_edge("n1", "n3", id="e2", weight=2.0) cls.attribute_graph.add_edge("n3", "n2", id="e3") cls.attribute_graph.add_edge("n2", "n4", id="e4") cls.attribute_graph.add_edge("n3", "n5", id="e5") cls.attribute_graph.add_edge("n5", "n4", id="e6", weight=1.1) cls.attribute_fh = io.BytesIO(cls.attribute_data.encode("UTF-8")) cls.node_attribute_default_data = """<?xml version="1.0" encoding="UTF-8"?> <graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> <key id="d0" for="node" attr.name="boolean_attribute" attr.type="boolean"><default>false</default></key> <key id="d1" for="node" attr.name="int_attribute" attr.type="int"><default>0</default></key> <key id="d2" for="node" attr.name="long_attribute" 
attr.type="long"><default>0</default></key> <key id="d3" for="node" attr.name="float_attribute" attr.type="float"><default>0.0</default></key> <key id="d4" for="node" attr.name="double_attribute" attr.type="double"><default>0.0</default></key> <key id="d5" for="node" attr.name="string_attribute" attr.type="string"><default>Foo</default></key> <graph id="G" edgedefault="directed"> <node id="n0"/> <node id="n1"/> <edge id="e0" source="n0" target="n1"/> </graph> </graphml> """ cls.node_attribute_default_graph = nx.DiGraph(id="G") cls.node_attribute_default_graph.graph["node_default"] = { "boolean_attribute": False, "int_attribute": 0, "long_attribute": 0, "float_attribute": 0.0, "double_attribute": 0.0, "string_attribute": "Foo", } cls.node_attribute_default_graph.add_node("n0") cls.node_attribute_default_graph.add_node("n1") cls.node_attribute_default_graph.add_edge("n0", "n1", id="e0") cls.node_attribute_default_fh = io.BytesIO( cls.node_attribute_default_data.encode("UTF-8") ) cls.attribute_named_key_ids_data = """<?xml version='1.0' encoding='utf-8'?> <graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> <key id="edge_prop" for="edge" attr.name="edge_prop" attr.type="string"/> <key id="prop2" for="node" attr.name="prop2" attr.type="string"/> <key id="prop1" for="node" attr.name="prop1" attr.type="string"/> <graph edgedefault="directed"> <node id="0"> <data key="prop1">val1</data> <data key="prop2">val2</data> </node> <node id="1"> <data key="prop1">val_one</data> <data key="prop2">val2</data> </node> <edge source="0" target="1"> <data key="edge_prop">edge_value</data> </edge> </graph> </graphml> """ cls.attribute_named_key_ids_graph = nx.DiGraph() cls.attribute_named_key_ids_graph.add_node("0", prop1="val1", prop2="val2") cls.attribute_named_key_ids_graph.add_node("1", prop1="val_one", 
prop2="val2") cls.attribute_named_key_ids_graph.add_edge("0", "1", edge_prop="edge_value") fh = io.BytesIO(cls.attribute_named_key_ids_data.encode("UTF-8")) cls.attribute_named_key_ids_fh = fh cls.attribute_numeric_type_data = """<?xml version='1.0' encoding='utf-8'?> <graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> <key attr.name="weight" attr.type="double" for="node" id="d1" /> <key attr.name="weight" attr.type="double" for="edge" id="d0" /> <graph edgedefault="directed"> <node id="n0"> <data key="d1">1</data> </node> <node id="n1"> <data key="d1">2.0</data> </node> <edge source="n0" target="n1"> <data key="d0">1</data> </edge> <edge source="n1" target="n0"> <data key="d0">k</data> </edge> <edge source="n1" target="n1"> <data key="d0">1.0</data> </edge> </graph> </graphml> """ cls.attribute_numeric_type_graph = nx.DiGraph() cls.attribute_numeric_type_graph.add_node("n0", weight=1) cls.attribute_numeric_type_graph.add_node("n1", weight=2.0) cls.attribute_numeric_type_graph.add_edge("n0", "n1", weight=1) cls.attribute_numeric_type_graph.add_edge("n1", "n1", weight=1.0) fh = io.BytesIO(cls.attribute_numeric_type_data.encode("UTF-8")) cls.attribute_numeric_type_fh = fh cls.simple_undirected_data = """<?xml version="1.0" encoding="UTF-8"?> <graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> <graph id="G"> <node id="n0"/> <node id="n1"/> <node id="n2"/> <node id="n10"/> <edge id="foo" source="n0" target="n2"/> <edge source="n1" target="n2"/> <edge source="n2" target="n3"/> </graph> </graphml>""" # <edge source="n8" target="n10" directed="false"/> cls.simple_undirected_graph = nx.Graph() cls.simple_undirected_graph.add_node("n10") 
cls.simple_undirected_graph.add_edge("n0", "n2", id="foo") cls.simple_undirected_graph.add_edges_from([("n1", "n2"), ("n2", "n3")]) fh = io.BytesIO(cls.simple_undirected_data.encode("UTF-8")) cls.simple_undirected_fh = fh cls.undirected_multigraph_data = """<?xml version="1.0" encoding="UTF-8"?> <graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> <graph id="G"> <node id="n0"/> <node id="n1"/> <node id="n2"/> <node id="n10"/> <edge id="e0" source="n0" target="n2"/> <edge id="e1" source="n1" target="n2"/> <edge id="e2" source="n2" target="n1"/> </graph> </graphml>""" cls.undirected_multigraph = nx.MultiGraph() cls.undirected_multigraph.add_node("n10") cls.undirected_multigraph.add_edge("n0", "n2", id="e0") cls.undirected_multigraph.add_edge("n1", "n2", id="e1") cls.undirected_multigraph.add_edge("n2", "n1", id="e2") fh = io.BytesIO(cls.undirected_multigraph_data.encode("UTF-8")) cls.undirected_multigraph_fh = fh cls.undirected_multigraph_no_multiedge_data = """<?xml version="1.0" encoding="UTF-8"?> <graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> <graph id="G"> <node id="n0"/> <node id="n1"/> <node id="n2"/> <node id="n10"/> <edge id="e0" source="n0" target="n2"/> <edge id="e1" source="n1" target="n2"/> <edge id="e2" source="n2" target="n3"/> </graph> </graphml>""" cls.undirected_multigraph_no_multiedge = nx.MultiGraph() cls.undirected_multigraph_no_multiedge.add_node("n10") cls.undirected_multigraph_no_multiedge.add_edge("n0", "n2", id="e0") cls.undirected_multigraph_no_multiedge.add_edge("n1", "n2", id="e1") cls.undirected_multigraph_no_multiedge.add_edge("n2", "n3", id="e2") fh = 
io.BytesIO(cls.undirected_multigraph_no_multiedge_data.encode("UTF-8")) cls.undirected_multigraph_no_multiedge_fh = fh cls.multigraph_only_ids_for_multiedges_data = """<?xml version="1.0" encoding="UTF-8"?> <graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd"> <graph id="G"> <node id="n0"/> <node id="n1"/> <node id="n2"/> <node id="n10"/> <edge source="n0" target="n2"/> <edge id="e1" source="n1" target="n2"/> <edge id="e2" source="n2" target="n1"/> </graph> </graphml>""" cls.multigraph_only_ids_for_multiedges = nx.MultiGraph() cls.multigraph_only_ids_for_multiedges.add_node("n10") cls.multigraph_only_ids_for_multiedges.add_edge("n0", "n2") cls.multigraph_only_ids_for_multiedges.add_edge("n1", "n2", id="e1") cls.multigraph_only_ids_for_multiedges.add_edge("n2", "n1", id="e2") fh = io.BytesIO(cls.multigraph_only_ids_for_multiedges_data.encode("UTF-8")) cls.multigraph_only_ids_for_multiedges_fh = fh
BaseGraphML