language: stringclasses (1 value)
repo: stringclasses (346 values)
path: stringlengths (6 to 201)
class_span: dict
source: stringlengths (21 to 2.38M)
target: stringlengths (1 to 96)
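The rows that follow use these columns as shown: source holds a class definition with its name masked as ____, target holds the masked class name, and class_span gives the character offsets of that class within the file at path. A minimal sketch of reading such rows is below, assuming a JSON Lines export; the file name data.jsonl is hypothetical and not part of this dataset's documented loading path.

import json

# Read one record per line from a JSON Lines export of the dataset
# (the file name below is a placeholder).
with open("data.jsonl") as fh:
    for line in fh:
        row = json.loads(line)
        # `source` contains the class body with its name replaced by "____";
        # `target` is the name that was masked out.
        assert "____" in row["source"]
        span = row["class_span"]  # e.g. {"start": 11957, "end": 16346}
        length = span["end"] - span["start"]
        print(row["repo"], row["path"], row["target"], length)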
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/model_service.py
{ "start": 11957, "end": 16346 }
class ____(GoogleCloudBaseOperator): r""" Lists Models in a Location. :param project_id: Required. The ID of the Google Cloud project that the service belongs to. :param region: Required. The ID of the Google Cloud region that the service belongs to. :param retry: Designation of what errors, if any, should be retried. :param filter: An expression for filtering the results of the request. For field names both snake_case and camelCase are supported. - ``model`` supports = and !=. ``model`` represents the Model ID, Could be in format the last segment of the Model's [resource name][google.cloud.aiplatform.v1.Model.name]. - ``display_name`` supports = and != - ``labels`` supports general map functions that is: -- ``labels.key=value`` - key:value equality -- \`labels.key:\* or labels:key - key existence -- A key including a space must be quoted. ``labels."a key"``. :param page_size: The standard list page size. :param page_token: The standard list page token. Typically obtained via [ListModelsResponse.next_page_token][google.cloud.aiplatform.v1.ListModelsResponse.next_page_token] of the previous [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels] call. :param read_mask: Mask specifying which fields to read. :param order_by: A comma-separated list of fields to order by, sorted in ascending order. Use "desc" after a field name for descending. :param timeout: The timeout for this request. :param metadata: Strings which should be sent along with the request as metadata. :param gcp_conn_id: The connection ID to use connecting to Google Cloud. :param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). 
""" template_fields = ("region", "project_id", "impersonation_chain") operator_extra_links = (VertexAIModelListLink(),) def __init__( self, *, region: str, project_id: str, filter: str | None = None, page_size: int | None = None, page_token: str | None = None, read_mask: str | None = None, order_by: str | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), gcp_conn_id: str = "google_cloud_default", impersonation_chain: str | Sequence[str] | None = None, **kwargs, ) -> None: super().__init__(**kwargs) self.region = region self.project_id = project_id self.filter = filter self.page_size = page_size self.page_token = page_token self.read_mask = read_mask self.order_by = order_by self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain @property def extra_links_params(self) -> dict[str, Any]: return { "project_id": self.project_id, } def execute(self, context: Context): hook = ModelServiceHook( gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain, ) results = hook.list_models( project_id=self.project_id, region=self.region, filter=self.filter, page_size=self.page_size, page_token=self.page_token, read_mask=self.read_mask, order_by=self.order_by, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) VertexAIModelListLink.persist(context=context) return [Model.to_dict(result) for result in results]
ListModelsOperator
python
django__django
tests/logging_tests/tests.py
{ "start": 4353, "end": 7738 }
class ____( SetupDefaultLoggingMixin, LoggingAssertionMixin, LoggingCaptureMixin, SimpleTestCase ): def test_page_found_no_warning(self): self.client.get("/innocent/") self.assertEqual(self.logger_output.getvalue(), "") def test_redirect_no_warning(self): self.client.get("/redirect/") self.assertEqual(self.logger_output.getvalue(), "") def test_page_not_found_warning(self): self.assertLogsRequest( url="/does_not_exist/", level="WARNING", status_code=404, msg="Not Found: /does_not_exist/", ) def test_control_chars_escaped(self): self.assertLogsRequest( url="/%1B[1;31mNOW IN RED!!!1B[0m/", level="WARNING", status_code=404, msg=r"Not Found: /\x1b[1;31mNOW IN RED!!!1B[0m/", ) async def test_async_page_not_found_warning(self): with self.assertLogs("django.request", "WARNING") as cm: await self.async_client.get("/does_not_exist/") self.assertLogRecord(cm, "Not Found: /does_not_exist/", logging.WARNING, 404) async def test_async_control_chars_escaped(self): with self.assertLogs("django.request", "WARNING") as cm: await self.async_client.get(r"/%1B[1;31mNOW IN RED!!!1B[0m/") self.assertLogRecord( cm, r"Not Found: /\x1b[1;31mNOW IN RED!!!1B[0m/", logging.WARNING, 404 ) def test_page_not_found_raised(self): self.assertLogsRequest( url="/does_not_exist_raised/", level="WARNING", status_code=404, msg="Not Found: /does_not_exist_raised/", ) def test_uncaught_exception(self): self.assertLogsRequest( url="/uncaught_exception/", level="ERROR", status_code=500, msg="Internal Server Error: /uncaught_exception/", exc_class=views.UncaughtException, ) def test_internal_server_error(self): self.assertLogsRequest( url="/internal_server_error/", level="ERROR", status_code=500, msg="Internal Server Error: /internal_server_error/", ) def test_internal_server_error_599(self): self.assertLogsRequest( url="/internal_server_error/?status=599", level="ERROR", status_code=599, msg="Unknown Status Code: /internal_server_error/", ) def test_permission_denied(self): self.assertLogsRequest( url="/permission_denied/", level="WARNING", status_code=403, msg="Forbidden (Permission denied): /permission_denied/", exc_class=PermissionDenied, ) def test_multi_part_parser_error(self): self.assertLogsRequest( url="/multi_part_parser_error/", level="WARNING", status_code=400, msg="Bad request (Unable to parse request body): /multi_part_parser_error/", exc_class=MultiPartParserError, ) @override_settings( DEBUG=True, USE_I18N=True, LANGUAGES=[("en", "English")], MIDDLEWARE=[ "django.middleware.locale.LocaleMiddleware", "django.middleware.common.CommonMiddleware", ], ROOT_URLCONF="logging_tests.urls_i18n", )
HandlerLoggingTests
python
Lightning-AI__lightning
tests/tests_pytorch/callbacks/test_weight_averaging.py
{ "start": 1816, "end": 2178 }
class ____(BoringModel): def __init__(self): super().__init__() self.layer = None def configure_model(self): print("XXX configure_model") self.layer = nn.Sequential(nn.Linear(32, 32), nn.ReLU(), nn.Linear(32, 2)) def configure_optimizers(self): return torch.optim.SGD(self.parameters(), lr=0.01)
LargeTestModel
python
spack__spack
lib/spack/spack/spec_parser.py
{ "start": 19008, "end": 24424 }
class ____: """Parse a single spec node from a stream of tokens""" __slots__ = "ctx", "has_version", "literal_str" def __init__(self, ctx, literal_str): self.ctx = ctx self.literal_str = literal_str self.has_version = False def parse( self, initial_spec: Optional["spack.spec.Spec"] = None, root: bool = True ) -> Tuple["spack.spec.Spec", List[str]]: """Parse a single spec node from a stream of tokens Args: initial_spec: object to be constructed root: True if we're parsing a root, False if dependency after ^ or % Return: The object passed as argument """ parser_warnings: List[str] = [] last_compiler = None if initial_spec is None: from spack.spec import Spec initial_spec = Spec() if not self.ctx.next_token or self.ctx.expect(SpecTokens.DEPENDENCY): return initial_spec, parser_warnings # If we start with a package name we have a named spec, we cannot # accept another package name afterwards in a node if self.ctx.accept(SpecTokens.UNQUALIFIED_PACKAGE_NAME): # if name is '*', this is an anonymous spec if self.ctx.current_token.value != "*": initial_spec.name = self.ctx.current_token.value elif self.ctx.accept(SpecTokens.FULLY_QUALIFIED_PACKAGE_NAME): parts = self.ctx.current_token.value.split(".") name = parts[-1] namespace = ".".join(parts[:-1]) initial_spec.name = name initial_spec.namespace = namespace elif self.ctx.accept(SpecTokens.FILENAME): return FileParser(self.ctx).parse(initial_spec), parser_warnings def raise_parsing_error(string: str, cause: Optional[Exception] = None): """Raise a spec parsing error with token context.""" raise SpecParsingError(string, self.ctx.current_token, self.literal_str) from cause def add_flag(name: str, value: Union[str, bool], propagate: bool, concrete: bool): """Wrapper around ``Spec._add_flag()`` that adds parser context to errors raised.""" try: initial_spec._add_flag(name, value, propagate, concrete) except Exception as e: raise_parsing_error(str(e), e) def warn_if_after_compiler(token: str): """Register a warning for %compiler followed by +variant that will in the future apply to the compiler instead of the current root.""" if last_compiler: parser_warnings.append(f"`{token}` should go before `{last_compiler}`") while True: if ( self.ctx.accept(SpecTokens.VERSION_HASH_PAIR) or self.ctx.accept(SpecTokens.GIT_VERSION) or self.ctx.accept(SpecTokens.VERSION) ): if self.has_version: raise_parsing_error("Spec cannot have multiple versions") initial_spec.versions = spack.version.VersionList( [spack.version.from_string(self.ctx.current_token.value[1:])] ) initial_spec.attach_git_version_lookup() self.has_version = True warn_if_after_compiler(self.ctx.current_token.value) elif self.ctx.accept(SpecTokens.BOOL_VARIANT): name = self.ctx.current_token.value[1:].strip() variant_value = self.ctx.current_token.value[0] == "+" add_flag(name, variant_value, propagate=False, concrete=True) warn_if_after_compiler(self.ctx.current_token.value) elif self.ctx.accept(SpecTokens.PROPAGATED_BOOL_VARIANT): name = self.ctx.current_token.value[2:].strip() variant_value = self.ctx.current_token.value[0:2] == "++" add_flag(name, variant_value, propagate=True, concrete=True) warn_if_after_compiler(self.ctx.current_token.value) elif self.ctx.accept(SpecTokens.KEY_VALUE_PAIR): name, value = self.ctx.current_token.value.split("=", maxsplit=1) concrete = name.endswith(":") if concrete: name = name[:-1] add_flag( name, strip_quotes_and_unescape(value), propagate=False, concrete=concrete ) warn_if_after_compiler(self.ctx.current_token.value) elif 
self.ctx.accept(SpecTokens.PROPAGATED_KEY_VALUE_PAIR): name, value = self.ctx.current_token.value.split("==", maxsplit=1) concrete = name.endswith(":") if concrete: name = name[:-1] add_flag(name, strip_quotes_and_unescape(value), propagate=True, concrete=concrete) warn_if_after_compiler(self.ctx.current_token.value) elif self.ctx.expect(SpecTokens.DAG_HASH): if initial_spec.abstract_hash: break self.ctx.accept(SpecTokens.DAG_HASH) initial_spec.abstract_hash = self.ctx.current_token.value[1:] warn_if_after_compiler(self.ctx.current_token.value) else: break return initial_spec, parser_warnings
SpecNodeParser
python
apache__airflow
airflow-core/src/airflow/api_fastapi/core_api/datamodels/connections.py
{ "start": 1151, "end": 2222 }
class ____(BaseModel): """Connection serializer for responses.""" connection_id: str = Field(serialization_alias="connection_id", validation_alias="conn_id") conn_type: str description: str | None host: str | None login: str | None schema_: str | None = Field(alias="schema") port: int | None password: str | None extra: str | None @field_validator("password", mode="after") @classmethod def redact_password(cls, v: str | None, field_info: ValidationInfo) -> str | None: if v is None: return None return str(redact(v, field_info.field_name)) @field_validator("extra", mode="before") @classmethod def redact_extra(cls, v: str | None) -> str | None: if v is None: return None try: extra_dict = json.loads(v) redacted_dict = redact(extra_dict) return json.dumps(redacted_dict) except json.JSONDecodeError: # we can't redact fields in an unstructured `extra` return v
ConnectionResponse
python
bokeh__bokeh
tests/unit/bokeh/server/test_server__server.py
{ "start": 2064, "end": 2118 }
class ____(Model): hooks = List(String)
HookListModel
python
great-expectations__great_expectations
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_stellar_address.py
{ "start": 1907, "end": 4747 }
class ____(ColumnMapExpectation): """Expect column values to be valid Stellar addresses.""" # These examples will be shown in the public gallery. # They will also be executed as unit tests for your Expectation. examples = [ { "data": { "all_valid": [ "GA7YNBW5CBTJZ3ZZOWX3ZNBKD6OE7A7IHUQVWMY62W2ZBG2SGZVOOPVH", "GBTA54J4LY5BAQWA4KECII66TPTU3V6DXPBPNVXIPMHN5W6QFATWRXY5", "GCINDD6LNZSYPND4WRQL6NRFGOAXMAMK7M3QP2JXWC5634BY4DSZ4YG2", "GDKRCHSD2YUW3X6FXRAVOOZZ2IOMWSGM6SH6I56VCX6V2DTPG7FO626W", ], "some_other": [ "1BoatSLRHtKNngkdXEeobR76b53LETtpyT", "n2nzi7xDTrMVK9stGpbK3BtrpBCJfH7LRQ", "3QJmV3qfvL9SuYo34YihAf3sRCW3qSinyC", "bc1qxneu85dnhx33asv8da45x55qyeu44ek9h3vngxdsare", ], }, "tests": [ { "title": "basic_positive_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "all_valid"}, "out": { "success": True, }, }, { "title": "basic_negative_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "some_other", "mostly": 1}, "out": { "success": False, }, }, ], } ] # This is the id string of the Metric used by this Expectation. # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above. map_metric = "column_values.valid_stellar_address" # This is a list of parameter names that can affect whether the Expectation evaluates to True or False success_keys = ("mostly",) # This dictionary contains default values for any parameters that should have default values default_kwarg_values = {} # This object contains metadata for display in the public Gallery library_metadata = { "maturity": "experimental", "tags": [ "hackathon-22", "experimental", "typed-entities", ], # Tags for this Expectation in the Gallery "contributors": [ # Github handles for all contributors to this Expectation. "@szecsip", # Don't forget to add your github handle here! ], "requirements": ["coinaddrvalidator"], } if __name__ == "__main__": ExpectColumnValuesToBeValidStellarAddress().print_diagnostic_checklist()
ExpectColumnValuesToBeValidStellarAddress
python
walkccc__LeetCode
solutions/3443. Maximum Manhattan Distance After K Changes/3443.py
{ "start": 0, "end": 465 }
class ____: def maxDistance(self, s: str, k: int) -> int: return max(self._flip(s, k, 'NE'), self._flip(s, k, 'NW'), self._flip(s, k, 'SE'), self._flip(s, k, 'SW')) def _flip(self, s: str, k: int, direction: str) -> int: res = 0 pos = 0 opposite = 0 for c in s: if c in direction: pos += 1 else: pos -= 1 opposite += 1 res = max(res, pos + 2 * min(k, opposite)) return res
Solution
python
sympy__sympy
sympy/printing/str.py
{ "start": 569, "end": 32591 }
class ____(Printer): printmethod = "_sympystr" _default_settings: dict[str, Any] = { "order": None, "full_prec": "auto", "sympy_integers": False, "abbrev": False, "perm_cyclic": True, "min": None, "max": None, "dps" : None } _relationals: dict[str, str] = {} def parenthesize(self, item, level, strict=False): if (precedence(item) < level) or ((not strict) and precedence(item) <= level): return "(%s)" % self._print(item) else: return self._print(item) def stringify(self, args, sep, level=0): return sep.join([self.parenthesize(item, level) for item in args]) def emptyPrinter(self, expr): if isinstance(expr, str): return expr elif isinstance(expr, Basic): return repr(expr) else: return str(expr) def _print_Add(self, expr, order=None): terms = self._as_ordered_terms(expr, order=order) prec = precedence(expr) l = [] for term in terms: t = self._print(term) if t.startswith('-') and not term.is_Add: sign = "-" t = t[1:] else: sign = "+" if precedence(term) < prec or term.is_Add: l.extend([sign, "(%s)" % t]) else: l.extend([sign, t]) sign = l.pop(0) if sign == '+': sign = "" return sign + ' '.join(l) def _print_BooleanTrue(self, expr): return "True" def _print_BooleanFalse(self, expr): return "False" def _print_Not(self, expr): return '~%s' %(self.parenthesize(expr.args[0],PRECEDENCE["Not"])) def _print_And(self, expr): args = list(expr.args) for j, i in enumerate(args): if isinstance(i, Relational) and ( i.canonical.rhs is S.NegativeInfinity): args.insert(0, args.pop(j)) return self.stringify(args, " & ", PRECEDENCE["BitwiseAnd"]) def _print_Or(self, expr): return self.stringify(expr.args, " | ", PRECEDENCE["BitwiseOr"]) def _print_Xor(self, expr): return self.stringify(expr.args, " ^ ", PRECEDENCE["BitwiseXor"]) def _print_AppliedPredicate(self, expr): return '%s(%s)' % ( self._print(expr.function), self.stringify(expr.arguments, ", ")) def _print_Basic(self, expr): l = [self._print(o) for o in expr.args] return expr.__class__.__name__ + "(%s)" % ", ".join(l) def _print_BlockMatrix(self, B): if B.blocks.shape == (1, 1): self._print(B.blocks[0, 0]) return self._print(B.blocks) def _print_Catalan(self, expr): return 'Catalan' def _print_ComplexInfinity(self, expr): return 'zoo' def _print_ConditionSet(self, s): args = tuple([self._print(i) for i in (s.sym, s.condition)]) if s.base_set is S.UniversalSet: return 'ConditionSet(%s, %s)' % args args += (self._print(s.base_set),) return 'ConditionSet(%s, %s, %s)' % args def _print_Derivative(self, expr): dexpr = expr.expr dvars = [i[0] if i[1] == 1 else i for i in expr.variable_count] return 'Derivative(%s)' % ", ".join((self._print(arg) for arg in [dexpr] + dvars)) def _print_dict(self, d): keys = sorted(d.keys(), key=default_sort_key) items = [] for key in keys: item = "%s: %s" % (self._print(key), self._print(d[key])) items.append(item) return "{%s}" % ", ".join(items) def _print_Dict(self, expr): return self._print_dict(expr) def _print_RandomDomain(self, d): if hasattr(d, 'as_boolean'): return 'Domain: ' + self._print(d.as_boolean()) elif hasattr(d, 'set'): return ('Domain: ' + self._print(d.symbols) + ' in ' + self._print(d.set)) else: return 'Domain on ' + self._print(d.symbols) def _print_Dummy(self, expr): return '_' + expr.name def _print_EulerGamma(self, expr): return 'EulerGamma' def _print_Exp1(self, expr): return 'E' def _print_ExprCondPair(self, expr): return '(%s, %s)' % (self._print(expr.expr), self._print(expr.cond)) def _print_Function(self, expr): return expr.func.__name__ + "(%s)" % self.stringify(expr.args, ", ") def 
_print_GoldenRatio(self, expr): return 'GoldenRatio' def _print_Heaviside(self, expr): # Same as _print_Function but uses pargs to suppress default 1/2 for # 2nd args return expr.func.__name__ + "(%s)" % self.stringify(expr.pargs, ", ") def _print_TribonacciConstant(self, expr): return 'TribonacciConstant' def _print_ImaginaryUnit(self, expr): return 'I' def _print_Infinity(self, expr): return 'oo' def _print_Integral(self, expr): def _xab_tostr(xab): if len(xab) == 1: return self._print(xab[0]) else: return self._print((xab[0],) + tuple(xab[1:])) L = ', '.join([_xab_tostr(l) for l in expr.limits]) return 'Integral(%s, %s)' % (self._print(expr.function), L) def _print_Interval(self, i): fin = 'Interval{m}({a}, {b})' a, b, l, r = i.args if a.is_infinite and b.is_infinite: m = '' elif a.is_infinite and not r: m = '' elif b.is_infinite and not l: m = '' elif not l and not r: m = '' elif l and r: m = '.open' elif l: m = '.Lopen' else: m = '.Ropen' return fin.format(**{'a': a, 'b': b, 'm': m}) def _print_AccumulationBounds(self, i): return "AccumBounds(%s, %s)" % (self._print(i.min), self._print(i.max)) def _print_Inverse(self, I): return "%s**(-1)" % self.parenthesize(I.arg, PRECEDENCE["Pow"]) def _print_Lambda(self, obj): expr = obj.expr sig = obj.signature if len(sig) == 1 and sig[0].is_symbol: sig = sig[0] return "Lambda(%s, %s)" % (self._print(sig), self._print(expr)) def _print_LatticeOp(self, expr): args = sorted(expr.args, key=default_sort_key) return expr.func.__name__ + "(%s)" % ", ".join(self._print(arg) for arg in args) def _print_Limit(self, expr): e, z, z0, dir = expr.args return "Limit(%s, %s, %s, dir='%s')" % tuple(map(self._print, (e, z, z0, dir))) def _print_list(self, expr): return "[%s]" % self.stringify(expr, ", ") def _print_List(self, expr): return self._print_list(expr) def _print_MatrixBase(self, expr): return expr._format_str(self) def _print_MatrixElement(self, expr): return self.parenthesize(expr.parent, PRECEDENCE["Atom"], strict=True) \ + '[%s, %s]' % (self._print(expr.i), self._print(expr.j)) def _print_MatrixSlice(self, expr): def strslice(x, dim): x = list(x) if x[2] == 1: del x[2] if x[0] == 0: x[0] = '' if x[1] == dim: x[1] = '' return ':'.join((self._print(arg) for arg in x)) return (self.parenthesize(expr.parent, PRECEDENCE["Atom"], strict=True) + '[' + strslice(expr.rowslice, expr.parent.rows) + ', ' + strslice(expr.colslice, expr.parent.cols) + ']') def _print_DeferredVector(self, expr): return expr.name def _print_Mul(self, expr): prec = precedence(expr) # Check for unevaluated Mul. In this case we need to make sure the # identities are visible, multiple Rational factors are not combined # etc so we display in a straight-forward form that fully preserves all # args and their order. 
args = expr.args if args[0] is S.One or any( isinstance(a, Number) or a.is_Pow and all(ai.is_Integer for ai in a.args) for a in args[1:]): d, n = sift(args, lambda x: isinstance(x, Pow) and bool(x.exp.as_coeff_Mul()[0] < 0), binary=True) for i, di in enumerate(d): if di.exp.is_Number: e = -di.exp else: dargs = list(di.exp.args) dargs[0] = -dargs[0] e = Mul._from_args(dargs) d[i] = Pow(di.base, e, evaluate=False) if e - 1 else di.base pre = [] # don't parenthesize first factor if negative if n and not n[0].is_Add and n[0].could_extract_minus_sign(): pre = [self._print(n.pop(0))] nfactors = pre + [self.parenthesize(a, prec, strict=False) for a in n] if not nfactors: nfactors = ['1'] # don't parenthesize first of denominator unless singleton if len(d) > 1 and d[0].could_extract_minus_sign(): pre = [self._print(d.pop(0))] else: pre = [] dfactors = pre + [self.parenthesize(a, prec, strict=False) for a in d] n = '*'.join(nfactors) d = '*'.join(dfactors) if len(dfactors) > 1: return '%s/(%s)' % (n, d) elif dfactors: return '%s/%s' % (n, d) return n c, e = expr.as_coeff_Mul() if c < 0: expr = _keep_coeff(-c, e) sign = "-" else: sign = "" a = [] # items in the numerator b = [] # items that are in the denominator (if any) pow_paren = [] # Will collect all pow with more than one base element and exp = -1 if self.order not in ('old', 'none'): args = expr.as_ordered_factors() else: # use make_args in case expr was something like -x -> x args = Mul.make_args(expr) # Gather args for numerator/denominator def apow(i): b, e = i.as_base_exp() eargs = list(Mul.make_args(e)) if eargs[0] is S.NegativeOne: eargs = eargs[1:] else: eargs[0] = -eargs[0] e = Mul._from_args(eargs) if isinstance(i, Pow): return i.func(b, e, evaluate=False) return i.func(e, evaluate=False) for item in args: if (item.is_commutative and isinstance(item, Pow) and bool(item.exp.as_coeff_Mul()[0] < 0)): if item.exp is not S.NegativeOne: b.append(apow(item)) else: if (len(item.args[0].args) != 1 and isinstance(item.base, (Mul, Pow))): # To avoid situations like #14160 pow_paren.append(item) b.append(item.base) elif item.is_Rational and item is not S.Infinity: if item.p != 1: a.append(Rational(item.p)) if item.q != 1: b.append(Rational(item.q)) else: a.append(item) a = a or [S.One] a_str = [self.parenthesize(x, prec, strict=False) for x in a] b_str = [self.parenthesize(x, prec, strict=False) for x in b] # To parenthesize Pow with exp = -1 and having more than one Symbol for item in pow_paren: if item.base in b: b_str[b.index(item.base)] = "(%s)" % b_str[b.index(item.base)] if not b: return sign + '*'.join(a_str) elif len(b) == 1: return sign + '*'.join(a_str) + "/" + b_str[0] else: return sign + '*'.join(a_str) + "/(%s)" % '*'.join(b_str) def _print_MatMul(self, expr): c, m = expr.as_coeff_mmul() sign = "" if c.is_number: re, im = c.as_real_imag() if im.is_zero and re.is_negative: expr = _keep_coeff(-c, m) sign = "-" elif re.is_zero and im.is_negative: expr = _keep_coeff(-c, m) sign = "-" return sign + '*'.join( [self.parenthesize(arg, precedence(expr)) for arg in expr.args] ) def _print_ElementwiseApplyFunction(self, expr): return "{}.({})".format( expr.function, self._print(expr.expr), ) def _print_NaN(self, expr): return 'nan' def _print_NegativeInfinity(self, expr): return '-oo' def _print_Order(self, expr): if not expr.variables or all(p is S.Zero for p in expr.point): if len(expr.variables) <= 1: return 'O(%s)' % self._print(expr.expr) else: return 'O(%s)' % self.stringify((expr.expr,) + expr.variables, ', ', 0) else: return 'O(%s)' % 
self.stringify(expr.args, ', ', 0) def _print_Ordinal(self, expr): return expr.__str__() def _print_Cycle(self, expr): return expr.__str__() def _print_Permutation(self, expr): from sympy.combinatorics.permutations import Permutation, Cycle from sympy.utilities.exceptions import sympy_deprecation_warning perm_cyclic = Permutation.print_cyclic if perm_cyclic is not None: sympy_deprecation_warning( f""" Setting Permutation.print_cyclic is deprecated. Instead use init_printing(perm_cyclic={perm_cyclic}). """, deprecated_since_version="1.6", active_deprecations_target="deprecated-permutation-print_cyclic", stacklevel=7, ) else: perm_cyclic = self._settings.get("perm_cyclic", True) if perm_cyclic: if not expr.size: return '()' # before taking Cycle notation, see if the last element is # a singleton and move it to the head of the string s = Cycle(expr)(expr.size - 1).__repr__()[len('Cycle'):] last = s.rfind('(') if not last == 0 and ',' not in s[last:]: s = s[last:] + s[:last] s = s.replace(',', '') return s else: s = expr.support() if not s: if expr.size < 5: return 'Permutation(%s)' % self._print(expr.array_form) return 'Permutation([], size=%s)' % self._print(expr.size) trim = self._print(expr.array_form[:s[-1] + 1]) + ', size=%s' % self._print(expr.size) use = full = self._print(expr.array_form) if len(trim) < len(full): use = trim return 'Permutation(%s)' % use def _print_Subs(self, obj): expr, old, new = obj.args if len(obj.point) == 1: old = old[0] new = new[0] return "Subs(%s, %s, %s)" % ( self._print(expr), self._print(old), self._print(new)) def _print_TensorIndex(self, expr): return expr._print() def _print_TensorHead(self, expr): return expr._print() def _print_Tensor(self, expr): return expr._print() def _print_TensMul(self, expr): # prints expressions like "A(a)", "3*A(a)", "(1+x)*A(a)" sign, args = expr._get_args_for_traditional_printer() return sign + "*".join( [self.parenthesize(arg, precedence(expr)) for arg in args] ) def _print_TensAdd(self, expr): return expr._print() def _print_ArraySymbol(self, expr): return self._print(expr.name) def _print_ArrayElement(self, expr): return "%s[%s]" % ( self.parenthesize(expr.name, PRECEDENCE["Func"], True), ", ".join([self._print(i) for i in expr.indices])) def _print_PermutationGroup(self, expr): p = [' %s' % self._print(a) for a in expr.args] return 'PermutationGroup([\n%s])' % ',\n'.join(p) def _print_Pi(self, expr): return 'pi' def _print_PolyRing(self, ring): return "Polynomial ring in %s over %s with %s order" % \ (", ".join((self._print(rs) for rs in ring.symbols)), self._print(ring.domain), self._print(ring.order)) def _print_FracField(self, field): return "Rational function field in %s over %s with %s order" % \ (", ".join((self._print(fs) for fs in field.symbols)), self._print(field.domain), self._print(field.order)) def _print_FreeGroupElement(self, elm): return elm.__str__() def _print_GaussianElement(self, poly): return "(%s + %s*I)" % (poly.x, poly.y) def _print_PolyElement(self, poly): return poly.str(self, PRECEDENCE, "%s**%s", "*") def _print_FracElement(self, frac): if frac.denom == 1: return self._print(frac.numer) else: numer = self.parenthesize(frac.numer, PRECEDENCE["Mul"], strict=True) denom = self.parenthesize(frac.denom, PRECEDENCE["Atom"], strict=True) return numer + "/" + denom def _print_Poly(self, expr): ATOM_PREC = PRECEDENCE["Atom"] - 1 terms, gens = [], [ self.parenthesize(s, ATOM_PREC) for s in expr.gens ] for monom, coeff in expr.terms(): s_monom = [] for i, e in enumerate(monom): if e > 0: if e == 1: 
s_monom.append(gens[i]) else: s_monom.append(gens[i] + "**%d" % e) s_monom = "*".join(s_monom) if coeff.is_Add: if s_monom: s_coeff = "(" + self._print(coeff) + ")" else: s_coeff = self._print(coeff) else: if s_monom: if coeff is S.One: terms.extend(['+', s_monom]) continue if coeff is S.NegativeOne: terms.extend(['-', s_monom]) continue s_coeff = self._print(coeff) if not s_monom: s_term = s_coeff else: s_term = s_coeff + "*" + s_monom if s_term.startswith('-'): terms.extend(['-', s_term[1:]]) else: terms.extend(['+', s_term]) if terms[0] in ('-', '+'): modifier = terms.pop(0) if modifier == '-': terms[0] = '-' + terms[0] format = expr.__class__.__name__ + "(%s, %s" from sympy.polys.polyerrors import PolynomialError try: format += ", modulus=%s" % expr.get_modulus() except PolynomialError: format += ", domain='%s'" % expr.get_domain() format += ")" for index, item in enumerate(gens): if len(item) > 2 and (item[:1] == "(" and item[len(item) - 1:] == ")"): gens[index] = item[1:len(item) - 1] return format % (' '.join(terms), ', '.join(gens)) def _print_UniversalSet(self, p): return 'UniversalSet' def _print_AlgebraicNumber(self, expr): if expr.is_aliased: return self._print(expr.as_poly().as_expr()) else: return self._print(expr.as_expr()) def _print_Pow(self, expr, rational=False): """Printing helper function for ``Pow`` Parameters ========== rational : bool, optional If ``True``, it will not attempt printing ``sqrt(x)`` or ``x**S.Half`` as ``sqrt``, and will use ``x**(1/2)`` instead. See examples for additional details Examples ======== >>> from sympy import sqrt, StrPrinter >>> from sympy.abc import x How ``rational`` keyword works with ``sqrt``: >>> printer = StrPrinter() >>> printer._print_Pow(sqrt(x), rational=True) 'x**(1/2)' >>> printer._print_Pow(sqrt(x), rational=False) 'sqrt(x)' >>> printer._print_Pow(1/sqrt(x), rational=True) 'x**(-1/2)' >>> printer._print_Pow(1/sqrt(x), rational=False) '1/sqrt(x)' Notes ===== ``sqrt(x)`` is canonicalized as ``Pow(x, S.Half)`` in SymPy, so there is no need of defining a separate printer for ``sqrt``. Instead, it should be handled here as well. """ PREC = precedence(expr) if expr.exp is S.Half and not rational: return "sqrt(%s)" % self._print(expr.base) if expr.is_commutative: if -expr.exp is S.Half and not rational: # Note: Don't test "expr.exp == -S.Half" here, because that will # match -0.5, which we don't want. return "%s/sqrt(%s)" % tuple((self._print(arg) for arg in (S.One, expr.base))) if expr.exp is -S.One: # Similarly to the S.Half case, don't test with "==" here. return '%s/%s' % (self._print(S.One), self.parenthesize(expr.base, PREC, strict=False)) e = self.parenthesize(expr.exp, PREC, strict=False) if self.printmethod == '_sympyrepr' and expr.exp.is_Rational and expr.exp.q != 1: # the parenthesized exp should be '(Rational(a, b))' so strip parens, # but just check to be sure. 
if e.startswith('(Rational'): return '%s**%s' % (self.parenthesize(expr.base, PREC, strict=False), e[1:-1]) return '%s**%s' % (self.parenthesize(expr.base, PREC, strict=False), e) def _print_UnevaluatedExpr(self, expr): return self._print(expr.args[0]) def _print_MatPow(self, expr): PREC = precedence(expr) return '%s**%s' % (self.parenthesize(expr.base, PREC, strict=False), self.parenthesize(expr.exp, PREC, strict=False)) def _print_Integer(self, expr): if self._settings.get("sympy_integers", False): return "S(%s)" % (expr) return str(expr.p) def _print_Integers(self, expr): return 'Integers' def _print_Naturals(self, expr): return 'Naturals' def _print_Naturals0(self, expr): return 'Naturals0' def _print_Rationals(self, expr): return 'Rationals' def _print_Reals(self, expr): return 'Reals' def _print_Complexes(self, expr): return 'Complexes' def _print_EmptySet(self, expr): return 'EmptySet' def _print_EmptySequence(self, expr): return 'EmptySequence' def _print_int(self, expr): return str(expr) def _print_mpz(self, expr): return str(expr) def _print_Rational(self, expr): if expr.q == 1: return str(expr.p) else: if self._settings.get("sympy_integers", False): return "S(%s)/%s" % (expr.p, expr.q) return "%s/%s" % (expr.p, expr.q) def _print_PythonRational(self, expr): if expr.q == 1: return str(expr.p) else: return "%d/%d" % (expr.p, expr.q) def _print_Fraction(self, expr): if expr.denominator == 1: return str(expr.numerator) else: return "%s/%s" % (expr.numerator, expr.denominator) def _print_mpq(self, expr): if expr.denominator == 1: return str(expr.numerator) else: return "%s/%s" % (expr.numerator, expr.denominator) def _print_Float(self, expr): prec = expr._prec dps = self._settings.get('dps', None) if dps is None: dps = 0 if prec < 5 else prec_to_dps(expr._prec) if self._settings["full_prec"] is True: strip = False elif self._settings["full_prec"] is False: strip = True elif self._settings["full_prec"] == "auto": strip = self._print_level > 1 low = self._settings["min"] if "min" in self._settings else None high = self._settings["max"] if "max" in self._settings else None rv = mlib_to_str(expr._mpf_, dps, strip_zeros=strip, min_fixed=low, max_fixed=high) if rv.startswith('-.0'): rv = '-0.' + rv[3:] elif rv.startswith('.0'): rv = '0.' 
+ rv[2:] rv = rv.removeprefix('+') # e.g., +inf -> inf return rv def _print_Relational(self, expr): charmap = { "==": "Eq", "!=": "Ne", ":=": "Assignment", '+=': "AddAugmentedAssignment", "-=": "SubAugmentedAssignment", "*=": "MulAugmentedAssignment", "/=": "DivAugmentedAssignment", "%=": "ModAugmentedAssignment", } if expr.rel_op in charmap: return '%s(%s, %s)' % (charmap[expr.rel_op], self._print(expr.lhs), self._print(expr.rhs)) return '%s %s %s' % (self.parenthesize(expr.lhs, precedence(expr)), self._relationals.get(expr.rel_op) or expr.rel_op, self.parenthesize(expr.rhs, precedence(expr))) def _print_ComplexRootOf(self, expr): return "CRootOf(%s, %d)" % (self._print_Add(expr.expr, order='lex'), expr.index) def _print_RootSum(self, expr): args = [self._print_Add(expr.expr, order='lex')] if expr.fun is not S.IdentityFunction: args.append(self._print(expr.fun)) return "RootSum(%s)" % ", ".join(args) def _print_GroebnerBasis(self, basis): cls = basis.__class__.__name__ exprs = [self._print_Add(arg, order=basis.order) for arg in basis.exprs] exprs = "[%s]" % ", ".join(exprs) gens = [ self._print(gen) for gen in basis.gens ] domain = "domain='%s'" % self._print(basis.domain) order = "order='%s'" % self._print(basis.order) args = [exprs] + gens + [domain, order] return "%s(%s)" % (cls, ", ".join(args)) def _print_set(self, s): items = sorted(s, key=default_sort_key) args = ', '.join(self._print(item) for item in items) if not args: return "set()" return '{%s}' % args def _print_FiniteSet(self, s): from sympy.sets.sets import FiniteSet items = sorted(s, key=default_sort_key) args = ', '.join(self._print(item) for item in items) if any(item.has(FiniteSet) for item in items): return 'FiniteSet({})'.format(args) return '{{{}}}'.format(args) def _print_Partition(self, s): items = sorted(s, key=default_sort_key) args = ', '.join(self._print(arg) for arg in items) return 'Partition({})'.format(args) def _print_frozenset(self, s): if not s: return "frozenset()" return "frozenset(%s)" % self._print_set(s) def _print_Sum(self, expr): def _xab_tostr(xab): if len(xab) == 1: return self._print(xab[0]) else: return self._print((xab[0],) + tuple(xab[1:])) L = ', '.join([_xab_tostr(l) for l in expr.limits]) return 'Sum(%s, %s)' % (self._print(expr.function), L) def _print_Symbol(self, expr): return expr.name _print_MatrixSymbol = _print_Symbol _print_RandomSymbol = _print_Symbol def _print_Identity(self, expr): return "I" def _print_ZeroMatrix(self, expr): return "0" def _print_OneMatrix(self, expr): return "1" def _print_Predicate(self, expr): return "Q.%s" % expr.name def _print_str(self, expr): return str(expr) def _print_tuple(self, expr): if len(expr) == 1: return "(%s,)" % self._print(expr[0]) else: return "(%s)" % self.stringify(expr, ", ") def _print_Tuple(self, expr): return self._print_tuple(expr) def _print_Transpose(self, T): return "%s.T" % self.parenthesize(T.arg, PRECEDENCE["Pow"]) def _print_Uniform(self, expr): return "Uniform(%s, %s)" % (self._print(expr.a), self._print(expr.b)) def _print_Quantity(self, expr): if self._settings.get("abbrev", False): return "%s" % expr.abbrev return "%s" % expr.name def _print_Quaternion(self, expr): s = [self.parenthesize(i, PRECEDENCE["Mul"], strict=True) for i in expr.args] a = [s[0]] + [i+"*"+j for i, j in zip(s[1:], "ijk")] return " + ".join(a) def _print_Dimension(self, expr): return str(expr) def _print_Wild(self, expr): return expr.name + '_' def _print_WildFunction(self, expr): return expr.name + '_' def _print_WildDot(self, expr): return 
expr.name def _print_WildPlus(self, expr): return expr.name def _print_WildStar(self, expr): return expr.name def _print_Zero(self, expr): if self._settings.get("sympy_integers", False): return "S(0)" return self._print_Integer(Integer(0)) def _print_DMP(self, p): cls = p.__class__.__name__ rep = self._print(p.to_list()) dom = self._print(p.dom) return "%s(%s, %s)" % (cls, rep, dom) def _print_DMF(self, expr): cls = expr.__class__.__name__ num = self._print(expr.num) den = self._print(expr.den) dom = self._print(expr.dom) return "%s(%s, %s, %s)" % (cls, num, den, dom) def _print_Object(self, obj): return 'Object("%s")' % obj.name def _print_IdentityMorphism(self, morphism): return 'IdentityMorphism(%s)' % morphism.domain def _print_NamedMorphism(self, morphism): return 'NamedMorphism(%s, %s, "%s")' % \ (morphism.domain, morphism.codomain, morphism.name) def _print_Category(self, category): return 'Category("%s")' % category.name def _print_Manifold(self, manifold): return manifold.name.name def _print_Patch(self, patch): return patch.name.name def _print_CoordSystem(self, coords): return coords.name.name def _print_BaseScalarField(self, field): return field._coord_sys.symbols[field._index].name def _print_BaseVectorField(self, field): return 'e_%s' % field._coord_sys.symbols[field._index].name def _print_Differential(self, diff): field = diff._form_field if hasattr(field, '_coord_sys'): return 'd%s' % field._coord_sys.symbols[field._index].name else: return 'd(%s)' % self._print(field) def _print_Tr(self, expr): #TODO : Handle indices return "%s(%s)" % ("Tr", self._print(expr.args[0])) def _print_Str(self, s): return self._print(s.name) def _print_AppliedBinaryRelation(self, expr): rel = expr.function return '%s(%s, %s)' % (self._print(rel), self._print(expr.lhs), self._print(expr.rhs)) @print_function(StrPrinter) def sstr(expr, **settings): """Returns the expression as a string. For large expressions where speed is a concern, use the setting order='none'. If abbrev=True setting is used then units are printed in abbreviated form. Examples ======== >>> from sympy import symbols, Eq, sstr >>> a, b = symbols('a b') >>> sstr(Eq(a + b, 0)) 'Eq(a + b, 0)' """ p = StrPrinter(settings) s = p.doprint(expr) return s
StrPrinter
python
kamyu104__LeetCode-Solutions
Python/find-the-power-of-k-size-subarrays-i.py
{ "start": 60, "end": 570 }
class ____(object): def resultsArray(self, nums, k): """ :type nums: List[int] :type k: int :rtype: List[int] """ result = [-1]*(len(nums)-k+1) left = 0 for right in xrange(len(nums)): if nums[right]-nums[left] != right-left: left = right if right-left+1 == k: result[left] = nums[right] left += 1 return result # Time: O(n^2) # Space: O(1) # brute force
Solution
python
conda__conda
conda/exceptions.py
{ "start": 34409, "end": 35582 }
class ____(CondaError, OSError): def __init__(self, path: PathType, errno: int, **kwargs): kwargs.update( { "path": path, "errno": errno, } ) if on_win: message = dals( """ The current user does not have write permissions to a required path. path: %(path)s """ ) else: message = dals( """ The current user does not have write permissions to a required path. path: %(path)s uid: %(uid)s gid: %(gid)s If you feel that permissions on this path are set incorrectly, you can manually change them by executing $ sudo chown %(uid)s:%(gid)s %(path)s In general, it's not advisable to use 'sudo conda'. """ ) kwargs.update( { "uid": os.geteuid(), "gid": os.getegid(), } ) super().__init__(message, **kwargs) self.errno = errno
NotWritableError
python
sympy__sympy
sympy/physics/mechanics/joint.py
{ "start": 41230, "end": 53058 }
class ____(Joint): """Cylindrical Joint. .. image:: CylindricalJoint.svg :align: center :width: 600 Explanation =========== A cylindrical joint is defined such that the child body both rotates about and translates along the body-fixed joint axis with respect to the parent body. The joint axis is both the rotation axis and translation axis. The location of the joint is defined by two points, one in each body, which coincide when the generalized coordinate corresponding to the translation is zero. The direction cosine matrix between the child interframe and parent interframe is formed using a simple rotation about the joint axis. The page on the joints framework gives a more detailed explanation of the intermediate frames. Parameters ========== name : string A unique name for the joint. parent : Particle or RigidBody The parent body of joint. child : Particle or RigidBody The child body of joint. rotation_coordinate : dynamicsymbol, optional Generalized coordinate corresponding to the rotation angle. The default value is ``dynamicsymbols(f'q0_{joint.name}')``. translation_coordinate : dynamicsymbol, optional Generalized coordinate corresponding to the translation distance. The default value is ``dynamicsymbols(f'q1_{joint.name}')``. rotation_speed : dynamicsymbol, optional Generalized speed corresponding to the angular velocity. The default value is ``dynamicsymbols(f'u0_{joint.name}')``. translation_speed : dynamicsymbol, optional Generalized speed corresponding to the translation velocity. The default value is ``dynamicsymbols(f'u1_{joint.name}')``. parent_point : Point or Vector, optional Attachment point where the joint is fixed to the parent body. If a vector is provided, then the attachment point is computed by adding the vector to the body's mass center. The default value is the parent's mass center. child_point : Point or Vector, optional Attachment point where the joint is fixed to the child body. If a vector is provided, then the attachment point is computed by adding the vector to the body's mass center. The default value is the child's mass center. parent_interframe : ReferenceFrame, optional Intermediate frame of the parent body with respect to which the joint transformation is formulated. If a Vector is provided then an interframe is created which aligns its X axis with the given vector. The default value is the parent's own frame. child_interframe : ReferenceFrame, optional Intermediate frame of the child body with respect to which the joint transformation is formulated. If a Vector is provided then an interframe is created which aligns its X axis with the given vector. The default value is the child's own frame. joint_axis : Vector, optional The rotation as well as translation axis. Note that the components of this axis are the same in the parent_interframe and child_interframe. Attributes ========== name : string The joint's name. parent : Particle or RigidBody The joint's parent body. child : Particle or RigidBody The joint's child body. rotation_coordinate : dynamicsymbol Generalized coordinate corresponding to the rotation angle. translation_coordinate : dynamicsymbol Generalized coordinate corresponding to the translation distance. rotation_speed : dynamicsymbol Generalized speed corresponding to the angular velocity. translation_speed : dynamicsymbol Generalized speed corresponding to the translation velocity. coordinates : Matrix Matrix of the joint's generalized coordinates. speeds : Matrix Matrix of the joint's generalized speeds. 
parent_point : Point Attachment point where the joint is fixed to the parent body. child_point : Point Attachment point where the joint is fixed to the child body. parent_interframe : ReferenceFrame Intermediate frame of the parent body with respect to which the joint transformation is formulated. child_interframe : ReferenceFrame Intermediate frame of the child body with respect to which the joint transformation is formulated. kdes : Matrix Kinematical differential equations of the joint. joint_axis : Vector The axis of rotation and translation. Examples ========= A single cylindrical joint is created between two bodies and has the following basic attributes: >>> from sympy.physics.mechanics import RigidBody, CylindricalJoint >>> parent = RigidBody('P') >>> parent P >>> child = RigidBody('C') >>> child C >>> joint = CylindricalJoint('PC', parent, child) >>> joint CylindricalJoint: PC parent: P child: C >>> joint.name 'PC' >>> joint.parent P >>> joint.child C >>> joint.parent_point P_masscenter >>> joint.child_point C_masscenter >>> joint.parent_axis P_frame.x >>> joint.child_axis C_frame.x >>> joint.coordinates Matrix([ [q0_PC(t)], [q1_PC(t)]]) >>> joint.speeds Matrix([ [u0_PC(t)], [u1_PC(t)]]) >>> child.frame.ang_vel_in(parent.frame) u0_PC(t)*P_frame.x >>> child.frame.dcm(parent.frame) Matrix([ [1, 0, 0], [0, cos(q0_PC(t)), sin(q0_PC(t))], [0, -sin(q0_PC(t)), cos(q0_PC(t))]]) >>> joint.child_point.pos_from(joint.parent_point) q1_PC(t)*P_frame.x >>> child.masscenter.vel(parent.frame) u1_PC(t)*P_frame.x To further demonstrate the use of the cylindrical joint, the kinematics of two cylindrical joints perpendicular to each other can be created as follows. >>> from sympy import symbols >>> from sympy.physics.mechanics import RigidBody, CylindricalJoint >>> r, l, w = symbols('r l w') First create bodies to represent the fixed floor with a fixed pole on it. The second body represents a freely moving tube around that pole. The third body represents a solid flag freely translating along and rotating around the Y axis of the tube. >>> floor = RigidBody('floor') >>> tube = RigidBody('tube') >>> flag = RigidBody('flag') The first joint will connect the first tube to the floor with it translating along and rotating around the Z axis of both bodies. >>> floor_joint = CylindricalJoint('C1', floor, tube, joint_axis=floor.z) The second joint will connect the tube perpendicular to the flag along the Y axis of both the tube and the flag, with the joint located at a distance ``r`` from the tube's center of mass and a combination of the distances ``l`` and ``w`` from the flag's center of mass. >>> flag_joint = CylindricalJoint('C2', tube, flag, ... parent_point=r * tube.y, ... child_point=-w * flag.y + l * flag.z, ... joint_axis=tube.y) Once the joints are established the kinematics of the connected bodies can be accessed. 
First the direction cosine matrices of both the body and the flag relative to the floor are found: >>> tube.frame.dcm(floor.frame) Matrix([ [ cos(q0_C1(t)), sin(q0_C1(t)), 0], [-sin(q0_C1(t)), cos(q0_C1(t)), 0], [ 0, 0, 1]]) >>> flag.frame.dcm(floor.frame) Matrix([ [cos(q0_C1(t))*cos(q0_C2(t)), sin(q0_C1(t))*cos(q0_C2(t)), -sin(q0_C2(t))], [ -sin(q0_C1(t)), cos(q0_C1(t)), 0], [sin(q0_C2(t))*cos(q0_C1(t)), sin(q0_C1(t))*sin(q0_C2(t)), cos(q0_C2(t))]]) The position of the flag's center of mass is found with: >>> flag.masscenter.pos_from(floor.masscenter) q1_C1(t)*floor_frame.z + (r + q1_C2(t))*tube_frame.y + w*flag_frame.y - l*flag_frame.z The angular velocities of the two tubes can be computed with respect to the floor. >>> tube.frame.ang_vel_in(floor.frame) u0_C1(t)*floor_frame.z >>> flag.frame.ang_vel_in(floor.frame) u0_C1(t)*floor_frame.z + u0_C2(t)*tube_frame.y Finally, the linear velocities of the two tube centers of mass can be computed with respect to the floor, while expressed in the tube's frame. >>> tube.masscenter.vel(floor.frame).to_matrix(tube.frame) Matrix([ [ 0], [ 0], [u1_C1(t)]]) >>> flag.masscenter.vel(floor.frame).to_matrix(tube.frame).simplify() Matrix([ [-l*u0_C2(t)*cos(q0_C2(t)) - r*u0_C1(t) - w*u0_C1(t) - q1_C2(t)*u0_C1(t)], [ -l*u0_C1(t)*sin(q0_C2(t)) + Derivative(q1_C2(t), t)], [ l*u0_C2(t)*sin(q0_C2(t)) + u1_C1(t)]]) """ def __init__(self, name, parent, child, rotation_coordinate=None, translation_coordinate=None, rotation_speed=None, translation_speed=None, parent_point=None, child_point=None, parent_interframe=None, child_interframe=None, joint_axis=None): self._joint_axis = joint_axis coordinates = (rotation_coordinate, translation_coordinate) speeds = (rotation_speed, translation_speed) super().__init__(name, parent, child, coordinates, speeds, parent_point, child_point, parent_interframe=parent_interframe, child_interframe=child_interframe) def __str__(self): return (f'CylindricalJoint: {self.name} parent: {self.parent} ' f'child: {self.child}') @property def joint_axis(self): """Axis about and along which the rotation and translation occurs.""" return self._joint_axis @property def rotation_coordinate(self): """Generalized coordinate corresponding to the rotation angle.""" return self.coordinates[0] @property def translation_coordinate(self): """Generalized coordinate corresponding to the translation distance.""" return self.coordinates[1] @property def rotation_speed(self): """Generalized speed corresponding to the angular velocity.""" return self.speeds[0] @property def translation_speed(self): """Generalized speed corresponding to the translation velocity.""" return self.speeds[1] def _generate_coordinates(self, coordinates): return self._fill_coordinate_list(coordinates, 2, 'q') def _generate_speeds(self, speeds): return self._fill_coordinate_list(speeds, 2, 'u') def _orient_frames(self): self._joint_axis = self._axis(self.joint_axis, self.parent_interframe) self.child_interframe.orient_axis( self.parent_interframe, self.joint_axis, self.rotation_coordinate) def _set_angular_velocity(self): self.child_interframe.set_ang_vel( self.parent_interframe, self.rotation_speed * self.joint_axis.normalize()) def _set_linear_velocity(self): self.child_point.set_pos( self.parent_point, self.translation_coordinate * self.joint_axis.normalize()) self.parent_point.set_vel(self._parent_frame, 0) self.child_point.set_vel(self._child_frame, 0) self.child_point.set_vel( self._parent_frame, self.translation_speed * self.joint_axis.normalize()) 
self.child.masscenter.v2pt_theory(self.child_point, self._parent_frame, self.child_interframe)
CylindricalJoint
python
astropy__astropy
astropy/cosmology/_src/traits/darkenergy.py
{ "start": 300, "end": 5238 }
class ____: # Subclasses should use `Parameter` to make this a parameter of the cosmology. Ode0: float | np.floating """Omega dark energy; dark energy density/critical density at z=0.""" @abstractmethod def w(self, z: Quantity | ArrayLike, /) -> FArray: r"""The dark energy equation of state. Parameters ---------- z : Quantity-like ['redshift'], array-like Input redshift. .. versionchanged:: 7.0 Passing z as a keyword argument is deprecated. .. versionchanged:: 8.0 z must be a positional argument. Returns ------- w : ndarray or float The dark energy equation of state. `float` if scalar input. Notes ----- The dark energy equation of state is defined as :math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at redshift z and :math:`\rho(z)` is the density at redshift z, both in units where c=1. This must be overridden by subclasses. """ raise NotImplementedError("w(z) is not implemented") def _w_integrand(self, ln1pz: float | FArray, /) -> FArray: """Internal convenience function for w(z) integral (eq. 5 of [1]_). Parameters ---------- ln1pz : `~numbers.Number` or scalar ndarray, positional-only Assumes scalar input, since this should only be called inside an integral. .. versionchanged:: 7.0 The argument is positional-only. References ---------- .. [1] Linder, E. (2003). Exploring the Expansion History of the Universe. Phys. Rev. Lett., 90, 091301. """ return 1.0 + self.w(exp(ln1pz) - 1.0) def de_density_scale(self, z: Quantity | ArrayLike, /) -> FArray: r"""Evaluates the redshift dependence of the dark energy density. Parameters ---------- z : Quantity-like ['redshift'], array-like Input redshift. .. versionchanged:: 7.0 Passing z as a keyword argument is deprecated. .. versionchanged:: 8.0 z must be a positional argument. Returns ------- I : ndarray or float The scaling of the energy density of dark energy with redshift. Returns `float` if the input is scalar. Notes ----- The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`, and is given by .. math:: I = \exp \left( 3 \int_{a}^1 \frac{ da^{\prime} }{ a^{\prime} } \left[ 1 + w\left( a^{\prime} \right) \right] \right) The actual integral used is rewritten from [1]_ to be in terms of z. It will generally helpful for subclasses to overload this method if the integral can be done analytically for the particular dark energy equation of state that they implement. References ---------- .. [1] Linder, E. (2003). Exploring the Expansion History of the Universe. Phys. Rev. Lett., 90, 091301. """ # This allows for an arbitrary w(z) following eq (5) of # Linder 2003, PRL 90, 91301. The code here evaluates # the integral numerically. However, most popular # forms of w(z) are designed to make this integral analytic, # so it is probably a good idea for subclasses to overload this # method if an analytic form is available. z = aszarr(z) ival = ( quad(self._w_integrand, 0, log(z + 1.0))[0] # scalar if z.ndim == 0 else np.asarray([quad(self._w_integrand, 0, log(1 + _z))[0] for _z in z]) ) return np.exp(3 * ival) def Ode(self, z: Quantity | ArrayLike, /) -> FArray: """Return the density parameter for dark energy at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like Input redshift. .. versionchanged:: 7.0 Passing z as a keyword argument is deprecated. .. versionchanged:: 8.0 z must be a positional argument. Returns ------- Ode : ndarray The density of dark energy relative to the critical density at each redshift. 
""" z = aszarr(z) if self.Ode0 == 0: # Common enough to be worth checking explicitly return np.zeros_like(z) # Ensure self.inv_efunc is implemented by the main class if not hasattr(self, "inv_efunc") or not callable(self.inv_efunc): msg = "The main class must implement an 'inv_efunc(z)' method." raise NotImplementedError(msg) return self.Ode0 * self.de_density_scale(z) * self.inv_efunc(z) ** 2
DarkEnergyComponent
python
numpy__numpy
numpy/_core/tests/test_umath_accuracy.py
{ "start": 1905, "end": 5821 }
class ____: @platform_skip def test_validate_transcendentals(self): with np.errstate(all='ignore'): data_dir = path.join(path.dirname(__file__), 'data') files = os.listdir(data_dir) files = list(filter(lambda f: f.endswith('.csv'), files)) for filename in files: filepath = path.join(data_dir, filename) with open(filepath) as fid: file_without_comments = ( r for r in fid if r[0] not in ('$', '#') ) data = np.genfromtxt(file_without_comments, dtype=('|S39', '|S39', '|S39', int), names=('type', 'input', 'output', 'ulperr'), delimiter=',', skip_header=1) npname = path.splitext(filename)[0].split('-')[3] npfunc = getattr(np, npname) for datatype in np.unique(data['type']): data_subset = data[data['type'] == datatype] data_input_str = data_subset['input'].astype(str) data_output_str = data_subset['output'].astype(str) data_type_str = data_subset['type'].astype(str) inval = np.array(str_to_float(data_input_str, data_type_str), dtype=eval(datatype)) outval = np.array(str_to_float(data_output_str, data_type_str), dtype=eval(datatype)) perm = np.random.permutation(len(inval)) inval = inval[perm] outval = outval[perm] maxulperr = data_subset['ulperr'].max() assert_array_max_ulp(npfunc(inval), outval, maxulperr) @pytest.mark.skipif(IS_AVX512FP16, reason="SVML FP16 have slightly higher ULP errors") @pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS) def test_validate_fp16_transcendentals(self, ufunc): with np.errstate(all='ignore'): arr = np.arange(65536, dtype=np.int16) datafp16 = np.frombuffer(arr.tobytes(), dtype=np.float16) datafp32 = datafp16.astype(np.float32) assert_array_max_ulp(ufunc(datafp16), ufunc(datafp32), maxulp=1, dtype=np.float16) @pytest.mark.skipif(not IS_AVX512FP16, reason="lower ULP only apply for SVML FP16") def test_validate_svml_fp16(self): max_ulp_err = { "arccos": 2.54, "arccosh": 2.09, "arcsin": 3.06, "arcsinh": 1.51, "arctan": 2.61, "arctanh": 1.88, "cbrt": 1.57, "cos": 1.43, "cosh": 1.33, "exp2": 1.33, "exp": 1.27, "expm1": 0.53, "log": 1.80, "log10": 1.27, "log1p": 1.88, "log2": 1.80, "sin": 1.88, "sinh": 2.05, "tan": 2.26, "tanh": 3.00, } with np.errstate(all='ignore'): arr = np.arange(65536, dtype=np.int16) datafp16 = np.frombuffer(arr.tobytes(), dtype=np.float16) datafp32 = datafp16.astype(np.float32) for func in max_ulp_err: ufunc = getattr(np, func) ulp = np.ceil(max_ulp_err[func]) assert_array_max_ulp(ufunc(datafp16), ufunc(datafp32), maxulp=ulp, dtype=np.float16)
TestAccuracy
python
allegroai__clearml
clearml/backend_api/services/v2_20/models.py
{ "start": 117052, "end": 120080 }
class ____(Request): """ Publish models :param ids: IDs of the models to publish :type ids: Sequence[str] :param force_publish_task: Publish the associated tasks (if exist) even if they are not in the 'stopped' state. Optional, the default value is False. :type force_publish_task: bool :param publish_tasks: Indicates that the associated tasks (if exist) should be published. Optional, the default value is True. :type publish_tasks: bool """ _service = "models" _action = "publish_many" _version = "2.20" _schema = { "definitions": {}, "properties": { "force_publish_task": { "description": "Publish the associated tasks (if exist) even if they are not in the 'stopped' state. Optional, the default value is False.", "type": "boolean", }, "ids": { "description": "IDs of the models to publish", "items": {"type": "string"}, "type": "array", }, "publish_tasks": { "description": "Indicates that the associated tasks (if exist) should be published. Optional, the default value is True.", "type": "boolean", }, }, "required": ["ids"], "type": "object", } def __init__( self, ids: List[str], force_publish_task: Optional[bool] = None, publish_tasks: Optional[bool] = None, **kwargs: Any ) -> None: super(PublishManyRequest, self).__init__(**kwargs) self.ids = ids self.force_publish_task = force_publish_task self.publish_tasks = publish_tasks @schema_property("ids") def ids(self) -> List[str]: return self._property_ids @ids.setter def ids(self, value: List[str]) -> None: if value is None: self._property_ids = None return self.assert_isinstance(value, "ids", (list, tuple)) self.assert_isinstance(value, "ids", six.string_types, is_array=True) self._property_ids = value @schema_property("force_publish_task") def force_publish_task(self) -> Optional[bool]: return self._property_force_publish_task @force_publish_task.setter def force_publish_task(self, value: Optional[bool]) -> None: if value is None: self._property_force_publish_task = None return self.assert_isinstance(value, "force_publish_task", (bool,)) self._property_force_publish_task = value @schema_property("publish_tasks") def publish_tasks(self) -> Optional[bool]: return self._property_publish_tasks @publish_tasks.setter def publish_tasks(self, value: Optional[bool]) -> None: if value is None: self._property_publish_tasks = None return self.assert_isinstance(value, "publish_tasks", (bool,)) self._property_publish_tasks = value
PublishManyRequest
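As a quick, hypothetical illustration of the generated request class above (assuming the module's Request base class and the surrounding ClearML imports are available), only the required ids argument is needed; the IDs below are placeholders:

# Hypothetical construction of the publish_many request; the model IDs are invented.
req = PublishManyRequest(ids=["model-id-1", "model-id-2"], publish_tasks=True)
print(req.ids)            # ['model-id-1', 'model-id-2']
print(req.publish_tasks)  # True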
python
apache__airflow
providers/fab/src/airflow/providers/fab/auth_manager/views/user.py
{ "start": 4869, "end": 6031 }
class ____(MultiResourceUserMixin, UserDBModelView): """Customize permission names for FAB's builtin UserDBModelView.""" _class_permission_name = permissions.RESOURCE_USER class_permission_name_mapping = { "resetmypassword": permissions.RESOURCE_MY_PASSWORD, "resetpasswords": permissions.RESOURCE_PASSWORD, "userinfoedit": permissions.RESOURCE_MY_PROFILE, "userinfo": permissions.RESOURCE_MY_PROFILE, } method_permission_name = { "add": "create", "download": "read", "show": "read", "list": "read", "edit": "edit", "delete": "delete", "resetmypassword": "read", "resetpasswords": "read", "userinfo": "read", "userinfoedit": "read", } add_columns = [ "first_name", "last_name", "username", "active", "email", "roles", "password", "conf_password", ] base_permissions = [ permissions.ACTION_CAN_CREATE, permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_DELETE, ]
CustomUserDBModelView
python
apache__airflow
providers/teradata/src/airflow/providers/teradata/operators/teradata_compute_cluster.py
{ "start": 2319, "end": 8021 }
class ____(BaseOperator): """ Teradata Compute Cluster Base Operator to set up and status operations of compute cluster. :param compute_profile_name: Name of the Compute Profile to manage. :param compute_group_name: Name of compute group to which compute profile belongs. :param teradata_conn_id: The :ref:`Teradata connection id <howto/connection:teradata>` reference to a specific Teradata database. :param timeout: Time elapsed before the task times out and fails. """ template_fields: Sequence[str] = ( "compute_profile_name", "compute_group_name", "teradata_conn_id", "timeout", ) ui_color = "#e07c24" def __init__( self, compute_profile_name: str, compute_group_name: str | None = None, teradata_conn_id: str = TeradataHook.default_conn_name, timeout: int = Constants.CC_OPR_TIME_OUT, **kwargs, ) -> None: super().__init__(**kwargs) self.compute_profile_name = compute_profile_name self.compute_group_name = compute_group_name self.teradata_conn_id = teradata_conn_id self.timeout = timeout @cached_property def hook(self) -> TeradataHook: return TeradataHook(teradata_conn_id=self.teradata_conn_id) @abstractmethod def execute(self, context: Context): pass def execute_complete(self, context: Context, event: dict[str, Any]) -> None: """ Execute when the trigger fires - returns immediately. Relies on trigger to throw an exception, otherwise it assumes execution was successful. """ self._compute_cluster_execute_complete(event) def _compute_cluster_execute(self, operation: str | None = None): # Verifies the provided compute profile name. if ( self.compute_profile_name is None or self.compute_profile_name == "None" or self.compute_profile_name == "" ): raise AirflowException(Constants.CC_OPR_EMPTY_PROFILE_ERROR_MSG % operation) try: # Verifies if the provided Teradata instance belongs to Vantage Cloud Lake. lake_support_find_sql = "SELECT count(1) from DBC.StorageV WHERE StorageName='TD_OFSSTORAGE'" lake_support_result = self.hook.run(lake_support_find_sql, handler=_single_result_row_handler) if lake_support_result is None: raise AirflowException(Constants.CC_GRP_LAKE_SUPPORT_ONLY_MSG % operation) except Exception: raise AirflowException(Constants.CC_GRP_LAKE_SUPPORT_ONLY_MSG % operation) # Getting teradata db version. Considering teradata instance is Lake when db version is 20 or above db_version_get_sql = "SELECT InfoData AS Version FROM DBC.DBCInfoV WHERE InfoKey = 'VERSION'" try: db_version_result = self.hook.run(db_version_get_sql, handler=_single_result_row_handler) if db_version_result is not None: # Safely extract the actual version string from the result if isinstance(db_version_result, (list, tuple)) and db_version_result: # e.g., if it's a tuple like ('17.10',), get the first element version_str = str(db_version_result[0]) else: version_str = str(db_version_result) # fallback, should be rare db_version = version_str.split(".")[0] if db_version is not None and int(db_version) < 20: raise AirflowException(Constants.CC_GRP_LAKE_SUPPORT_ONLY_MSG % operation) else: raise AirflowException(Constants.CC_ERR_VERSION_GET) except Exception: raise AirflowException(Constants.CC_ERR_VERSION_GET) def _compute_cluster_execute_complete(self, event: dict[str, Any]) -> None: if event["status"] == "success": return event["message"] if event["status"] == "error": raise AirflowException(event["message"]) def _handle_cc_status(self, operation_type, sql): create_sql_result = self._hook_run(sql, handler=_single_result_row_handler) self.log.info( "%s query ran successfully. Differing to trigger to check status in db. 
Result from sql: %s", operation_type, create_sql_result, ) self.defer( timeout=timedelta(minutes=self.timeout), trigger=TeradataComputeClusterSyncTrigger( teradata_conn_id=cast("str", self.teradata_conn_id), compute_profile_name=self.compute_profile_name, compute_group_name=self.compute_group_name, operation_type=operation_type, poll_interval=Constants.CC_POLL_INTERVAL, ), method_name="execute_complete", ) return create_sql_result def _hook_run(self, query, handler=None): try: if handler is not None: return self.hook.run(query, handler=handler) return self.hook.run(query) except Exception as ex: self.log.error(str(ex)) raise def _get_initially_suspended(self, create_cp_query): initially_suspended = "FALSE" pattern = r"INITIALLY_SUSPENDED\s*\(\s*'(TRUE|FALSE)'\s*\)" # Search for the pattern in the input string match = re.search(pattern, create_cp_query, re.IGNORECASE) if match: # Get the value of INITIALLY_SUSPENDED initially_suspended = match.group(1).strip().upper() return initially_suspended
_TeradataComputeClusterOperator
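The _get_initially_suspended helper above is a pure regex check, so it can be exercised without a Teradata connection; the CREATE COMPUTE PROFILE text below is made up for illustration:

# Regex-only sketch of the INITIALLY_SUSPENDED extraction; the SQL string is invented.
import re

query = "CREATE COMPUTE PROFILE cp1 IN cg1 USING QUERY_STRATEGY('STANDARD') INITIALLY_SUSPENDED('TRUE')"
match = re.search(r"INITIALLY_SUSPENDED\s*\(\s*'(TRUE|FALSE)'\s*\)", query, re.IGNORECASE)
print(match.group(1).strip().upper() if match else "FALSE")  # TRUE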
python
ray-project__ray
python/ray/util/client/server/server_stubs.py
{ "start": 1007, "end": 1346 }
class ____(ClientReferenceSentinel): def get_remote_obj(self): global _current_server real_ref_id = self.get_real_ref_from_server() if real_ref_id is None: return None return _current_server.lookup_or_register_actor( real_ref_id, self.client_id, None )
ClientReferenceActor
python
protocolbuffers__protobuf
python/google/protobuf/internal/well_known_types_test.py
{ "start": 26342, "end": 37362 }
class ____(unittest.TestCase): def testEmptyDict(self): # in operator for empty initialized struct msg = well_known_types_test_pb2.WKTMessage(optional_struct={}) self.assertNotIn('key', msg.optional_struct) def testStruct(self): struct = struct_pb2.Struct() self.assertIsInstance(struct, collections_abc.Mapping) self.assertEqual(0, len(struct)) struct_class = struct.__class__ struct['key1'] = 5 struct['key2'] = 'abc' struct['key3'] = True struct.get_or_create_struct('key4')['subkey'] = 11.0 struct_list = struct.get_or_create_list('key5') self.assertIsInstance(struct_list, collections_abc.Sequence) struct_list.extend([6, 'seven', True, False, None]) struct_list.add_struct()['subkey2'] = 9 struct['key6'] = {'subkey': {}} struct['key7'] = [2, False] self.assertEqual(7, len(struct)) self.assertTrue(isinstance(struct, well_known_types.Struct)) self.assertEqual(5, struct['key1']) self.assertEqual('abc', struct['key2']) self.assertIs(True, struct['key3']) self.assertEqual(11, struct['key4']['subkey']) inner_struct = struct_class() inner_struct['subkey2'] = 9 self.assertEqual( [6, 'seven', True, False, None, inner_struct], list(struct['key5'].items()), ) self.assertEqual({}, dict(struct['key6']['subkey'].fields)) self.assertEqual([2, False], list(struct['key7'].items())) serialized = struct.SerializeToString() struct2 = struct_pb2.Struct() struct2.ParseFromString(serialized) self.assertEqual(struct, struct2) for key, value in struct.items(): self.assertIn(key, struct) self.assertIn(key, struct2) self.assertEqual(value, struct2[key]) self.assertEqual(7, len(struct.keys())) self.assertEqual(7, len(struct.values())) for key in struct.keys(): self.assertIn(key, struct) self.assertIn(key, struct2) self.assertEqual(struct[key], struct2[key]) item = (next(iter(struct.keys())), next(iter(struct.values()))) self.assertEqual(item, next(iter(struct.items()))) self.assertTrue(isinstance(struct2, well_known_types.Struct)) self.assertEqual(5, struct2['key1']) self.assertEqual('abc', struct2['key2']) self.assertIs(True, struct2['key3']) self.assertEqual(11, struct2['key4']['subkey']) self.assertEqual( [6, 'seven', True, False, None, inner_struct], list(struct2['key5'].items()), ) struct_list = struct2['key5'] self.assertEqual(6, struct_list[0]) self.assertEqual('seven', struct_list[1]) self.assertEqual(True, struct_list[2]) self.assertEqual(False, struct_list[3]) self.assertEqual(None, struct_list[4]) self.assertEqual(inner_struct, struct_list[5]) struct_list[1] = 7 self.assertEqual(7, struct_list[1]) struct_list.add_list().extend([1, 'two', True, False, None]) self.assertEqual( [1, 'two', True, False, None], list(struct_list[6].items()) ) struct_list.extend([{'nested_struct': 30}, ['nested_list', 99], {}, []]) self.assertEqual(11, len(struct_list.values)) self.assertEqual(30, struct_list[7]['nested_struct']) self.assertEqual('nested_list', struct_list[8][0]) self.assertEqual(99, struct_list[8][1]) self.assertEqual({}, dict(struct_list[9].fields)) self.assertEqual([], list(struct_list[10].items())) struct_list[0] = {'replace': 'set'} struct_list[1] = ['replace', 'set'] self.assertEqual('set', struct_list[0]['replace']) self.assertEqual(['replace', 'set'], list(struct_list[1].items())) text_serialized = str(struct) struct3 = struct_pb2.Struct() text_format.Merge(text_serialized, struct3) self.assertEqual(struct, struct3) struct.get_or_create_struct('key3')['replace'] = 12 self.assertEqual(12, struct['key3']['replace']) # Tests empty list. 
struct.get_or_create_list('empty_list') empty_list = struct['empty_list'] self.assertEqual([], list(empty_list.items())) list2 = struct_pb2.ListValue() list2.add_list() empty_list = list2[0] self.assertEqual([], list(empty_list.items())) # Tests empty struct. struct.get_or_create_struct('empty_struct') empty_struct = struct['empty_struct'] self.assertEqual({}, dict(empty_struct.fields)) list2.add_struct() empty_struct = list2[1] self.assertEqual({}, dict(empty_struct.fields)) self.assertEqual(9, len(struct)) del struct['key3'] del struct['key4'] self.assertEqual(7, len(struct)) self.assertEqual(6, len(struct['key5'])) del struct['key5'][1] self.assertEqual(5, len(struct['key5'])) self.assertEqual( [6, True, False, None, inner_struct], list(struct['key5'].items()) ) def testInOperator(self): # in operator for Struct struct = struct_pb2.Struct() struct['key'] = 5 self.assertIn('key', struct) self.assertNotIn('fields', struct) with self.assertRaises(TypeError) as e: 1 in struct # in operator for ListValue struct_list = struct.get_or_create_list('key2') self.assertIsInstance(struct_list, collections_abc.Sequence) struct_list.extend([6, 'seven', True, False, None]) struct_list.add_struct()['subkey'] = 9 inner_struct = struct.__class__() inner_struct['subkey'] = 9 self.assertIn(6, struct_list) self.assertIn('seven', struct_list) self.assertIn(True, struct_list) self.assertIn(False, struct_list) self.assertIn(None, struct_list) self.assertIn(inner_struct, struct_list) self.assertNotIn('values', struct_list) self.assertNotIn(10, struct_list) for item in struct_list: self.assertIn(item, struct_list) def testStructAssignment(self): # Tests struct assignment from another struct s1 = struct_pb2.Struct() s2 = struct_pb2.Struct() for value in [1, 'a', [1], ['a'], {'a': 'b'}]: s1['x'] = value s2['x'] = s1['x'] self.assertEqual(s1['x'], s2['x']) dictionary = { 'key1': 5.0, 'key2': 'abc', 'key3': {'subkey': 11.0, 'k': False}, } msg = well_known_types_test_pb2.WKTMessage() msg.optional_struct = dictionary self.assertEqual(msg.optional_struct, dictionary) # Tests assign is not merge dictionary2 = { 'key4': {'subkey': 11.0, 'k': True}, } msg.optional_struct = dictionary2 self.assertEqual(msg.optional_struct, dictionary2) # Tests assign empty msg2 = well_known_types_test_pb2.WKTMessage() self.assertNotIn('optional_struct', msg2) msg2.optional_struct = {} self.assertIn('optional_struct', msg2) self.assertEqual(msg2.optional_struct, {}) def testListValueAssignment(self): list_value = [6, 'seven', True, False, None, {}] msg = well_known_types_test_pb2.WKTMessage() msg.optional_list_value = list_value self.assertEqual(msg.optional_list_value, list_value) def testStructConstruction(self): dictionary = { 'key1': 5.0, 'key2': 'abc', 'key3': {'subkey': 11.0, 'k': False}, } list_value = [6, 'seven', True, False, None, dictionary] msg = well_known_types_test_pb2.WKTMessage( optional_struct=dictionary, optional_list_value=list_value ) self.assertEqual(len(msg.optional_struct), len(dictionary)) self.assertEqual(msg.optional_struct, dictionary) self.assertEqual(len(msg.optional_list_value), len(list_value)) self.assertEqual(msg.optional_list_value, list_value) msg2 = well_known_types_test_pb2.WKTMessage( optional_struct={}, optional_list_value=[] ) self.assertIn('optional_struct', msg2) self.assertIn('optional_list_value', msg2) self.assertEqual(msg2.optional_struct, {}) self.assertEqual(msg2.optional_list_value, []) def testSpecialStructConstruct(self): dictionary = {'key1': 6.0} msg = 
well_known_types_test_pb2.WKTMessage(optional_struct=dictionary) self.assertEqual(msg.optional_struct, dictionary) dictionary2 = {'fields': 7.0} msg2 = well_known_types_test_pb2.WKTMessage(optional_struct=dictionary2) self.assertEqual(msg2.optional_struct, dictionary2) # Construct Struct as normal message value_msg = struct_pb2.Value(number_value=5.0) dictionary3 = {'fields': {'key1': value_msg}} msg3 = well_known_types_test_pb2.WKTMessage(optional_struct=dictionary3) self.assertEqual(msg3.optional_struct, {'key1': 5.0}) def testRepeatedStructConstruct(self): dict0 = {'key1': 6.0} dict1 = { 'key1': 'abc', 'key2': {'subkey': 11.0, 'k': True}, } value_msg = struct_pb2.Value(number_value=5.0) dict2 = {'fields': {'key1': value_msg}} msg = well_known_types_test_pb2.WKTMessage( repeated_struct=[dict0, dict1, dict2] ) self.assertEqual(len(msg.repeated_struct), 3) self.assertEqual(msg.repeated_struct[0], dict0) self.assertEqual(msg.repeated_struct[1], dict1) self.assertEqual(msg.repeated_struct[2], {'key1': 5.0}) def testRepeatedListValueConstruct(self): list0 = [6, 'seven', True, False] list1 = [None, {'key': 1.2}] msg = well_known_types_test_pb2.WKTMessage(repeated_list=[list0, list1]) self.assertEqual(len(msg.repeated_list), 2) self.assertEqual(msg.repeated_list[0], list0) self.assertEqual(msg.repeated_list[1], list1) def testMergeFrom(self): struct = struct_pb2.Struct() struct_class = struct.__class__ dictionary = { 'key1': 5, 'key2': 'abc', 'key3': True, 'key4': {'subkey': 11.0}, 'key5': [6, 'seven', True, False, None, {'subkey2': 9}], 'key6': [['nested_list', True]], 'empty_struct': {}, 'empty_list': [], 'tuple': ((3, 2), ()), } struct.update(dictionary) self.assertEqual(5, struct['key1']) self.assertEqual('abc', struct['key2']) self.assertIs(True, struct['key3']) self.assertEqual(11, struct['key4']['subkey']) inner_struct = struct_class() inner_struct['subkey2'] = 9 self.assertEqual( [6, 'seven', True, False, None, inner_struct], list(struct['key5'].items()), ) self.assertEqual(2, len(struct['key6'][0].values)) self.assertEqual('nested_list', struct['key6'][0][0]) self.assertEqual(True, struct['key6'][0][1]) empty_list = struct['empty_list'] self.assertEqual([], list(empty_list.items())) empty_struct = struct['empty_struct'] self.assertEqual({}, dict(empty_struct.fields)) # According to documentation: "When parsing from the wire or when merging, # if there are duplicate map keys the last key seen is used". duplicate = {'key4': {'replace': 20}, 'key5': [[False, 5]]} struct.update(duplicate) self.assertEqual(1, len(struct['key4'].fields)) self.assertEqual(20, struct['key4']['replace']) self.assertEqual(1, len(struct['key5'].values)) self.assertEqual(False, struct['key5'][0][0]) self.assertEqual(5, struct['key5'][0][1]) @testing_refleaks.TestCase
StructTest
python
python-pillow__Pillow
src/PIL/TgaImagePlugin.py
{ "start": 986, "end": 6980 }
class ____(ImageFile.ImageFile): format = "TGA" format_description = "Targa" def _open(self) -> None: # process header assert self.fp is not None s = self.fp.read(18) id_len = s[0] colormaptype = s[1] imagetype = s[2] depth = s[16] flags = s[17] self._size = i16(s, 12), i16(s, 14) # validate header fields if ( colormaptype not in (0, 1) or self.size[0] <= 0 or self.size[1] <= 0 or depth not in (1, 8, 16, 24, 32) ): msg = "not a TGA file" raise SyntaxError(msg) # image mode if imagetype in (3, 11): self._mode = "L" if depth == 1: self._mode = "1" # ??? elif depth == 16: self._mode = "LA" elif imagetype in (1, 9): self._mode = "P" if colormaptype else "L" elif imagetype in (2, 10): self._mode = "RGB" if depth == 24 else "RGBA" else: msg = "unknown TGA mode" raise SyntaxError(msg) # orientation orientation = flags & 0x30 self._flip_horizontally = orientation in [0x10, 0x30] if orientation in [0x20, 0x30]: orientation = 1 elif orientation in [0, 0x10]: orientation = -1 else: msg = "unknown TGA orientation" raise SyntaxError(msg) self.info["orientation"] = orientation if imagetype & 8: self.info["compression"] = "tga_rle" if id_len: self.info["id_section"] = self.fp.read(id_len) if colormaptype: # read palette start, size, mapdepth = i16(s, 3), i16(s, 5), s[7] if mapdepth == 16: self.palette = ImagePalette.raw( "BGRA;15Z", bytes(2 * start) + self.fp.read(2 * size) ) self.palette.mode = "RGBA" elif mapdepth == 24: self.palette = ImagePalette.raw( "BGR", bytes(3 * start) + self.fp.read(3 * size) ) elif mapdepth == 32: self.palette = ImagePalette.raw( "BGRA", bytes(4 * start) + self.fp.read(4 * size) ) else: msg = "unknown TGA map depth" raise SyntaxError(msg) # setup tile descriptor try: rawmode = MODES[(imagetype & 7, depth)] if imagetype & 8: # compressed self.tile = [ ImageFile._Tile( "tga_rle", (0, 0) + self.size, self.fp.tell(), (rawmode, orientation, depth), ) ] else: self.tile = [ ImageFile._Tile( "raw", (0, 0) + self.size, self.fp.tell(), (rawmode, 0, orientation), ) ] except KeyError: pass # cannot decode def load_end(self) -> None: if self._flip_horizontally: self.im = self.im.transpose(Image.Transpose.FLIP_LEFT_RIGHT) # # -------------------------------------------------------------------- # Write TGA file SAVE = { "1": ("1", 1, 0, 3), "L": ("L", 8, 0, 3), "LA": ("LA", 16, 0, 3), "P": ("P", 8, 1, 1), "RGB": ("BGR", 24, 0, 2), "RGBA": ("BGRA", 32, 0, 2), } def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None: try: rawmode, bits, colormaptype, imagetype = SAVE[im.mode] except KeyError as e: msg = f"cannot write mode {im.mode} as TGA" raise OSError(msg) from e if "rle" in im.encoderinfo: rle = im.encoderinfo["rle"] else: compression = im.encoderinfo.get("compression", im.info.get("compression")) rle = compression == "tga_rle" if rle: imagetype += 8 id_section = im.encoderinfo.get("id_section", im.info.get("id_section", "")) id_len = len(id_section) if id_len > 255: id_len = 255 id_section = id_section[:255] warnings.warn("id_section has been trimmed to 255 characters") if colormaptype: palette = im.im.getpalette("RGB", "BGR") colormaplength, colormapentry = len(palette) // 3, 24 else: colormaplength, colormapentry = 0, 0 if im.mode in ("LA", "RGBA"): flags = 8 else: flags = 0 orientation = im.encoderinfo.get("orientation", im.info.get("orientation", -1)) if orientation > 0: flags = flags | 0x20 fp.write( o8(id_len) + o8(colormaptype) + o8(imagetype) + o16(0) # colormapfirst + o16(colormaplength) + o8(colormapentry) + o16(0) + o16(0) + o16(im.size[0]) + o16(im.size[1]) 
+ o8(bits) + o8(flags) ) if id_section: fp.write(id_section) if colormaptype: fp.write(palette) if rle: ImageFile._save( im, fp, [ImageFile._Tile("tga_rle", (0, 0) + im.size, 0, (rawmode, orientation))], ) else: ImageFile._save( im, fp, [ImageFile._Tile("raw", (0, 0) + im.size, 0, (rawmode, 0, orientation))], ) # write targa version 2 footer fp.write(b"\000" * 8 + b"TRUEVISION-XFILE." + b"\000") # # -------------------------------------------------------------------- # Registry Image.register_open(TgaImageFile.format, TgaImageFile) Image.register_save(TgaImageFile.format, _save) Image.register_extensions(TgaImageFile.format, [".tga", ".icb", ".vda", ".vst"]) Image.register_mime(TgaImageFile.format, "image/x-tga")
TgaImageFile
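Because the module above registers the format with Pillow's Image registry, a plain Image.save call reaches _save; this minimal usage sketch assumes a standard Pillow installation and exercises the tga_rle compression branch shown in the code:

# Minimal usage sketch: save and reopen an RLE-compressed TGA through the registered plugin.
from PIL import Image

im = Image.new("RGB", (8, 8), (255, 0, 0))
im.save("example.tga", compression="tga_rle")              # routed to _save via the registry
roundtrip = Image.open("example.tga")
print(roundtrip.size, roundtrip.info.get("compression"))   # (8, 8) tga_rle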
python
pytorch__pytorch
test/fx/test_common_passes.py
{ "start": 1576, "end": 2801 }
class ____(TestCase): @parametrize( "common_pass,f,device", itertools.product(Passes, Test_Cases, Devices), name_fn ) def test_correctness(self, common_pass, f, device): inp = torch.randn(10, device=device) traced_m = make_fx(f)(inp) P = common_pass() res = P(traced_m) modified_m = res.graph_module assert isinstance(modified_m, GraphModule) inp_copy = inp.clone() expected = f(inp) result = modified_m(inp_copy) self.assertEqual(result, expected) @parametrize( "common_pass,f,device", itertools.product(Passes, Factory_Test_Cases, Devices), name_fn, ) def test_correctness_factory(self, common_pass, f, device): inp = torch.randn(10, device=device) traced_m = make_fx(f)(inp, device) P = common_pass() res = P(traced_m) modified_m = res.graph_module assert isinstance(modified_m, GraphModule) inp_copy = inp.clone() expected = f(inp, device) result = modified_m(inp_copy, device) self.assertEqual(result, expected) if __name__ == "__main__": raise_on_run_directly("test/test_fx.py")
TestCommonPass
python
ageron__handson-ml
future_encoders.py
{ "start": 4709, "end": 8047 }
class ____(BaseEstimator, TransformerMixin): """ Base class for encoders that includes the code to categorize and transform the input features. """ def _fit(self, X, handle_unknown='error'): X_temp = check_array(X, dtype=None) if not hasattr(X, 'dtype') and np.issubdtype(X_temp.dtype, np.str_): X = check_array(X, dtype=np.object) else: X = X_temp n_samples, n_features = X.shape if self.categories != 'auto': for cats in self.categories: if not np.all(np.sort(cats) == np.array(cats)): raise ValueError("Unsorted categories are not yet " "supported") if len(self.categories) != n_features: raise ValueError("Shape mismatch: if n_values is an array," " it has to be of shape (n_features,).") self._label_encoders_ = [LabelEncoder() for _ in range(n_features)] for i in range(n_features): le = self._label_encoders_[i] Xi = X[:, i] if self.categories == 'auto': le.fit(Xi) else: if handle_unknown == 'error': valid_mask = np.in1d(Xi, self.categories[i]) if not np.all(valid_mask): diff = np.unique(Xi[~valid_mask]) msg = ("Found unknown categories {0} in column {1}" " during fit".format(diff, i)) raise ValueError(msg) le.classes_ = np.array(self.categories[i]) self.categories_ = [le.classes_ for le in self._label_encoders_] def _transform(self, X, handle_unknown='error'): X_temp = check_array(X, dtype=None) if not hasattr(X, 'dtype') and np.issubdtype(X_temp.dtype, np.str_): X = check_array(X, dtype=np.object) else: X = X_temp _, n_features = X.shape X_int = np.zeros_like(X, dtype=np.int) X_mask = np.ones_like(X, dtype=np.bool) for i in range(n_features): Xi = X[:, i] valid_mask = np.in1d(Xi, self.categories_[i]) if not np.all(valid_mask): if handle_unknown == 'error': diff = np.unique(X[~valid_mask, i]) msg = ("Found unknown categories {0} in column {1}" " during transform".format(diff, i)) raise ValueError(msg) else: # Set the problematic rows to an acceptable value and # continue `The rows are marked `X_mask` and will be # removed later. X_mask[:, i] = valid_mask Xi = Xi.copy() Xi[~valid_mask] = self.categories_[i][0] X_int[:, i] = self._label_encoders_[i].transform(Xi) return X_int, X_mask WARNING_MSG = ( "The handling of integer data will change in the future. Currently, the " "categories are determined based on the range [0, max(values)], while " "in the future they will be determined based on the unique values.\n" "If you want the future behaviour, you can specify \"categories='auto'\"." )
_BaseEncoder
python
imageio__imageio
imageio/plugins/freeimagemulti.py
{ "start": 360, "end": 2873 }
class ____(FreeimageFormat): """Base class for freeimage formats that support multiple images.""" _modes = "iI" _fif = -1 class Reader(Format.Reader): def _open(self, flags=0): flags = int(flags) # Create bitmap self._bm = fi.create_multipage_bitmap( self.request.filename, self.format.fif, flags ) self._bm.load_from_filename(self.request.get_local_filename()) def _close(self): self._bm.close() def _get_length(self): return len(self._bm) def _get_data(self, index): sub = self._bm.get_page(index) try: return sub.get_image_data(), sub.get_meta_data() finally: sub.close() def _get_meta_data(self, index): index = index or 0 if index < 0 or index >= len(self._bm): raise IndexError() sub = self._bm.get_page(index) try: return sub.get_meta_data() finally: sub.close() # -- class Writer(FreeimageFormat.Writer): def _open(self, flags=0): # Set flags self._flags = flags = int(flags) # Instantiate multi-page bitmap self._bm = fi.create_multipage_bitmap( self.request.filename, self.format.fif, flags ) self._bm.save_to_filename(self.request.get_local_filename()) def _close(self): # Close bitmap self._bm.close() def _append_data(self, im, meta): # Prepare data if im.ndim == 3 and im.shape[-1] == 1: im = im[:, :, 0] im = image_as_uint(im, bitdepth=8) # Create sub bitmap sub1 = fi.create_bitmap(self._bm._filename, self.format.fif) # Let subclass add data to bitmap, optionally return new sub2 = self._append_bitmap(im, meta, sub1) # Add self._bm.append_bitmap(sub2) sub2.close() if sub1 is not sub2: sub1.close() def _append_bitmap(self, im, meta, bitmap): # Set data bitmap.allocate(im) bitmap.set_image_data(im) bitmap.set_meta_data(meta) # Return that same bitmap return bitmap def _set_meta_data(self, meta): pass # ignore global meta data
FreeimageMulti
python
coleifer__peewee
playhouse/flask_utils.py
{ "start": 3069, "end": 8197 }
class ____(object): """ Convenience wrapper for configuring a Peewee database for use with a Flask application. Provides a base `Model` class and registers handlers to manage the database connection during the request/response cycle. Usage:: from flask import Flask from peewee import * from playhouse.flask_utils import FlaskDB # The database can be specified using a database URL, or you can pass a # Peewee database instance directly: DATABASE = 'postgresql:///my_app' DATABASE = PostgresqlDatabase('my_app') # If we do not want connection-management on any views, we can specify # the view names using FLASKDB_EXCLUDED_ROUTES. The db connection will # not be opened/closed automatically when these views are requested: FLASKDB_EXCLUDED_ROUTES = ('logout',) app = Flask(__name__) app.config.from_object(__name__) # Now we can configure our FlaskDB: flask_db = FlaskDB(app) # Or use the "deferred initialization" pattern: flask_db = FlaskDB() flask_db.init_app(app) # The `flask_db` provides a base Model-class for easily binding models # to the configured database: class User(flask_db.Model): email = CharField() """ def __init__(self, app=None, database=None, model_class=Model, excluded_routes=None): self.database = None # Reference to actual Peewee database instance. self.base_model_class = model_class self._app = app self._db = database # dict, url, Database, or None (default). self._excluded_routes = excluded_routes or () if app is not None: self.init_app(app) def init_app(self, app): self._app = app if self._db is None: if 'DATABASE' in app.config: initial_db = app.config['DATABASE'] elif 'DATABASE_URL' in app.config: initial_db = app.config['DATABASE_URL'] else: raise ValueError('Missing required configuration data for ' 'database: DATABASE or DATABASE_URL.') else: initial_db = self._db if 'FLASKDB_EXCLUDED_ROUTES' in app.config: self._excluded_routes = app.config['FLASKDB_EXCLUDED_ROUTES'] self._load_database(app, initial_db) self._register_handlers(app) def _load_database(self, app, config_value): if isinstance(config_value, Database): database = config_value elif isinstance(config_value, dict): database = self._load_from_config_dict(dict(config_value)) else: # Assume a database connection URL. database = db_url_connect(config_value) if isinstance(self.database, Proxy): self.database.initialize(database) else: self.database = database def _load_from_config_dict(self, config_dict): try: name = config_dict.pop('name') engine = config_dict.pop('engine') except KeyError: raise RuntimeError('DATABASE configuration must specify a ' '`name` and `engine`.') if '.' 
in engine: path, class_name = engine.rsplit('.', 1) else: path, class_name = 'peewee', engine try: __import__(path) module = sys.modules[path] database_class = getattr(module, class_name) assert issubclass(database_class, Database) except ImportError: raise RuntimeError('Unable to import %s' % engine) except AttributeError: raise RuntimeError('Database engine not found %s' % engine) except AssertionError: raise RuntimeError('Database engine not a subclass of ' 'peewee.Database: %s' % engine) return database_class(name, **config_dict) def _register_handlers(self, app): app.before_request(self.connect_db) app.teardown_request(self.close_db) def get_model_class(self): if self.database is None: raise RuntimeError('Database must be initialized.') class BaseModel(self.base_model_class): class Meta: database = self.database return BaseModel @property def Model(self): if self._app is None: database = getattr(self, 'database', None) if database is None: self.database = Proxy() if not hasattr(self, '_model_class'): self._model_class = self.get_model_class() return self._model_class def connect_db(self): if self._excluded_routes and request.endpoint in self._excluded_routes: return self.database.connect() def close_db(self, exc): if self._excluded_routes and request.endpoint in self._excluded_routes: return if not self.database.is_closed(): self.database.close()
FlaskDB
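Besides the database URL and Database-instance forms shown in the docstring, _load_from_config_dict also accepts a dict with name and engine keys; the snippet below is a hypothetical SQLite configuration using that form (the file name is arbitrary):

# Hypothetical DATABASE dict handled by _load_from_config_dict above;
# 'engine' may be a bare peewee class name or a dotted path.
DATABASE = {
    'name': 'my_app.db',
    'engine': 'peewee.SqliteDatabase',
}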
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_page_breaks03.py
{ "start": 315, "end": 1201 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("page_breaks03.xlsx") self.ignore_files = [ "xl/printerSettings/printerSettings1.bin", "xl/worksheets/_rels/sheet1.xml.rels", ] self.ignore_elements = { "[Content_Types].xml": ['<Default Extension="bin"'], "xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"], } def test_create_file(self): """Test the creation of a simple XlsxWriter file with page breaks.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() worksheet.set_h_pagebreaks(range(0, 1025)) worksheet.write("A1", "Foo") workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
great-expectations__great_expectations
great_expectations/core/util.py
{ "start": 10677, "end": 14619 }
class ____: """ Methods for converting Databricks Filesystem (DBFS) paths """ @staticmethod def convert_to_file_semantics_version(path: str) -> str: if re.search(r"^dbfs:", path): return path.replace("dbfs:", "/dbfs", 1) if re.search("^/dbfs", path): return path raise ValueError("Path should start with either /dbfs or dbfs:") # noqa: TRY003 # FIXME CoP @staticmethod def convert_to_protocol_version(path: str) -> str: if re.search(r"^\/dbfs", path): candidate = path.replace("/dbfs", "dbfs:", 1) if candidate == "dbfs:": # Must add trailing slash return "dbfs:/" return candidate if re.search(r"^dbfs:", path): if path == "dbfs:": # Must add trailing slash return "dbfs:/" return path raise ValueError("Path should start with either /dbfs or dbfs:") # noqa: TRY003 # FIXME CoP def sniff_s3_compression(s3_url: S3Url) -> Union[str, None]: """Attempts to get read_csv compression from s3_url""" return _SUFFIX_TO_PD_KWARG.get(s3_url.suffix) if s3_url.suffix else None def get_or_create_spark_application( spark_config: Optional[dict[str, str]] = None, force_reuse_spark_context: Optional[bool] = None, ) -> pyspark.SparkSession: from great_expectations.execution_engine import SparkDFExecutionEngine # deprecated-v1.0.0 warnings.warn( "Utility method get_or_create_spark_application() is deprecated and will be removed in v1.0.0. " # noqa: E501 # FIXME CoP "Please pass your spark_config to the relevant Spark Datasource, or create your Spark Session outside of GX.", # noqa: E501 # FIXME CoP category=DeprecationWarning, ) if force_reuse_spark_context is not None: # deprecated-v1.0.0 warnings.warn( "force_reuse_spark_context is deprecated and will be removed in version 1.0. " "In environments that allow it, the existing Spark context will be reused, adding the " "spark_config options that have been passed. If the Spark context cannot be updated with " # noqa: E501 # FIXME CoP "the spark_config, the context will be stopped and restarted with the new spark_config.", # noqa: E501 # FIXME CoP category=DeprecationWarning, ) return SparkDFExecutionEngine.get_or_create_spark_session( spark_config=spark_config # type:ignore[arg-type] ) def get_or_create_spark_session( spark_config: Optional[dict[str, str]] = None, ) -> pyspark.SparkSession: """Obtains Spark session if it already exists; otherwise creates Spark session and returns it to caller. Args: spark_config: Dictionary containing Spark configuration (string-valued keys mapped to string-valued properties). Returns: SparkSession """ # noqa: E501 # FIXME CoP from great_expectations.execution_engine import SparkDFExecutionEngine # deprecated-v1.0.0 warnings.warn( "Utility method get_or_create_spark_session() is deprecated and will be removed in v1.0.0. " "Please pass your spark_config to the relevant Spark Datasource, or create your Spark Session outside of GX.", # noqa: E501 # FIXME CoP category=DeprecationWarning, ) return SparkDFExecutionEngine.get_or_create_spark_session( spark_config=spark_config or {}, # type: ignore[arg-type] # FIXME CoP ) def get_sql_dialect_floating_point_infinity_value(schema: str, negative: bool = False) -> float: res: Optional[dict] = SCHEMAS.get(schema) if res is None: if negative: return -np.inf else: return np.inf else: # noqa: PLR5501 # FIXME CoP if negative: return res["NegativeInfinity"] else: return res["PositiveInfinity"]
DBFSPath
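A quick demonstration of the two DBFSPath conversions implemented above (the file path itself is arbitrary):

# Both directions of the DBFS path conversion; the path is just an example.
print(DBFSPath.convert_to_file_semantics_version("dbfs:/tmp/data.csv"))  # /dbfs/tmp/data.csv
print(DBFSPath.convert_to_protocol_version("/dbfs/tmp/data.csv"))        # dbfs:/tmp/data.csv
print(DBFSPath.convert_to_protocol_version("/dbfs"))                     # dbfs:/ (trailing slash added)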
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/links/cloud_tasks.py
{ "start": 2125, "end": 2303 }
class ____(BaseGoogleLink): """Helper class for constructing Cloud Task Link.""" name = "Cloud Tasks" key = "cloud_task" format_str = CLOUD_TASKS_LINK
CloudTasksLink
python
apache__airflow
providers/microsoft/azure/tests/unit/microsoft/azure/operators/test_asb.py
{ "start": 20614, "end": 24348 }
class ____: def test_init(self): """ Test init by creating ASBReceiveSubscriptionMessageOperator with task id, topic_name, subscription_name, batch and asserting with values """ asb_subscription_receive_message = ASBReceiveSubscriptionMessageOperator( task_id="asb_subscription_receive_message", topic_name=TOPIC_NAME, subscription_name=SUBSCRIPTION_NAME, max_message_count=10, ) assert asb_subscription_receive_message.task_id == "asb_subscription_receive_message" assert asb_subscription_receive_message.topic_name == TOPIC_NAME assert asb_subscription_receive_message.subscription_name == SUBSCRIPTION_NAME assert asb_subscription_receive_message.max_message_count == 10 @mock.patch("airflow.providers.microsoft.azure.hooks.asb.MessageHook.get_conn") def test_receive_message_queue(self, mock_get_conn): """ Test ASBReceiveSubscriptionMessageOperator by mock connection, values and the service bus receive message """ asb_subscription_receive_message = ASBReceiveSubscriptionMessageOperator( task_id="asb_subscription_receive_message", topic_name=TOPIC_NAME, subscription_name=SUBSCRIPTION_NAME, max_message_count=10, ) asb_subscription_receive_message.execute(None) expected_calls = [ mock.call() .__enter__() .get_subscription_receiver(SUBSCRIPTION_NAME, TOPIC_NAME) .__enter__() .receive_messages(max_message_count=10, max_wait_time=5) .get_subscription_receiver(SUBSCRIPTION_NAME, TOPIC_NAME) .__exit__() .mock_call() .__exit__ ] mock_get_conn.assert_has_calls(expected_calls) @mock.patch("airflow.providers.microsoft.azure.hooks.asb.MessageHook.get_conn") def test_receive_message_queue_callback(self, mock_get_conn): """ Test ASBReceiveSubscriptionMessageOperator by mock connection, values and the service bus receive message """ mock_sb_message0 = ServiceBusMessage("Test message 0") mock_sb_message1 = ServiceBusMessage("Test message 1") mock_get_conn.return_value.__enter__.return_value.get_subscription_receiver.return_value.__enter__.return_value.receive_messages.return_value = [ mock_sb_message0, mock_sb_message1, ] messages_received = [] def message_callback(msg: ServiceBusMessage, context: Context): messages_received.append(msg) assert context is not None print(msg) asb_subscription_receive_message = ASBReceiveSubscriptionMessageOperator( task_id="asb_subscription_receive_message", topic_name=TOPIC_NAME, subscription_name=SUBSCRIPTION_NAME, max_message_count=10, message_callback=message_callback, ) asb_subscription_receive_message.execute(Context()) expected_calls = [ mock.call() .__enter__() .get_subscription_receiver(SUBSCRIPTION_NAME, TOPIC_NAME) .__enter__() .receive_messages(max_message_count=10, max_wait_time=5) .get_subscription_receiver(SUBSCRIPTION_NAME, TOPIC_NAME) .__exit__() .mock_call() .__exit__ ] mock_get_conn.assert_has_calls(expected_calls) assert len(messages_received) == 2 assert messages_received[0] == mock_sb_message0 assert messages_received[1] == mock_sb_message1
TestASBSubscriptionReceiveMessageOperator
python
eriklindernoren__ML-From-Scratch
mlfromscratch/unsupervised_learning/apriori.py
{ "start": 313, "end": 7906 }
class ____(): """A method for determining frequent itemsets in a transactional database and also for generating rules for those itemsets. Parameters: ----------- min_sup: float The minimum fraction of transactions an itemets needs to occur in to be deemed frequent min_conf: float: The minimum fraction of times the antecedent needs to imply the concequent to justify rule """ def __init__(self, min_sup=0.3, min_conf=0.81): self.min_sup = min_sup self.min_conf = min_conf self.freq_itemsets = None # List of freqeuent itemsets self.transactions = None # List of transactions def _calculate_support(self, itemset): count = 0 for transaction in self.transactions: if self._transaction_contains_items(transaction, itemset): count += 1 support = count / len(self.transactions) return support def _get_frequent_itemsets(self, candidates): """ Prunes the candidates that are not frequent => returns list with only frequent itemsets """ frequent = [] # Find frequent items for itemset in candidates: support = self._calculate_support(itemset) if support >= self.min_sup: frequent.append(itemset) return frequent def _has_infrequent_itemsets(self, candidate): """ True or false depending on the candidate has any subset with size k - 1 that is not in the frequent itemset """ k = len(candidate) # Find all combinations of size k-1 in candidate # E.g [1,2,3] => [[1,2],[1,3],[2,3]] subsets = list(itertools.combinations(candidate, k - 1)) for t in subsets: # t - is tuple. If size == 1 get the element subset = list(t) if len(t) > 1 else t[0] if not subset in self.freq_itemsets[-1]: return True return False def _generate_candidates(self, freq_itemset): """ Joins the elements in the frequent itemset and prunes resulting sets if they contain subsets that have been determined to be infrequent. 
""" candidates = [] for itemset1 in freq_itemset: for itemset2 in freq_itemset: # Valid if every element but the last are the same # and the last element in itemset1 is smaller than the last # in itemset2 valid = False single_item = isinstance(itemset1, int) if single_item and itemset1 < itemset2: valid = True elif not single_item and np.array_equal(itemset1[:-1], itemset2[:-1]) and itemset1[-1] < itemset2[-1]: valid = True if valid: # JOIN: Add the last element in itemset2 to itemset1 to # create a new candidate if single_item: candidate = [itemset1, itemset2] else: candidate = itemset1 + [itemset2[-1]] # PRUNE: Check if any subset of candidate have been determined # to be infrequent infrequent = self._has_infrequent_itemsets(candidate) if not infrequent: candidates.append(candidate) return candidates def _transaction_contains_items(self, transaction, items): """ True or false depending on each item in the itemset is in the transaction """ # If items is in fact only one item if isinstance(items, int): return items in transaction # Iterate through list of items and make sure that # all items are in the transaction for item in items: if not item in transaction: return False return True def find_frequent_itemsets(self, transactions): """ Returns the set of frequent itemsets in the list of transactions """ self.transactions = transactions # Get all unique items in the transactions unique_items = set(item for transaction in self.transactions for item in transaction) # Get the frequent items self.freq_itemsets = [self._get_frequent_itemsets(unique_items)] while(True): # Generate new candidates from last added frequent itemsets candidates = self._generate_candidates(self.freq_itemsets[-1]) # Get the frequent itemsets among those candidates frequent_itemsets = self._get_frequent_itemsets(candidates) # If there are no frequent itemsets we're done if not frequent_itemsets: break # Add them to the total list of frequent itemsets and start over self.freq_itemsets.append(frequent_itemsets) # Flatten the array and return every frequent itemset frequent_itemsets = [ itemset for sublist in self.freq_itemsets for itemset in sublist] return frequent_itemsets def _rules_from_itemset(self, initial_itemset, itemset): """ Recursive function which returns the rules where confidence >= min_confidence Starts with large itemset and recursively explores rules for subsets """ rules = [] k = len(itemset) # Get all combinations of sub-itemsets of size k - 1 from itemset # E.g [1,2,3] => [[1,2],[1,3],[2,3]] subsets = list(itertools.combinations(itemset, k - 1)) support = self._calculate_support(initial_itemset) for antecedent in subsets: # itertools.combinations returns tuples => convert to list antecedent = list(antecedent) antecedent_support = self._calculate_support(antecedent) # Calculate the confidence as sup(A and B) / sup(B), if antecedent # is B in an itemset of A and B confidence = float("{0:.2f}".format(support / antecedent_support)) if confidence >= self.min_conf: # The concequent is the initial_itemset except for antecedent concequent = [itemset for itemset in initial_itemset if not itemset in antecedent] # If single item => get item if len(antecedent) == 1: antecedent = antecedent[0] if len(concequent) == 1: concequent = concequent[0] # Create new rule rule = Rule( antecedent=antecedent, concequent=concequent, confidence=confidence, support=support) rules.append(rule) # If there are subsets that could result in rules # recursively add rules from subsets if k - 1 > 1: rules += 
self._rules_from_itemset(initial_itemset, antecedent) return rules def generate_rules(self, transactions): self.transactions = transactions frequent_itemsets = self.find_frequent_itemsets(transactions) # Only consider itemsets of size >= 2 items frequent_itemsets = [itemset for itemset in frequent_itemsets if not isinstance( itemset, int)] rules = [] for itemset in frequent_itemsets: rules += self._rules_from_itemset(itemset, itemset) # Remove empty values return rules
Apriori
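To make the flow of find_frequent_itemsets and generate_rules concrete, here is a hypothetical run through the class above, assuming the module's itertools/numpy/Rule imports are in scope and that Rule objects expose the attributes they are constructed with; the integer item IDs and thresholds are arbitrary:

# Illustrative usage only; the transactions are invented integer item IDs.
transactions = [
    [1, 2, 3],
    [1, 2, 4],
    [1, 2],
    [2, 3],
]
apriori = Apriori(min_sup=0.5, min_conf=0.8)
print(apriori.find_frequent_itemsets(transactions))   # frequent items and pairs, e.g. [1, 2, 3, [1, 2], [2, 3]]
for rule in apriori.generate_rules(transactions):
    # e.g. 1 -> 2 with confidence 1.0 and support 0.75
    print(rule.antecedent, "->", rule.concequent, rule.confidence, rule.support)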
python
PrefectHQ__prefect
tests/test_flows.py
{ "start": 7018, "end": 8434 }
class ____: def test_flow_decorator_initializes(self): # TODO: We should cover initialization with a task runner once introduced @flow(name="foo", version="B", flow_run_name="hi") def my_flow(): return "bar" assert isinstance(my_flow, Flow) assert my_flow.name == "foo" assert my_flow.version == "B" assert my_flow.fn() == "bar" assert my_flow.flow_run_name == "hi" def test_flow_decorator_initializes_with_callable_flow_run_name(self): @flow(flow_run_name=lambda: "hi") def my_flow(): return "bar" assert isinstance(my_flow, Flow) assert my_flow.fn() == "bar" assert my_flow.flow_run_name() == "hi" def test_flow_decorator_sets_default_version(self): my_flow = flow(flatdict_to_dict) assert my_flow.version == file_hash(flatdict_to_dict.__globals__["__file__"]) def test_invalid_run_name(self): class InvalidFlowRunNameArg: def format(*args, **kwargs): pass with pytest.raises( TypeError, match=( "Expected string or callable for 'flow_run_name'; got" " InvalidFlowRunNameArg instead." ), ): @flow(flow_run_name=InvalidFlowRunNameArg()) def flow_with_illegal_run_name(): pass
TestDecorator
python
cython__cython
Cython/Debugger/libcython.py
{ "start": 35148, "end": 35800 }
class ____(CythonCommand): """ Go up a Cython, Python or relevant C frame. """ name = 'cy up' _command = 'up' @libpython.dont_suppress_errors def invoke(self, *args): try: gdb.execute(self._command, to_string=True) while not self.is_relevant_function(gdb.selected_frame()): gdb.execute(self._command, to_string=True) except RuntimeError as e: raise gdb.GdbError(*e.args) frame = gdb.selected_frame() index = 0 while frame: frame = frame.older() index += 1 self.print_stackframe(index=index - 1)
CyUp
python
airbytehq__airbyte
airbyte-integrations/connectors/source-recharge/unit_tests/integration/streams/test_bundle_selections.py
{ "start": 512, "end": 1844 }
class ____(StreamTestCase): _STREAM_NAME = "bundle_selections" @HttpMocker() def test_given_one_page_when_read_then_return_records(self, http_mocker: HttpMocker) -> None: http_mocker.get( self.stream_request().with_limit(250).with_updated_at_min(START_DATE).build(), get_stream_response(_STREAM_NAME).with_record(get_stream_record(_STREAM_NAME, "id", _CURSOR_FIELD)).build(), ) output = read_full_refresh(self._config, _STREAM_NAME) assert len(output.records) == 1 @HttpMocker() def test_given_multiple_pages_when_read_then_return_records(self, http_mocker: HttpMocker) -> None: http_mocker.get( self.stream_request().with_limit(250).with_next_page_token(NEXT_PAGE_TOKEN).build(), get_stream_response(_STREAM_NAME).with_record(get_stream_record(_STREAM_NAME, "id", _CURSOR_FIELD)).build(), ) http_mocker.get( self.stream_request().with_limit(250).with_updated_at_min(START_DATE).build(), get_stream_response(_STREAM_NAME).with_pagination().with_record(get_stream_record(_STREAM_NAME, "id", _CURSOR_FIELD)).build(), ) output = read_full_refresh(self._config, _STREAM_NAME) assert len(output.records) == 2 @freezegun.freeze_time(NOW.isoformat())
TestFullRefresh
python
sanic-org__sanic
sanic/server/websockets/frame.py
{ "start": 356, "end": 11484 }
class ____: """ Assemble a message from frames. Code borrowed from aaugustin/websockets project: https://github.com/aaugustin/websockets/blob/6eb98dd8fa5b2c896b9f6be7e8d117708da82a39/src/websockets/sync/messages.py """ __slots__ = ( "protocol", "read_mutex", "write_mutex", "message_complete", "message_fetched", "get_in_progress", "decoder", "completed_queue", "chunks", "chunks_queue", "paused", "get_id", "put_id", ) if TYPE_CHECKING: protocol: "WebsocketImplProtocol" read_mutex: asyncio.Lock write_mutex: asyncio.Lock message_complete: asyncio.Event message_fetched: asyncio.Event completed_queue: asyncio.Queue get_in_progress: bool decoder: Optional[codecs.IncrementalDecoder] # For streaming chunks rather than messages: chunks: list[Data] chunks_queue: Optional[asyncio.Queue[Optional[Data]]] paused: bool def __init__(self, protocol) -> None: self.protocol = protocol self.read_mutex = asyncio.Lock() self.write_mutex = asyncio.Lock() self.completed_queue = asyncio.Queue(maxsize=1) # type: asyncio.Queue[Data] # put() sets this event to tell get() that a message can be fetched. self.message_complete = asyncio.Event() # get() sets this event to let put() self.message_fetched = asyncio.Event() # This flag prevents concurrent calls to get() by user code. self.get_in_progress = False # Decoder for text frames, None for binary frames. self.decoder = None # Buffer data from frames belonging to the same message. self.chunks = [] # When switching from "buffering" to "streaming", we use a thread-safe # queue for transferring frames from the writing thread (library code) # to the reading thread (user code). We're buffering when chunks_queue # is None and streaming when it's a Queue. None is a sentinel # value marking the end of the stream, superseding message_complete. # Stream data from frames belonging to the same message. self.chunks_queue = None # Flag to indicate we've paused the protocol self.paused = False async def get(self, timeout: Optional[float] = None) -> Optional[Data]: """ Read the next message. :meth:`get` returns a single :class:`str` or :class:`bytes`. If the :message was fragmented, :meth:`get` waits until the last frame is received, then it reassembles the message. If ``timeout`` is set and elapses before a complete message is received, :meth:`get` returns ``None``. """ completed: bool async with self.read_mutex: if timeout is not None and timeout <= 0: if not self.message_complete.is_set(): return None if self.get_in_progress: # This should be guarded against with the read_mutex, # exception is only here as a failsafe raise ServerError( "Called get() on Websocket frame assembler " "while asynchronous get is already in progress." ) self.get_in_progress = True # If the message_complete event isn't set yet, release the lock to # allow put() to run and eventually set it. # Locking with get_in_progress ensures only one task can get here. if timeout is None: completed = await self.message_complete.wait() elif timeout <= 0: completed = self.message_complete.is_set() else: try: await asyncio.wait_for( self.message_complete.wait(), timeout=timeout ) except asyncio.TimeoutError: ... finally: completed = self.message_complete.is_set() # Unpause the transport, if its paused if self.paused: self.protocol.resume_frames() self.paused = False if not self.get_in_progress: # no cov # This should be guarded against with the read_mutex, # exception is here as a failsafe raise ServerError( "State of Websocket frame assembler was modified while an " "asynchronous get was in progress." 
) self.get_in_progress = False # Waiting for a complete message timed out. if not completed: return None if not self.message_complete.is_set(): return None self.message_complete.clear() joiner: Data = b"" if self.decoder is None else "" # mypy cannot figure out that chunks have the proper type. message: Data = joiner.join(self.chunks) # type: ignore if self.message_fetched.is_set(): # This should be guarded against with the read_mutex, # and get_in_progress check, this exception is here # as a failsafe raise ServerError( "Websocket get() found a message when " "state was already fetched." ) self.message_fetched.set() self.chunks = [] # this should already be None, but set it here for safety self.chunks_queue = None return message async def get_iter(self) -> AsyncIterator[Data]: """ Stream the next message. Iterating the return value of :meth:`get_iter` yields a :class:`str` or :class:`bytes` for each frame in the message. """ async with self.read_mutex: if self.get_in_progress: # This should be guarded against with the read_mutex, # exception is only here as a failsafe raise ServerError( "Called get_iter on Websocket frame assembler " "while asynchronous get is already in progress." ) self.get_in_progress = True chunks = self.chunks self.chunks = [] self.chunks_queue = asyncio.Queue() # Sending None in chunk_queue supersedes setting message_complete # when switching to "streaming". If message is already complete # when the switch happens, put() didn't send None, so we have to. if self.message_complete.is_set(): await self.chunks_queue.put(None) # Locking with get_in_progress ensures only one task can get here for c in chunks: yield c while True: chunk = await self.chunks_queue.get() if chunk is None: break yield chunk # Unpause the transport, if its paused if self.paused: self.protocol.resume_frames() self.paused = False if not self.get_in_progress: # no cov # This should be guarded against with the read_mutex, # exception is here as a failsafe raise ServerError( "State of Websocket frame assembler was modified while an " "asynchronous get was in progress." ) self.get_in_progress = False if not self.message_complete.is_set(): # no cov # This should be guarded against with the read_mutex, # exception is here as a failsafe raise ServerError( "Websocket frame assembler chunks queue ended before " "message was complete." ) self.message_complete.clear() if self.message_fetched.is_set(): # no cov # This should be guarded against with the read_mutex, # and get_in_progress check, this exception is # here as a failsafe raise ServerError( "Websocket get_iter() found a message when state was " "already fetched." ) self.message_fetched.set() # this should already be empty, but set it here for safety self.chunks = [] self.chunks_queue = None async def put(self, frame: Frame) -> None: """ Add ``frame`` to the next message. When ``frame`` is the final frame in a message, :meth:`put` waits until the message is fetched, either by calling :meth:`get` or by iterating the return value of :meth:`get_iter`. :meth:`put` assumes that the stream of frames respects the protocol. If it doesn't, the behavior is undefined. """ async with self.write_mutex: if frame.opcode is Opcode.TEXT: self.decoder = UTF8Decoder(errors="strict") elif frame.opcode is Opcode.BINARY: self.decoder = None elif frame.opcode is Opcode.CONT: pass else: # Ignore control frames. 
return data: Data if self.decoder is not None: data = self.decoder.decode(frame.data, frame.fin) else: data = frame.data if self.chunks_queue is None: self.chunks.append(data) else: await self.chunks_queue.put(data) if not frame.fin: return if not self.get_in_progress: # nobody is waiting for this frame, so try to pause subsequent # frames at the protocol level self.paused = self.protocol.pause_frames() # Message is complete. Wait until it's fetched to return. if self.chunks_queue is not None: await self.chunks_queue.put(None) if self.message_complete.is_set(): # This should be guarded against with the write_mutex raise ServerError( "Websocket put() got a new message when a message was " "already in its chamber." ) self.message_complete.set() # Signal to get() it can serve the if self.message_fetched.is_set(): # This should be guarded against with the write_mutex raise ServerError( "Websocket put() got a new message when the previous " "message was not yet fetched." ) # Allow get() to run and eventually set the event. await self.message_fetched.wait() self.message_fetched.clear() self.decoder = None
WebsocketFrameAssembler
python
django__django
django/core/checks/messages.py
{ "start": 70, "end": 1654 }
class ____: def __init__(self, level, msg, hint=None, obj=None, id=None): if not isinstance(level, int): raise TypeError("The first argument should be level.") self.level = level self.msg = msg self.hint = hint self.obj = obj self.id = id def __eq__(self, other): return isinstance(other, self.__class__) and all( getattr(self, attr) == getattr(other, attr) for attr in ["level", "msg", "hint", "obj", "id"] ) def __str__(self): from django.db import models if self.obj is None: obj = "?" elif isinstance(self.obj, models.base.ModelBase): # We need to hardcode ModelBase and Field cases because its __str__ # method doesn't return "applabel.modellabel" and cannot be # changed. obj = self.obj._meta.label else: obj = str(self.obj) id = "(%s) " % self.id if self.id else "" hint = "\n\tHINT: %s" % self.hint if self.hint else "" return "%s: %s%s%s" % (obj, id, self.msg, hint) def __repr__(self): return "<%s: level=%r, msg=%r, hint=%r, obj=%r, id=%r>" % ( self.__class__.__name__, self.level, self.msg, self.hint, self.obj, self.id, ) def is_serious(self, level=ERROR): return self.level >= level def is_silenced(self): from django.conf import settings return self.id in settings.SILENCED_SYSTEM_CHECKS
CheckMessage
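For context, the string formatting implemented in __str__ above can be exercised directly; a minimal sketch, assuming CheckMessage and the ERROR level constant are imported from django.core.checks:

from django.core.checks import CheckMessage, ERROR

msg = CheckMessage(ERROR, "Something is wrong.", hint="Check settings.", id="myapp.E001")
print(str(msg))
# ?: (myapp.E001) Something is wrong.
#     HINT: Check settings.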
python
wandb__wandb
wandb/vendor/pygments/lexers/templates.py
{ "start": 16597, "end": 17027 }
class ____(DelegatingLexer): """ Subclass of the `MyghtyLexer` that highlights unlexed data with the `HtmlLexer`. .. versionadded:: 0.6 """ name = 'HTML+Myghty' aliases = ['html+myghty'] mimetypes = ['text/html+myghty'] def __init__(self, **options): super(MyghtyHtmlLexer, self).__init__(HtmlLexer, MyghtyLexer, **options)
MyghtyHtmlLexer
python
readthedocs__readthedocs.org
readthedocs/api/v2/permissions.py
{ "start": 1676, "end": 2597 }
class ____(BaseHasAPIKey): """ Custom permission to inject the build API key into the request. We completely override the ``has_permission`` method to avoid having to parse and validate the key again on each view. The key is injected in the ``request.build_api_key`` attribute only if it's valid, otherwise it's set to ``None``. This grants read and write access to the API. """ model = BuildAPIKey key_parser = TokenKeyParser() def has_permission(self, request, view): request.build_api_key = None key = self.get_key(request) if not key: return False try: build_api_key = self.model.objects.get_from_key(key) except self.model.DoesNotExist: return False if build_api_key.has_expired: return False request.build_api_key = build_api_key return True
HasBuildAPIKey
python
gevent__gevent
src/gevent/tests/test__local.py
{ "start": 10727, "end": 11306 }
class ____(greentest.TestCase): __timeout__ = None @greentest.ignores_leakcheck def test_provides(self): # https://github.com/gevent/gevent/issues/1122 # pylint:disable=inherit-non-class class IFoo(interface.Interface): pass @interface.implementer(IFoo) class Base(object): pass class Derived(Base, local): pass d = Derived() p = list(interface.providedBy(d)) self.assertEqual([IFoo], p) @greentest.skipOnPurePython("Needs C extension")
TestLocalInterface
python
keras-team__keras
keras/src/backend/torch/optimizers/torch_adagrad.py
{ "start": 147, "end": 1041 }
class ____( torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Adagrad ): def _parallel_update_step( self, grads, variables, learning_rate, ): keras_variables = variables variables = [v.value for v in variables] dtype = variables[0].dtype lr = ops.cast(learning_rate, dtype) accumulators = [ self._accumulators[self._get_variable_index(variable)].value for variable in keras_variables ] torch._foreach_add_(accumulators, torch._foreach_mul(grads, grads)) torch._foreach_add_( variables, torch._foreach_div( torch._foreach_mul(grads, lr), torch._foreach_sqrt( torch._foreach_add(accumulators, self.epsilon) ), ), alpha=-1, )
Adagrad
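The _foreach_ calls above apply the standard Adagrad rule elementwise across all parameters; a scalar sketch of a single step, with illustrative values:

import math

# accumulator += g ** 2
# w -= lr * g / sqrt(accumulator + eps)
acc, lr, eps = 0.0, 0.1, 1e-7
w, g = 1.0, 0.5
acc += g * g
w -= lr * g / math.sqrt(acc + eps)
print(round(w, 6))  # ~0.9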
python
aio-libs__aiohttp
aiohttp/client_middleware_digest_auth.py
{ "start": 734, "end": 4775 }
class ____(TypedDict, total=False): realm: str nonce: str qop: str algorithm: str opaque: str domain: str stale: str DigestFunctions: dict[str, Callable[[bytes], "hashlib._Hash"]] = { "MD5": hashlib.md5, "MD5-SESS": hashlib.md5, "SHA": hashlib.sha1, "SHA-SESS": hashlib.sha1, "SHA256": hashlib.sha256, "SHA256-SESS": hashlib.sha256, "SHA-256": hashlib.sha256, "SHA-256-SESS": hashlib.sha256, "SHA512": hashlib.sha512, "SHA512-SESS": hashlib.sha512, "SHA-512": hashlib.sha512, "SHA-512-SESS": hashlib.sha512, } # Compile the regex pattern once at module level for performance _HEADER_PAIRS_PATTERN = re.compile( r'(\w+)\s*=\s*(?:"((?:[^"\\]|\\.)*)"|([^\s,]+))' # | | | | | | | | | || | # +----|--|-|-|--|----|------|----|--||-----|--> alphanumeric key # +--|-|-|--|----|------|----|--||-----|--> maybe whitespace # | | | | | | | || | # +-|-|--|----|------|----|--||-----|--> = (delimiter) # +-|--|----|------|----|--||-----|--> maybe whitespace # | | | | | || | # +--|----|------|----|--||-----|--> group quoted or unquoted # | | | | || | # +----|------|----|--||-----|--> if quoted... # +------|----|--||-----|--> anything but " or \ # +----|--||-----|--> escaped characters allowed # +--||-----|--> or can be empty string # || | # +|-----|--> if unquoted... # +-----|--> anything but , or <space> # +--> at least one char req'd ) # RFC 7616: Challenge parameters to extract CHALLENGE_FIELDS: Final[ tuple[ Literal["realm", "nonce", "qop", "algorithm", "opaque", "domain", "stale"], ... ] ] = ( "realm", "nonce", "qop", "algorithm", "opaque", "domain", "stale", ) # Supported digest authentication algorithms # Use a tuple of sorted keys for predictable documentation and error messages SUPPORTED_ALGORITHMS: Final[tuple[str, ...]] = tuple(sorted(DigestFunctions.keys())) # RFC 7616: Fields that require quoting in the Digest auth header # These fields must be enclosed in double quotes in the Authorization header. # Algorithm, qop, and nc are never quoted per RFC specifications. # This frozen set is used by the template-based header construction to # automatically determine which fields need quotes. QUOTED_AUTH_FIELDS: Final[frozenset[str]] = frozenset( {"username", "realm", "nonce", "uri", "response", "opaque", "cnonce"} ) def escape_quotes(value: str) -> str: """Escape double quotes for HTTP header values.""" return value.replace('"', '\\"') def unescape_quotes(value: str) -> str: """Unescape double quotes in HTTP header values.""" return value.replace('\\"', '"') def parse_header_pairs(header: str) -> dict[str, str]: """ Parse key-value pairs from WWW-Authenticate or similar HTTP headers. This function handles the complex format of WWW-Authenticate header values, supporting both quoted and unquoted values, proper handling of commas in quoted values, and whitespace variations per RFC 7616. Examples of supported formats: - key1="value1", key2=value2 - key1 = "value1" , key2="value, with, commas" - key1=value1,key2="value2" - realm="example.com", nonce="12345", qop="auth" Args: header: The header value string to parse Returns: Dictionary mapping parameter names to their values """ return { stripped_key: unescape_quotes(quoted_val) if quoted_val else unquoted_val for key, quoted_val, unquoted_val in _HEADER_PAIRS_PATTERN.findall(header) if (stripped_key := key.strip()) }
DigestAuthChallenge
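A quick illustration of parse_header_pairs as defined above; note that in aiohttp it lives in an internal module, so the import path is an implementation detail rather than public API:

from aiohttp.client_middleware_digest_auth import parse_header_pairs

www_authenticate = 'realm="example.com", nonce="12345", qop="auth", algorithm=SHA-256'
print(parse_header_pairs(www_authenticate))
# {'realm': 'example.com', 'nonce': '12345', 'qop': 'auth', 'algorithm': 'SHA-256'}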
python
airbytehq__airbyte
airbyte-ci/connectors/pipelines/pipelines/models/secrets.py
{ "start": 4507, "end": 5133 }
class ____: name: str secret_store: SecretStore file_name: str | None = None def __post_init__(self) -> None: self.value: str = self.secret_store.fetch_secret(self.name) self.value_hash: str = self._get_value_hash(self.value) @staticmethod def _get_value_hash(value: str) -> str: byte_string = value.encode("utf-8") md5_hash = hashlib.md5() md5_hash.update(byte_string) return md5_hash.hexdigest()[:20] def as_dagger_secret(self, dagger_client: DaggerClient) -> DaggerSecret: return dagger_client.set_secret(self.value_hash, self.value)
Secret
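A minimal sketch of the value-hash derivation used by the dataclass above, with a hypothetical in-memory store standing in for SecretStore:

import hashlib
from dataclasses import dataclass


@dataclass
class InMemorySecretStore:
    """Hypothetical stand-in for the SecretStore interface assumed above."""

    values: dict

    def fetch_secret(self, name: str) -> str:
        return self.values[name]


store = InMemorySecretStore({"API_TOKEN": "super-secret"})
value = store.fetch_secret("API_TOKEN")
# Same derivation as Secret._get_value_hash: md5 hex digest truncated to 20 characters.
value_hash = hashlib.md5(value.encode("utf-8")).hexdigest()[:20]
assert len(value_hash) == 20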
python
pytorch__pytorch
test/dynamo/cpython/3_13/test_contextlib.py
{ "start": 14231, "end": 15315 }
class ____(__TestCase): @support.requires_docstrings def test_instance_docs(self): # Issue 19330: ensure context manager instances have good docstrings cm_docstring = closing.__doc__ obj = closing(None) self.assertEqual(obj.__doc__, cm_docstring) def test_closing(self): state = [] with torch._dynamo.error_on_graph_break(False): class C: def close(self): state.append(1) x = C() self.assertEqual(state, []) with closing(x) as y: self.assertEqual(x, y) self.assertEqual(state, [1]) def test_closing_error(self): state = [] with torch._dynamo.error_on_graph_break(False): class C: def close(self): state.append(1) x = C() self.assertEqual(state, []) with self.assertRaises(ZeroDivisionError): with closing(x) as y: self.assertEqual(x, y) 1 / 0 self.assertEqual(state, [1])
ClosingTestCase
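The behaviour these tests assert comes straight from the standard library; a plain contextlib.closing example with no PyTorch involved:

from contextlib import closing


class Resource:
    def __init__(self):
        self.closed = False

    def close(self):
        self.closed = True


r = Resource()
with closing(r) as handle:
    assert handle is r and not r.closed
assert r.closed  # close() ran on exit, and would also have run if the body had raised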
python
gevent__gevent
src/gevent/lock.py
{ "start": 5180, "end": 6173 }
class ____(_AtomicSemaphoreMixin, BoundedSemaphore): __doc__ = BoundedSemaphore.__doc__ __slots__ = ( '_lock_lock', ) def release(self): # pylint:disable=useless-super-delegation # This method is duplicated here so that it can get # properly documented. return super(_AtomicBoundedSemaphore, self).release() def _fixup_docstrings(): for c in _AtomicSemaphore, _AtomicBoundedSemaphore: b = c.__mro__[2] assert b.__name__.endswith('Semaphore') and 'Atomic' not in b.__name__ assert c.__doc__ == b.__doc__ for m in 'acquire', 'release', 'wait': c_meth = getattr(c, m) b_meth = getattr(b, m) c_meth.__doc__ = b_meth.__doc__ _fixup_docstrings() del _fixup_docstrings if PURE_PYTHON: Semaphore = _AtomicSemaphore Semaphore.__name__ = 'Semaphore' BoundedSemaphore = _AtomicBoundedSemaphore BoundedSemaphore.__name__ = 'BoundedSemaphore'
_AtomicBoundedSemaphore
python
ray-project__ray
python/ray/data/_internal/execution/operators/actor_pool_map_operator.py
{ "start": 26638, "end": 27488 }
class ____(abc.ABC): def __init__(self, actor_pool: "_ActorPool"): """Initialize the actor task selector. Args: actor_pool: The actor pool to select tasks from. """ self._actor_pool = actor_pool @abstractmethod def select_actors( self, input_queue: BundleQueue, actor_locality_enabled: bool ) -> Iterator[Tuple[RefBundle, ActorHandle]]: """Select actors for bundles in the input queue. Args: input_queue: The input queue to select actors for. actor_locality_enabled: Whether actor locality is enabled. Returns: Iterator of tuples of the bundle and the selected actor for that bundle. Iteration stops when there are no more bundles to be selected in the input queue """ pass
_ActorTaskSelector
python
apache__airflow
providers/openlineage/src/airflow/providers/openlineage/extractors/base.py
{ "start": 3129, "end": 6449 }
class ____(BaseExtractor): """Extractor that uses `get_openlineage_facets_on_start/complete/failure` methods.""" @classmethod def get_operator_classnames(cls) -> list[str]: """ Assign this extractor to *no* operators. Default extractor is chosen not on the classname basis, but by existence of get_openlineage_facets method on operator. """ return [] def _execute_extraction(self) -> OperatorLineage | None: method = getattr(self.operator, OL_METHOD_NAME_START, None) if callable(method): self.log.debug( "Trying to execute '%s' method of '%s'.", OL_METHOD_NAME_START, self.operator.task_type ) return self._get_openlineage_facets(method) self.log.debug( "Operator '%s' does not have '%s' method.", self.operator.task_type, OL_METHOD_NAME_START ) return OperatorLineage() def extract_on_complete(self, task_instance) -> OperatorLineage | None: method = getattr(self.operator, OL_METHOD_NAME_COMPLETE, None) if callable(method): self.log.debug( "Trying to execute '%s' method of '%s'.", OL_METHOD_NAME_COMPLETE, self.operator.task_type ) return self._get_openlineage_facets(method, task_instance) self.log.debug( "Operator '%s' does not have '%s' method.", self.operator.task_type, OL_METHOD_NAME_COMPLETE ) return self.extract() def extract_on_failure(self, task_instance) -> OperatorLineage | None: method = getattr(self.operator, OL_METHOD_NAME_FAIL, None) if callable(method): self.log.debug( "Trying to execute '%s' method of '%s'.", OL_METHOD_NAME_FAIL, self.operator.task_type ) return self._get_openlineage_facets(method, task_instance) self.log.debug( "Operator '%s' does not have '%s' method.", self.operator.task_type, OL_METHOD_NAME_FAIL ) return self.extract_on_complete(task_instance) def _get_openlineage_facets(self, get_facets_method, *args) -> OperatorLineage | None: try: facets: OperatorLineage | None = get_facets_method(*args) if facets is None: self.log.debug("OpenLineage method returned `None`") return None # "rewrite" OperatorLineage to safeguard against different version of the same class # that was existing in openlineage-airflow package outside of Airflow repo return OperatorLineage( inputs=facets.inputs, outputs=facets.outputs, run_facets=facets.run_facets, job_facets=facets.job_facets, ) except ImportError: self.log.exception( "OpenLineage provider method failed to import OpenLineage integration. " "This should not happen." ) except Exception as e: self.log.warning( "OpenLineage method failed to extract data from Operator with the following exception: `%s`", e, ) self.log.debug("OpenLineage extraction failure details:", exc_info=True) return None
DefaultExtractor
python
getsentry__sentry
tests/sentry/api/endpoints/test_custom_rules.py
{ "start": 6476, "end": 12623 }
class ____(APITestCase): """ Tests that calling the endpoint converts the query to a rule returns it and saves it in the db """ endpoint = "sentry-api-0-organization-dynamic_sampling-custom_rules" method = "post" def setUp(self) -> None: super().setUp() self.login_as(user=self.user) self.second_project = self.create_project(organization=self.organization) def test_create(self) -> None: request_data = { "query": "event.type:transaction http.method:POST", "projects": [self.project.id], } resp = self.get_response(self.organization.slug, raw_data=request_data) assert resp.status_code == 200 data = resp.data start_date = datetime.fromisoformat(data["startDate"]) end_date = datetime.fromisoformat(data["endDate"]) assert end_date - start_date == timedelta(days=2) projects = data["projects"] assert projects == [self.project.id] org_id = data["orgId"] assert org_id == self.organization.id # check the database rule_id = data["ruleId"] rules = list(self.organization.customdynamicsamplingrule_set.all()) assert len(rules) == 1 rule = rules[0] assert rule.external_rule_id == rule_id def test_disallow_when_no_project_access(self) -> None: # disable Open Membership self.organization.flags.allow_joinleave = False self.organization.save() # user has no access to the first project user_no_team = self.create_user(is_superuser=False) self.create_member( user=user_no_team, organization=self.organization, role="member", teams=[] ) self.login_as(user_no_team) request_data = { "query": "event.type:transaction http.method:POST", "projects": [self.project.id], } response = self.get_response(self.organization.slug, raw_data=request_data) assert response.status_code == 403, response.data assert response.data == {"detail": "You do not have permission to perform this action."} def test_updates_existing(self) -> None: """ Test that the endpoint updates an existing rule if the same rule condition and projects is given The rule id should be the same """ request_data = { "query": "event.type:transaction", "projects": [self.project.id], } # create rule resp = self.get_response(self.organization.slug, raw_data=request_data) assert resp.status_code == 200 data = resp.data rule_id = data["ruleId"] start_date = datetime.fromisoformat(data["startDate"]) end_date = datetime.fromisoformat(data["endDate"]) assert end_date - start_date == timedelta(days=2) request_data = { "query": "event.type:transaction", "projects": [self.project.id], } # update existing rule resp = self.get_response(self.organization.slug, raw_data=request_data) assert resp.status_code == 200 data = resp.data start_date = datetime.fromisoformat(data["startDate"]) end_date = datetime.fromisoformat(data["endDate"]) assert end_date - start_date >= timedelta(days=2) projects = data["projects"] assert projects == [self.project.id] new_rule_id = data["ruleId"] assert rule_id == new_rule_id @mock.patch("sentry.api.endpoints.custom_rules.schedule_invalidate_project_config") def test_invalidates_project_config( self, mock_invalidate_project_config: mock.MagicMock ) -> None: """ Tests that project rules invalidates all the configurations for the passed projects """ request_data = { "query": "event.type:transaction http.method:POST", "projects": [self.project.id, self.second_project.id], } mock_invalidate_project_config.reset_mock() resp = self.get_response(self.organization.slug, raw_data=request_data) assert resp.status_code == 200 mock_invalidate_project_config.assert_any_call(trigger=mock.ANY, project_id=self.project.id) mock_invalidate_project_config.assert_any_call( 
trigger=mock.ANY, project_id=self.second_project.id ) @mock.patch("sentry.api.endpoints.custom_rules.schedule_invalidate_project_config") def test_invalidates_organization_config( self, mock_invalidate_project_config: mock.MagicMock ) -> None: """ Tests that org rules invalidates all the configurations for the projects in the organization """ request_data = { "query": "event.type:transaction http.method:POST", "projects": [], } mock_invalidate_project_config.reset_mock() resp = self.get_response(self.organization.slug, raw_data=request_data) assert resp.status_code == 200 mock_invalidate_project_config.assert_called_once_with( trigger=mock.ANY, organization_id=self.organization.id ) @pytest.mark.parametrize( "what,value,valid", [ ("query", "event.type:transaction", True), ("projects", ["abc"], False), ("query", "", True), ], ) def test_custom_rule_serializer(what, value, valid) -> None: """ Test that the serializer works as expected """ data = {"query": "event.type:transaction", "projects": []} data[what] = value serializer = CustomRulesInputSerializer(data=data) assert serializer.is_valid() == valid def test_custom_rule_serializer_creates_org_rule_when_no_projects_given() -> None: """ Test that the serializer creates an org level rule when no projects are given """ data = {"query": "event.type:transaction"} serializer = CustomRulesInputSerializer(data=data) assert serializer.is_valid() # an org level rule has an empty list of projects set assert serializer.validated_data["projects"] == []
CustomRulesEndpoint
python
SmileyChris__easy-thumbnails
demoproject/mainapp/apps.py
{ "start": 36, "end": 146 }
class ____(AppConfig): default_auto_field = "django.db.models.BigAutoField" name = "mainapp"
MainappConfig
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/vision.py
{ "start": 21981, "end": 25335 }
class ____(GoogleCloudBaseOperator): """ Get information associated with a ``Product``. Possible errors: - Returns `NOT_FOUND` if the `Product` does not exist. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:CloudVisionGetProductOperator` :param location: (Required) The region where the Product is located. Valid regions (as of 2019-02-05) are: us-east1, us-west1, europe-west1, asia-east1 :param product_id: (Required) The resource id of this Product. :param project_id: (Optional) The project in which the Product is located. If set to None or missing, the default project_id from the Google Cloud connection is used. :param retry: (Optional) A retry object used to retry requests. If `None` is specified, requests will not be retried. :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :param metadata: (Optional) Additional metadata that is provided to the method. :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud. :param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). """ # [START vision_product_get_template_fields] template_fields: Sequence[str] = ( "location", "project_id", "product_id", "gcp_conn_id", "impersonation_chain", ) # [END vision_product_get_template_fields] def __init__( self, *, location: str, product_id: str, project_id: str = PROVIDE_PROJECT_ID, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: MetaData = (), gcp_conn_id: str = "google_cloud_default", impersonation_chain: str | Sequence[str] | None = None, **kwargs, ) -> None: super().__init__(**kwargs) self.location = location self.product_id = product_id self.project_id = project_id self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain def execute(self, context: Context): hook = CloudVisionHook( gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain, ) return hook.get_product( location=self.location, product_id=self.product_id, project_id=self.project_id, retry=self.retry, timeout=self.timeout, metadata=self.metadata, )
CloudVisionGetProductOperator
python
getsentry__sentry
src/sentry/models/rule.py
{ "start": 5479, "end": 5611 }
class ____(Enum): CREATED = 1 DELETED = 2 UPDATED = 3 ENABLED = 4 DISABLED = 5 @region_silo_model
RuleActivityType
python
xlwings__xlwings
xlwings/main.py
{ "start": 90704, "end": 92316 }
class ____(Ranges): """ Represents the columns of a range. Do not construct this class directly, use :attr:`Range.columns` instead. Example ------- .. code-block:: python import xlwings as xw wb = xw.Book("MyFile.xlsx") sheet1 = wb.sheets[0] myrange = sheet1.range('A1:C4') assert len(myrange.columns) == 3 # or myrange.columns.count myrange.columns[0].value = 'a' assert myrange.columns[2] == sheet1.range('C1:C4') assert myrange.columns(2) == sheet1.range('B1:B4') for c in myrange.columns: print(c.address) """ def __init__(self, rng): self.rng = rng def __len__(self): """ Returns the number of columns. .. versionadded:: 0.9.0 """ return self.rng.shape[1] count = property(__len__) def autofit(self): """ Autofits the width of the columns. """ self.rng.impl.autofit(axis="c") def __iter__(self): for j in range(0, self.rng.shape[1]): yield self.rng[:, j] def __call__(self, key): return self.rng[:, key - 1] def __getitem__(self, key): if isinstance(key, slice): return RangeColumns(rng=self.rng[:, key]) elif isinstance(key, int): return self.rng[:, key] else: raise TypeError( "Indices must be integers or slices, not %s" % type(key).__name__ ) def __repr__(self): return "{}({})".format(self.__class__.__name__, repr(self.rng))
RangeColumns
python
getsentry__sentry
tests/sentry/api/endpoints/test_organization_recent_searches.py
{ "start": 7466, "end": 8990 }
class ____(APITestCase): endpoint = "sentry-api-0-organization-recent-searches" method = "post" @cached_property def organization(self): return self.create_organization() @cached_property def user(self): user = self.create_user("test@test.com") self.create_team(members=[user], organization=self.organization) return user def test(self) -> None: self.login_as(self.user) search_type = 1 query = "something" the_date = datetime(2019, 1, 1, 1, 1, 1, tzinfo=UTC) with freeze_time(the_date): response = self.get_response(self.organization.slug, type=search_type, query=query) assert response.status_code == 201 assert RecentSearch.objects.filter( organization=self.organization, user_id=self.user.id, type=search_type, query=query, last_seen=the_date, ).exists() the_date = datetime(2019, 1, 1, 2, 2, 2, tzinfo=UTC) with freeze_time(the_date): response = self.get_response(self.organization.slug, type=search_type, query=query) assert response.status_code == 204, response.content assert RecentSearch.objects.filter( organization=self.organization, user_id=self.user.id, type=search_type, query=query, last_seen=the_date, ).exists()
RecentSearchesCreateTest
python
charliermarsh__ruff
crates/ruff_python_formatter/resources/test/fixtures/ruff/statement/top_level.py
{ "start": 253, "end": 274 }
class ____(Bar): ...
Foo
python
astropy__astropy
astropy/utils/masked/tests/test_masked.py
{ "start": 33895, "end": 33979 }
class ____(MaskedOperatorTests, LongitudeSetup): pass
TestMaskedLongitudeOperators
python
airbytehq__airbyte
airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/build_image/steps/java_connectors.py
{ "start": 665, "end": 2963 }
class ____(BuildConnectorImagesBase): """ A step to build Java connector images using the distTar Gradle task. """ async def _run(self, dist_dir: Directory) -> StepResult: dist_tar: File try: dir_files = await dist_dir.entries() tar_files = [f for f in dir_files if f.endswith(".tar")] num_files = len(tar_files) if num_files != 1: error_message = ( "The distribution tar file for the current java connector was not built." if num_files == 0 else "More than one distribution tar file was built for the current java connector." ) return StepResult(step=self, status=StepStatus.FAILURE, stderr=error_message) dist_tar = dist_dir.file(tar_files[0]) except QueryError as e: return StepResult(step=self, status=StepStatus.FAILURE, stderr=str(e)) return await super()._run(dist_tar) async def _build_connector(self, platform: Platform, dist_tar: File) -> Container: return await java.with_airbyte_java_connector(self.context, dist_tar, platform) async def run_connector_build(context: ConnectorContext) -> StepResult: """Create the java connector distribution tar file and build the connector image.""" if context.use_host_gradle_dist_tar and context.is_local: # Special case: use a local dist tar to speed up local development. dist_dir = await context.dagger_client.host().directory(dist_tar_directory_path(context), include=["*.tar"]) # Speed things up by only building for the local platform. return await BuildConnectorImages(context).run(dist_dir) # Default case: distribution tar is built by the dagger pipeline. build_connector_tar_result = await BuildConnectorDistributionTar(context).run() if build_connector_tar_result.status is not StepStatus.SUCCESS: return build_connector_tar_result dist_dir = await build_connector_tar_result.output.directory("build/distributions") return await BuildConnectorImages(context).run(dist_dir) def dist_tar_directory_path(context: ConnectorContext) -> str: return f"{context.connector.code_directory}/build/distributions"
BuildConnectorImages
python
google__pytype
pytype/tests/test_abc2.py
{ "start": 129, "end": 6909 }
class ____(test_base.BaseTest): """Tests for @abc.abstractmethod.""" def test_no_skip_call(self): self.Check( """ import abc class Example(metaclass=abc.ABCMeta): @abc.abstractmethod def foo(self) -> int: return None """, skip_repeat_calls=False, ) def test_multiple_inheritance_builtins(self): self.Check(""" import abc class Foo(object, metaclass=abc.ABCMeta): pass class Bar1(Foo, tuple): pass class Bar2(Foo, bytes): pass class Bar3(Foo, str): pass class Bar4(Foo, bytearray): pass class Bar5(Foo, dict): pass class Bar6(Foo, list): pass class Bar7(Foo, set): pass class Bar8(Foo, frozenset): pass class Bar9(Foo, memoryview): pass class BarA(Foo, range): pass Bar1() Bar2() Bar3() Bar4() Bar5() Bar6() Bar7() Bar8() Bar9(b"") BarA(0) """) def test_abstractproperty(self): ty, errors = self.InferWithErrors(""" import abc class Foo(metaclass=abc.ABCMeta): @abc.abstractproperty def foo(self): return 42 class Bar(Foo): @property def foo(self): return super(Bar, self).foo v1 = Foo().foo # not-instantiable[e] v2 = Bar().foo """) self.assertTypesMatchPytd( ty, """ import abc from typing import Annotated, Any v1 = ... # type: Any v2 = ... # type: int class Bar(Foo): foo = ... # type: Annotated[int, 'property'] class Foo(metaclass=abc.ABCMeta): foo = ... # type: Annotated[Any, 'property'] """, ) self.assertErrorRegexes(errors, {"e": r"Foo.*foo"}) def test_dictviews(self): self.Check(""" from collections import abc from typing import Dict d: Dict[str, int] = {} abc.ItemsView(d) abc.KeysView(d) abc.ValuesView(d) """) def test_instantiate_abstract_class_annotation(self): # When a function parameter is annotated as `Type[A]`, where A is abstract, # presumably the intent is for callers to pass in concrete subclasses of A, # so we should not raise an error if A is instantiated in the body. self.Check(""" import abc from typing import Type class A(metaclass=abc.ABCMeta): @abc.abstractmethod def a(self): pass def f(x: Type[A]): return x() """) def test_instantiate_abstract_pytdclass_annotation(self): # When a function parameter is annotated as `Type[A]`, where A is abstract, # presumably the intent is for callers to pass in concrete subclasses of A, # so we should not raise an error if A is instantiated in the body. with test_utils.Tempdir() as d: d.create_file( "foo.pyi", """ import abc class A(metaclass=abc.ABCMeta): @abc.abstractmethod def a(self) -> None: ... """, ) self.Check( """ import foo from typing import Type def f(x: Type[foo.A]): return x() """, pythonpath=[d.path], ) def test_instantiate_generic_abstract_class(self): self.Check(""" import abc from typing import Generic, Type, TypeVar T = TypeVar('T') class A(Generic[T], abc.ABC): @abc.abstractmethod def a(self): ... def f(x: Type[A[int]]): return x() """) def test_instantiate_abstract_class_in_own_method(self): self.Check(""" import abc class Foo(abc.ABC): @abc.abstractmethod def f(self): ... @classmethod def g(cls): return cls() """) def test_abstract_classmethod(self): self.Check(""" import abc class Foo(abc.ABC): @classmethod @abc.abstractmethod def f(cls) -> str: ... """) def test_bad_abstract_classmethod(self): errors = self.CheckWithErrors(""" import abc class Foo: # ignored-abstractmethod[e] @classmethod @abc.abstractmethod def f(cls) -> str: ... # bad-return-type """) self.assertErrorSequences(errors, {"e": ["on method Foo.f"]}) def test_bad_abstract_pyi_method(self): with self.DepTree([( "foo.pyi", """ import abc class Foo(abc.ABC): @abc.abstractmethod def f(self) -> int: ... 
""", )]): self.CheckWithErrors(""" import foo class Bar: # ignored-abstractmethod f = foo.Foo.f """) def test_abstract_property(self): # Regression test for a crash when the decorators were applied in the wrong # order. errors = self.CheckWithErrors(""" import abc class Foo(abc.ABC): @abc.abstractmethod # wrong-arg-types[e]>=3.11 @property def f(self) -> str: # wrong-arg-types[e]<3.11 return 'a' @property @abc.abstractmethod def g(self) -> str: return 'a' """) self.assertErrorSequences( errors, {"e": ["Expected", "Callable", "Actual", "property"]} ) def test_instantiate_abcmeta(self): self.Check(""" import abc ABC = abc.ABCMeta('ABC', (object,), {}) class Foo(ABC): @abc.abstractmethod def f(self): pass """) def test_ignored_abstractmethod_nested(self): self.CheckWithErrors(""" import abc def f(): class C: # ignored-abstractmethod @abc.abstractmethod def f(self): pass """) def test_abstractmethod_variants(self): # TODO(rechen): If we add a return type annotation to g, pytype reports # [bad-return-type] despite the method being abstract. self.Check(""" import abc class C(abc.ABC): @abc.abstractclassmethod def f(cls) -> int: ... @abc.abstractstaticmethod def g(): ... """) def test_inference(self): ty = self.Infer(""" from abc import abstractclassmethod from abc import abstractmethod from abc import abstractproperty from abc import abstractstaticmethod """) self.assertTypesMatchPytd( ty, """ import abc from typing import Callable, Type, TypeVar abstractclassmethod: Type[abc.abstractclassmethod] abstractproperty: Type[abc.abstractproperty] abstractstaticmethod: Type[abc.abstractstaticmethod] _FuncT = TypeVar('_FuncT', bound=Callable) def abstractmethod(funcobj: _FuncT) -> _FuncT: ... """, ) if __name__ == "__main__": test_base.main()
AbstractMethodTests
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pycodestyle/E70.py
{ "start": 893, "end": 944 }
class ____: ... #: def f(): ... #: E701:1:8 E702:1:13
C
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/compiler.py
{ "start": 272177, "end": 273130 }
class ____(GenericTypeCompiler): def process(self, type_, **kw): try: _compiler_dispatch = type_._compiler_dispatch except AttributeError: return self._visit_unknown(type_, **kw) else: return _compiler_dispatch(self, **kw) def __getattr__(self, key): if key.startswith("visit_"): return self._visit_unknown else: raise AttributeError(key) def _visit_unknown(self, type_, **kw): if type_.__class__.__name__ == type_.__class__.__name__.upper(): return type_.__class__.__name__ else: return repr(type_) def visit_null(self, type_, **kw): return "NULL" def visit_user_defined(self, type_, **kw): try: get_col_spec = type_.get_col_spec except AttributeError: return repr(type_) else: return get_col_spec(**kw)
StrSQLTypeCompiler
python
django__django
tests/ordering/models.py
{ "start": 1888, "end": 2051 }
class ____(models.Model): parent = models.ForeignKey(OrderedByExpression, models.CASCADE) class Meta: ordering = ["parent"]
OrderedByExpressionChild
python
django__django
tests/get_or_create/tests.py
{ "start": 23113, "end": 29256 }
class ____(TransactionTestCase): available_apps = ["get_or_create"] @skipUnlessDBFeature("has_select_for_update") @skipUnlessDBFeature("supports_transactions") def test_updates_in_transaction(self): """ Objects are selected and updated in a transaction to avoid race conditions. This test forces update_or_create() to hold the lock in another thread for a relatively long time so that it can update while it holds the lock. The updated field isn't a field in 'defaults', so update_or_create() shouldn't have an effect on it. """ lock_status = {"has_grabbed_lock": False} def birthday_sleep(): lock_status["has_grabbed_lock"] = True time.sleep(0.5) return date(1940, 10, 10) def update_birthday_slowly(): Person.objects.update_or_create( first_name="John", defaults={"birthday": birthday_sleep} ) # Avoid leaking connection for Oracle connection.close() def lock_wait(): # timeout after ~0.5 seconds for i in range(20): time.sleep(0.025) if lock_status["has_grabbed_lock"]: return True return False Person.objects.create( first_name="John", last_name="Lennon", birthday=date(1940, 10, 9) ) # update_or_create in a separate thread t = Thread(target=update_birthday_slowly) before_start = datetime.now() t.start() if not lock_wait(): self.skipTest("Database took too long to lock the row") # Update during lock Person.objects.filter(first_name="John").update(last_name="NotLennon") after_update = datetime.now() # Wait for thread to finish t.join() # The update remains and it blocked. updated_person = Person.objects.get(first_name="John") self.assertGreater(after_update - before_start, timedelta(seconds=0.5)) self.assertEqual(updated_person.last_name, "NotLennon") @skipUnlessDBFeature("has_select_for_update") @skipUnlessDBFeature("supports_transactions") def test_creation_in_transaction(self): """ Objects are selected and updated in a transaction to avoid race conditions. This test checks the behavior of update_or_create() when the object doesn't already exist, but another thread creates the object before update_or_create() does and then attempts to update the object, also before update_or_create(). It forces update_or_create() to hold the lock in another thread for a relatively long time so that it can update while it holds the lock. The updated field isn't a field in 'defaults', so update_or_create() shouldn't have an effect on it. """ locked_for_update = Event() save_allowed = Event() def wait_or_fail(event, message): if not event.wait(5): raise AssertionError(message) def birthday_yield(): # At this point the row should be locked as create or update # defaults are only called once the SELECT FOR UPDATE is issued. locked_for_update.set() # Yield back the execution to the main thread until it allows # save() to proceed. save_allowed.clear() return date(1940, 10, 10) person_save = Person.save def wait_for_allowed_save(*args, **kwargs): wait_or_fail(save_allowed, "Test took too long to allow save") return person_save(*args, **kwargs) def update_person(): try: with patch.object(Person, "save", wait_for_allowed_save): Person.objects.update_or_create( first_name="John", defaults={"last_name": "Doe", "birthday": birthday_yield}, ) finally: # Avoid leaking connection for Oracle. connection.close() t = Thread(target=update_person) t.start() wait_or_fail(locked_for_update, "Database took too long to lock row") # Create object *after* initial attempt by update_or_create to get obj # but before creation attempt. 
person = Person( first_name="John", last_name="Lennon", birthday=date(1940, 10, 9) ) # Don't use person.save() as it's gated by the save_allowed event. person_save(person, force_insert=True) # Now that the row is created allow the update_or_create() logic to # attempt a save(force_insert) that will inevitably fail and wait # until it yields back execution after performing a subsequent # locked select for update with an intent to save(force_update). locked_for_update.clear() save_allowed.set() wait_or_fail(locked_for_update, "Database took too long to lock row") allow_save = Timer(0.5, save_allowed.set) before_start = datetime.now() allow_save.start() # The following update() should block until the update_or_create() # initiated save() is allowed to proceed by the `allow_save` timer # setting `save_allowed` after 0.5 seconds. Person.objects.filter(first_name="John").update(last_name="NotLennon") after_update = datetime.now() # Wait for thread to finish. t.join() # Check call to update_or_create() succeeded and the subsequent # (blocked) call to update(). updated_person = Person.objects.get(first_name="John") # Confirm update_or_create() performed an update. self.assertEqual(updated_person.birthday, date(1940, 10, 10)) # Confirm update() was the last statement to run. self.assertEqual(updated_person.last_name, "NotLennon") # Confirm update() blocked at least the duration of the timer. self.assertGreater(after_update - before_start, timedelta(seconds=0.5))
UpdateOrCreateTransactionTests
python
ray-project__ray
python/ray/train/v2/_internal/execution/worker_group/state.py
{ "start": 474, "end": 1250 }
class ____: """Ongoing state of an active worker group. Attributes: start_time: The time when the worker group was started. workers: The workers in the worker group. These should always be in sorted order by world rank. placement_group: The placement group for the worker group. sync_actor: The synchronization actor for the worker group. """ start_time: float placement_group: PlacementGroup workers: List[Worker] sync_actor: ActorHandle @property def num_workers(self) -> int: return len(self.workers) def shutdown(self): _shutdown_workers(self.workers) _shutdown_placement_group(self.placement_group) _shutdown_sync_actor(self.sync_actor)
WorkerGroupState
python
pytorch__pytorch
torch/fx/experimental/proxy_tensor.py
{ "start": 42665, "end": 43659 }
class ____: """ Wrapper around a dictionary that will hash SymInts with their nodes """ def __init__(self) -> None: self.sym_node_dict: dict[PySymType, _PySymProxyType] = {} def __setitem__(self, key: PySymType, value: _PySymProxyType) -> None: self.sym_node_dict[key.node] = value def __getitem__(self, key: PySymType) -> _PySymProxyType: return self.sym_node_dict[key.node] def __contains__(self, key: PySymType) -> bool: return key.node in self.sym_node_dict def get( self, key: PySymType, default: Optional[_PySymProxyType] = None ) -> _PySymProxyType: # dict.get()'s annotation doesn't accept `None` when the value type # isn't Optional. return self.sym_node_dict.get(key.node, default) # type: ignore[arg-type, return-value] def __iter__(self) -> Any: raise NotImplementedError def __len__(self) -> int: return len(self.sym_node_dict) @dataclass
_SymNodeDict
python
altair-viz__altair
altair/vegalite/v6/schema/core.py
{ "start": 428775, "end": 428961 }
class ____(VegaLiteSchema): """FontStyle schema wrapper.""" _schema = {"$ref": "#/definitions/FontStyle"} def __init__(self, *args): super().__init__(*args)
FontStyle
python
davidhalter__jedi
jedi/inference/value/instance.py
{ "start": 6760, "end": 11416 }
class ____(AbstractInstanceValue): @property def array_type(self): name = self.class_value.py__name__() if name in ['list', 'set', 'dict'] \ and self.parent_context.get_root_context().is_builtins_module(): return name return None @property def name(self): return ValueName(self, self.class_value.name.tree_name) def get_filters(self, origin_scope=None, include_self_names=True): class_value = self.get_annotated_class_object() if include_self_names: for cls in class_value.py__mro__(): if not cls.is_compiled(): # In this case we're excluding compiled objects that are # not fake objects. It doesn't make sense for normal # compiled objects to search for self variables. yield SelfAttributeFilter(self, class_value, cls.as_context(), origin_scope) class_filters = class_value.get_filters( origin_scope=origin_scope, is_instance=True, ) for f in class_filters: if isinstance(f, ClassFilter): yield InstanceClassFilter(self, f) elif isinstance(f, CompiledValueFilter): yield CompiledInstanceClassFilter(self, f) else: # Propably from the metaclass. yield f @inference_state_method_cache() def create_instance_context(self, class_context, node): new = node while True: func_node = new new = search_ancestor(new, 'funcdef', 'classdef') if class_context.tree_node is new: func = FunctionValue.from_context(class_context, func_node) bound_method = BoundMethod(self, class_context, func) if func_node.name.value == '__init__': context = bound_method.as_context(self._arguments) else: context = bound_method.as_context() break return context.create_context(node) def py__getattribute__alternatives(self, string_name): ''' Since nothing was inferred, now check the __getattr__ and __getattribute__ methods. Stubs don't need to be checked, because they don't contain any logic. ''' if self.is_stub(): return NO_VALUES name = compiled.create_simple_object(self.inference_state, string_name) # This is a little bit special. `__getattribute__` is in Python # executed before `__getattr__`. But: I know no use case, where # this could be practical and where Jedi would return wrong types. # If you ever find something, let me know! # We are inversing this, because a hand-crafted `__getattribute__` # could still call another hand-crafted `__getattr__`, but not the # other way around. if is_big_annoying_library(self.parent_context): return NO_VALUES names = (self.get_function_slot_names('__getattr__') or self.get_function_slot_names('__getattribute__')) return self.execute_function_slots(names, name) def py__next__(self, contextualized_node=None): name = u'__next__' next_slot_names = self.get_function_slot_names(name) if next_slot_names: yield LazyKnownValues( self.execute_function_slots(next_slot_names) ) else: debug.warning('Instance has no __next__ function in %s.', self) def py__call__(self, arguments): names = self.get_function_slot_names('__call__') if not names: # Means the Instance is not callable. return super().py__call__(arguments) return ValueSet.from_sets(name.infer().execute(arguments) for name in names) def py__get__(self, instance, class_value): """ obj may be None. """ # Arguments in __get__ descriptors are obj, class. # `method` is the new parent of the array, don't know if that's good. 
for cls in self.class_value.py__mro__(): result = cls.py__get__on_class(self, instance, class_value) if result is not NotImplemented: return result names = self.get_function_slot_names('__get__') if names: if instance is None: instance = compiled.builtin_from_name(self.inference_state, 'None') return self.execute_function_slots(names, instance, class_value) else: return ValueSet([self])
_BaseTreeInstance
python
weaviate__weaviate-python-client
weaviate/collections/classes/config.py
{ "start": 11013, "end": 11317 }
class ____(_GenerativeProvider): generative: Union[GenerativeSearches, _EnumLikeStr] = Field( default=GenerativeSearches.MISTRAL, frozen=True, exclude=True ) temperature: Optional[float] model: Optional[str] maxTokens: Optional[int] baseURL: Optional[str]
_GenerativeMistral
python
tensorflow__tensorflow
tensorflow/python/framework/extension_type.py
{ "start": 21123, "end": 23520 }
class ____: """Codec for `tf.ExtensionTypeSpec`.""" def can_encode(self, pyobj): """Returns true if `pyobj` can be encoded as an ExtensionTypeSpec.""" if isinstance(pyobj, ExtensionTypeSpec): try: type_spec_registry.get_name(type(pyobj)) return True except ValueError: return False return False def do_encode(self, extension_type_spec_value, encode_fn): """Returns an encoded proto for the given `tf.ExtensionTypeSpec`.""" type_spec_class_name = type_spec_registry.get_name( type(extension_type_spec_value) ) type_state = extension_type_spec_value._serialize() # pylint: disable=protected-access num_flat_components = len( nest.flatten( extension_type_spec_value._component_specs, expand_composites=True # pylint: disable=protected-access ) ) encoded_type_spec = struct_pb2.StructuredValue() encoded_type_spec.type_spec_value.CopyFrom( struct_pb2.TypeSpecProto( type_spec_class=struct_pb2.TypeSpecProto.EXTENSION_TYPE_SPEC, type_state=encode_fn(type_state), type_spec_class_name=type_spec_class_name, num_flat_components=num_flat_components, ) ) return encoded_type_spec def can_decode(self, value): """Returns true if `value` can be decoded into a `tf.ExtensionTypeSpec`.""" if value.HasField('type_spec_value'): type_spec_class_enum = value.type_spec_value.type_spec_class return ( type_spec_class_enum == struct_pb2.TypeSpecProto.EXTENSION_TYPE_SPEC ) return False def do_decode(self, value, decode_fn): """Returns the `tf.TypeSpec` encoded by the proto `value`.""" type_spec_proto = value.type_spec_value class_name = type_spec_proto.type_spec_class_name try: type_spec_class = type_spec_registry.lookup(class_name) except ValueError: type_spec_class = AnonymousExtensionTypeSpec warnings.warn( f"The type '{class_name}' has not been registered. " 'Falling back to using AnonymousExtensionTypeSpec ' 'instead.' ) # pylint: disable=protected-access return type_spec_class._deserialize(decode_fn(type_spec_proto.type_state)) nested_structure_coder.register_codec(_ExtensionTypeSpecCodec()) @tf_export('experimental.ExtensionTypeBatchEncoder')
_ExtensionTypeSpecCodec
python
Lightning-AI__lightning
src/lightning/pytorch/utilities/combined_loader.py
{ "start": 6430, "end": 7226 }
class ____(_ModeIterator): @override def __next__(self) -> _ITERATOR_RETURN: n = len(self.iterators) out = [None] * n all_exhausted = True for i in range(n): with contextlib.suppress(StopIteration): out[i] = next(self.iterators[i]) all_exhausted = False if all_exhausted: raise StopIteration index = self._idx self._idx += 1 return out, index, 0 @override def __len__(self) -> int: lengths = _get_iterables_lengths(self.iterables) if self.limits is not None: return max(min(length, limit) for length, limit in zip(lengths, self.limits)) # type: ignore[return-value] return max(lengths) # type: ignore[return-value]
_MaxSize
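A plain-Python sketch of the "max size" combination rule implemented above: keep yielding until every iterator is exhausted, filling missing slots with None (the Lightning class additionally tracks an index and honours per-iterable limits):

def max_size(*iterables):
    iterators = [iter(it) for it in iterables]
    while True:
        out, all_exhausted = [None] * len(iterators), True
        for i, it in enumerate(iterators):
            try:
                out[i] = next(it)
                all_exhausted = False
            except StopIteration:
                pass
        if all_exhausted:
            return
        yield out


print(list(max_size([1, 2, 3], "ab")))  # [[1, 'a'], [2, 'b'], [3, None]]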
python
qdrant__qdrant-client
qdrant_client/http/models/models.py
{ "start": 107482, "end": 107750 }
class ____(BaseModel): direction: "ReshardingDirection" = Field(..., description="") shard_id: int = Field(..., description="") peer_id: int = Field(..., description="") shard_key: Optional["ShardKey"] = Field(default=None, description="")
ReshardingInfo
python
wandb__wandb
wandb/vendor/pygments/lexers/html.py
{ "start": 3374, "end": 6000 }
class ____(RegexLexer): """ A lexer for DTDs (Document Type Definitions). .. versionadded:: 1.5 """ flags = re.MULTILINE | re.DOTALL name = 'DTD' aliases = ['dtd'] filenames = ['*.dtd'] mimetypes = ['application/xml-dtd'] tokens = { 'root': [ include('common'), (r'(<!ELEMENT)(\s+)(\S+)', bygroups(Keyword, Text, Name.Tag), 'element'), (r'(<!ATTLIST)(\s+)(\S+)', bygroups(Keyword, Text, Name.Tag), 'attlist'), (r'(<!ENTITY)(\s+)(\S+)', bygroups(Keyword, Text, Name.Entity), 'entity'), (r'(<!NOTATION)(\s+)(\S+)', bygroups(Keyword, Text, Name.Tag), 'notation'), (r'(<!\[)([^\[\s]+)(\s*)(\[)', # conditional sections bygroups(Keyword, Name.Entity, Text, Keyword)), (r'(<!DOCTYPE)(\s+)([^>\s]+)', bygroups(Keyword, Text, Name.Tag)), (r'PUBLIC|SYSTEM', Keyword.Constant), (r'[\[\]>]', Keyword), ], 'common': [ (r'\s+', Text), (r'(%|&)[^;]*;', Name.Entity), ('<!--', Comment, 'comment'), (r'[(|)*,?+]', Operator), (r'"[^"]*"', String.Double), (r'\'[^\']*\'', String.Single), ], 'comment': [ ('[^-]+', Comment), ('-->', Comment, '#pop'), ('-', Comment), ], 'element': [ include('common'), (r'EMPTY|ANY|#PCDATA', Keyword.Constant), (r'[^>\s|()?+*,]+', Name.Tag), (r'>', Keyword, '#pop'), ], 'attlist': [ include('common'), (r'CDATA|IDREFS|IDREF|ID|NMTOKENS|NMTOKEN|ENTITIES|ENTITY|NOTATION', Keyword.Constant), (r'#REQUIRED|#IMPLIED|#FIXED', Keyword.Constant), (r'xml:space|xml:lang', Keyword.Reserved), (r'[^>\s|()?+*,]+', Name.Attribute), (r'>', Keyword, '#pop'), ], 'entity': [ include('common'), (r'SYSTEM|PUBLIC|NDATA', Keyword.Constant), (r'[^>\s|()?+*,]+', Name.Entity), (r'>', Keyword, '#pop'), ], 'notation': [ include('common'), (r'SYSTEM|PUBLIC', Keyword.Constant), (r'[^>\s|()?+*,]+', Name.Attribute), (r'>', Keyword, '#pop'), ], } def analyse_text(text): if not looks_like_xml(text) and \ ('<!ELEMENT' in text or '<!ATTLIST' in text or '<!ENTITY' in text): return 0.8
DtdLexer
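Outside the vendored copy, the same lexer ships with upstream Pygments; a usage sketch using the upstream import path rather than the wandb vendoring:

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import DtdLexer  # upstream location of the class shown above

dtd = '<!ELEMENT note (to, from, heading, body)>'
print(highlight(dtd, DtdLexer(), TerminalFormatter()))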
python
python-poetry__poetry
src/poetry/console/commands/lock.py
{ "start": 269, "end": 1136 }
class ____(InstallerCommand): name = "lock" description = "Locks the project dependencies." options: ClassVar[list[Option]] = [ option( "regenerate", None, "Ignore existing lock file" " and overwrite it with a new lock file created from scratch.", ), ] help = """ The <info>lock</info> command reads the <comment>pyproject.toml</> file from the current directory, processes it, and locks the dependencies in the\ <comment>poetry.lock</> file. By default, packages that have already been added to the lock file before will not be updated. <info>poetry lock</info> """ loggers: ClassVar[list[str]] = ["poetry.repositories.pypi_repository"] def handle(self) -> int: self.installer.lock(update=self.option("regenerate")) return self.installer.run()
LockCommand
python
pypa__pip
src/pip/_internal/operations/install/wheel.py
{ "start": 12379, "end": 13951 }
class ____: def __init__( self, src_record_path: RecordPath, dest_path: str, zip_file: ZipFile ) -> None: self.src_record_path = src_record_path self.dest_path = dest_path self._zip_file = zip_file self.changed = False def _getinfo(self) -> ZipInfo: return self._zip_file.getinfo(self.src_record_path) def save(self) -> None: # When we open the output file below, any existing file is truncated # before we start writing the new contents. This is fine in most # cases, but can cause a segfault if pip has loaded a shared # object (e.g. from pyopenssl through its vendored urllib3) # Since the shared object is mmap'd an attempt to call a # symbol in it will then cause a segfault. Unlinking the file # allows writing of new contents while allowing the process to # continue to use the old copy. if os.path.exists(self.dest_path): os.unlink(self.dest_path) zipinfo = self._getinfo() # optimization: the file is created by open(), # skip the decompression when there is 0 bytes to decompress. with open(self.dest_path, "wb") as dest: if zipinfo.file_size > 0: with self._zip_file.open(zipinfo) as f: blocksize = min(zipinfo.file_size, 1024 * 1024) shutil.copyfileobj(f, dest, blocksize) if zip_item_is_executable(zipinfo): set_extracted_file_to_default_mode_plus_executable(self.dest_path)
ZipBackedFile
python
tensorflow__tensorflow
tensorflow/python/ops/init_ops.py
{ "start": 58634, "end": 60068 }
class ____(VarianceScaling): """The Glorot uniform initializer, also called Xavier uniform initializer. It draws samples from a uniform distribution within [-limit, limit] where `limit` is `sqrt(6 / (fan_in + fan_out))` where `fan_in` is the number of input units in the weight tensor and `fan_out` is the number of output units in the weight tensor. Args: seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported. References: [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html) ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)) """ @deprecated_args(None, "Call initializer instance with the dtype argument instead " "of passing it to the constructor", "dtype") def __init__(self, seed=None, dtype=dtypes.float32): super(GlorotUniform, self).__init__( scale=1.0, mode="fan_avg", distribution="uniform", seed=seed) def get_config(self): return {"seed": self.seed, "dtype": self.dtype.name} @tf_export(v1=["glorot_normal_initializer", "initializers.glorot_normal"]) @deprecation.deprecated_endpoints("glorot_normal_initializer", "initializers.glorot_normal")
GlorotUniform
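The sampling limit described in the docstring is easy to verify by hand; a small helper using the stated fan-in/fan-out convention:

import math


def glorot_uniform_limit(fan_in: int, fan_out: int) -> float:
    # limit = sqrt(6 / (fan_in + fan_out)), per the docstring above
    return math.sqrt(6.0 / (fan_in + fan_out))


print(glorot_uniform_limit(256, 128))  # 0.125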
python
scipy__scipy
scipy/linalg/tests/test_matfuncs.py
{ "start": 22278, "end": 30585 }
class ____: def test_round_trip_random_complex(self): rng = np.random.default_rng(1234) for p in range(1, 5): for n in range(1, 5): M_unscaled = (rng.standard_normal((n, n)) + 1j * rng.standard_normal((n, n))) for scale in np.logspace(-4, 4, 9): M = M_unscaled * scale M_root = fractional_matrix_power(M, 1/p) M_round_trip = np.linalg.matrix_power(M_root, p) assert_allclose(M_round_trip, M) def test_round_trip_random_float(self): # This test is more annoying because it can hit the branch cut; # this happens when the matrix has an eigenvalue # with no imaginary component and with a real negative component, # and it means that the principal branch does not exist. rng = np.random.default_rng(1234) for p in range(1, 5): for n in range(1, 5): M_unscaled = rng.standard_normal((n, n)) for scale in np.logspace(-4, 4, 9): M = M_unscaled * scale M_root = fractional_matrix_power(M, 1/p) M_round_trip = np.linalg.matrix_power(M_root, p) assert_allclose(M_round_trip, M) def test_larger_abs_fractional_matrix_powers(self): rng = np.random.default_rng(1234) for n in (2, 3, 5): for i in range(10): M = rng.standard_normal((n, n)) + 1j * rng.standard_normal((n, n)) M_one_fifth = fractional_matrix_power(M, 0.2) # Test the round trip. M_round_trip = np.linalg.matrix_power(M_one_fifth, 5) assert_allclose(M, M_round_trip) # Test a large abs fractional power. X = fractional_matrix_power(M, -5.4) Y = np.linalg.matrix_power(M_one_fifth, -27) assert_allclose(X, Y) # Test another large abs fractional power. X = fractional_matrix_power(M, 3.8) Y = np.linalg.matrix_power(M_one_fifth, 19) assert_allclose(X, Y) def test_random_matrices_and_powers(self): # Each independent iteration of this fuzz test picks random parameters. # It tries to hit some edge cases. rng = np.random.default_rng(1726500458620605) nsamples = 20 for i in range(nsamples): # Sample a matrix size and a random real power. n = rng.integers(1, 5) p = rng.random() # Sample a random real or complex matrix. matrix_scale = np.exp(rng.integers(-4, 5)) A = rng.random(size=[n, n]) if [True, False][rng.choice(2)]: A = A + 1j * rng.random(size=[n, n]) A = A * matrix_scale # Check a couple of analytically equivalent ways # to compute the fractional matrix power. # These can be compared because they both use the principal branch. A_power = fractional_matrix_power(A, p) A_logm = logm(A) A_power_expm_logm = expm(A_logm * p) assert_allclose(A_power, A_power_expm_logm) def test_al_mohy_higham_2012_experiment_1(self): # Fractional powers of a tricky upper triangular matrix. A = _get_al_mohy_higham_2012_experiment_1() # Test remainder matrix power. A_funm_sqrt = funm(A, np.sqrt) A_sqrtm = sqrtm(A) A_rem_power = _matfuncs_inv_ssq._remainder_matrix_power(A, 0.5) A_power = fractional_matrix_power(A, 0.5) assert_allclose(A_rem_power, A_power, rtol=1e-11) assert_allclose(A_sqrtm, A_power) assert_allclose(A_sqrtm, A_funm_sqrt) # Test more fractional powers. 
for p in (1/2, 5/3): A_power = fractional_matrix_power(A, p) A_round_trip = fractional_matrix_power(A_power, 1/p) assert_allclose(A_round_trip, A, rtol=1e-2) assert_allclose(np.tril(A_round_trip, 1), np.tril(A, 1)) def test_briggs_helper_function(self): rng = np.random.default_rng(1234) for a in rng.standard_normal(10) + 1j * rng.standard_normal(10): for k in range(5): x_observed = _matfuncs_inv_ssq._briggs_helper_function(a, k) x_expected = a ** np.exp2(-k) - 1 assert_allclose(x_observed, x_expected) def test_type_preservation_and_conversion(self): # The fractional_matrix_power matrix function should preserve # the type of a matrix whose eigenvalues # are positive with zero imaginary part. # Test this preservation for variously structured matrices. complex_dtype_chars = ('F', 'D', 'G') for matrix_as_list in ( [[1, 0], [0, 1]], [[1, 0], [1, 1]], [[2, 1], [1, 1]], [[2, 3], [1, 2]]): # check that the spectrum has the expected properties W = scipy.linalg.eigvals(matrix_as_list) assert_(not any(w.imag or w.real < 0 for w in W)) # Check various positive and negative powers # with absolute values bigger and smaller than 1. for p in (-2.4, -0.9, 0.2, 3.3): # check float type preservation A = np.array(matrix_as_list, dtype=float) A_power = fractional_matrix_power(A, p) assert_(A_power.dtype.char not in complex_dtype_chars) # check complex type preservation A = np.array(matrix_as_list, dtype=complex) A_power = fractional_matrix_power(A, p) assert_(A_power.dtype.char in complex_dtype_chars) # check float->complex for the matrix negation A = -np.array(matrix_as_list, dtype=float) A_power = fractional_matrix_power(A, p) assert_(A_power.dtype.char in complex_dtype_chars) def test_type_conversion_mixed_sign_or_complex_spectrum(self): complex_dtype_chars = ('F', 'D', 'G') for matrix_as_list in ( [[1, 0], [0, -1]], [[0, 1], [1, 0]], [[0, 1, 0], [0, 0, 1], [1, 0, 0]]): # check that the spectrum has the expected properties W = scipy.linalg.eigvals(matrix_as_list) assert_(any(w.imag or w.real < 0 for w in W)) # Check various positive and negative powers # with absolute values bigger and smaller than 1. for p in (-2.4, -0.9, 0.2, 3.3): # check complex->complex A = np.array(matrix_as_list, dtype=complex) A_power = fractional_matrix_power(A, p) assert_(A_power.dtype.char in complex_dtype_chars) # check float->complex A = np.array(matrix_as_list, dtype=float) A_power = fractional_matrix_power(A, p) assert_(A_power.dtype.char in complex_dtype_chars) @pytest.mark.xfail(reason='Too unstable across LAPACKs.') def test_singular(self): # Negative fractional powers do not work with singular matrices. for matrix_as_list in ( [[0, 0], [0, 0]], [[1, 1], [1, 1]], [[1, 2], [3, 6]], [[0, 0, 0], [0, 1, 1], [0, -1, 1]]): # Check fractional powers both for float and for complex types. for newtype in (float, complex): A = np.array(matrix_as_list, dtype=newtype) for p in (-0.7, -0.9, -2.4, -1.3): A_power = fractional_matrix_power(A, p) assert_(np.isnan(A_power).all()) for p in (0.2, 1.43): A_power = fractional_matrix_power(A, p) A_round_trip = fractional_matrix_power(A_power, 1/p) assert_allclose(A_round_trip, A) def test_opposite_sign_complex_eigenvalues(self): M = [[2j, 4], [0, -2j]] R = [[1+1j, 2], [0, 1-1j]] assert_allclose(np.dot(R, R), M, atol=1e-14) assert_allclose(fractional_matrix_power(M, 0.5), R, atol=1e-14)
TestFractionalMatrixPower
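A minimal sketch of the round-trip property these tests exercise, assuming SciPy's public scipy.linalg.fractional_matrix_power; the example matrix is illustrative, not taken from the tests.

import numpy as np
from scipy.linalg import fractional_matrix_power

# Take the cube root of a symmetric positive-definite matrix, then cube it
# again; the round trip should reproduce the original up to numerical noise.
A = np.array([[2.0, 1.0], [1.0, 3.0]])
root = fractional_matrix_power(A, 1 / 3)
np.testing.assert_allclose(np.linalg.matrix_power(root, 3), A, atol=1e-12)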
python
numpy__numpy
numpy/f2py/_backends/_backend.py
{ "start": 38, "end": 1151 }
class ____(ABC): def __init__( self, modulename, sources, extra_objects, build_dir, include_dirs, library_dirs, libraries, define_macros, undef_macros, f2py_flags, sysinfo_flags, fc_flags, flib_flags, setup_flags, remove_build_dir, extra_dat, ): self.modulename = modulename self.sources = sources self.extra_objects = extra_objects self.build_dir = build_dir self.include_dirs = include_dirs self.library_dirs = library_dirs self.libraries = libraries self.define_macros = define_macros self.undef_macros = undef_macros self.f2py_flags = f2py_flags self.sysinfo_flags = sysinfo_flags self.fc_flags = fc_flags self.flib_flags = flib_flags self.setup_flags = setup_flags self.remove_build_dir = remove_build_dir self.extra_dat = extra_dat @abstractmethod def compile(self) -> None: """Compile the wrapper.""" pass
Backend
python
django-import-export__django-import-export
tests/core/tests/admin_integration/test_import_security.py
{ "start": 220, "end": 2008 }
class ____(AdminTestMixin, TestCase): def test_csrf(self): self._get_url_response(self.book_process_import_url, expected_status_code=405) def test_import_file_name_in_tempdir(self): # 65 - import_file_name form field can be use to access the filesystem import_file_name = os.path.join( os.path.dirname(__file__), os.path.pardir, "exports", "books.csv" ) data = { "format": "0", "import_file_name": import_file_name, "original_file_name": "books.csv", } self._prepend_form_prefix(data) with self.assertRaises(FileNotFoundError): self._post_url_response(self.book_process_import_url, data) def test_import_buttons_visible_without_add_permission(self): # When using ImportMixin, users should be able to see the import button # without add permission (to be consistent with ImportExportMixin) original = AuthorAdmin.has_add_permission AuthorAdmin.has_add_permission = lambda self, request: False response = self._get_url_response(self.core_author_url) AuthorAdmin.has_add_permission = original self.assertContains(response, _("Import")) self.assertTemplateUsed(response, self.change_list_url) def test_import_export_buttons_visible_without_add_permission(self): # issue 38 - Export button not visible when no add permission original = BookAdmin.has_add_permission BookAdmin.has_add_permission = lambda self, request: False response = self._get_url_response(self.book_import_url) BookAdmin.has_add_permission = original self.assertContains(response, _("Export")) self.assertContains(response, _("Import"))
ImportAdminSecurityTests
python
altair-viz__altair
tests/utils/test_schemapi.py
{ "start": 3581, "end": 3740 }
class ____(_TestSchema): _schema = { "$schema": _JSON_SCHEMA_DRAFT_URL, "anyOf": [{"type": "integer"}, {"type": "string"}], }
SimpleUnion
python
dask__distributed
distributed/diagnostics/plugin.py
{ "start": 29273, "end": 29604 }
class ____(NannyPlugin): restart = True def __init__(self, environ: dict | None = None): environ = environ or {} self.environ = {k: str(v) for k, v in environ.items()} async def setup(self, nanny): nanny.env.update(self.environ) UPLOAD_DIRECTORY_MODES = ["all", "scheduler", "workers"]
Environ
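A hedged sketch of registering the nanny plugin above on a local cluster; Client.register_plugin is assumed to be available (recent dask.distributed releases only) and the environment value is illustrative.

from distributed import Client
from distributed.diagnostics.plugin import Environ

client = Client()  # local cluster; workers run under nannies by default
# Registering the plugin updates nanny.env and, because restart = True,
# restarts the worker processes so the new variables take effect.
client.register_plugin(Environ({"OMP_NUM_THREADS": 1}))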
python
pypa__pip
src/pip/_internal/exceptions.py
{ "start": 7522, "end": 8497 }
class ____(PipError): """Raised when accessing a Distribution's "METADATA" or "PKG-INFO". This signifies an inconsistency, when the Distribution claims to have the metadata file (if not, raise ``FileNotFoundError`` instead), but is not actually able to produce its content. This may be due to permission errors. """ def __init__( self, dist: BaseDistribution, metadata_name: str, ) -> None: """ :param dist: A Distribution object. :param metadata_name: The name of the metadata being accessed (can be "METADATA" or "PKG-INFO"). """ self.dist = dist self.metadata_name = metadata_name def __str__(self) -> str: # Use `dist` in the error message because its stringification # includes more information, like the version and location. return f"None {self.metadata_name} metadata found for distribution: {self.dist}"
NoneMetadataError
python
sympy__sympy
sympy/stats/crv_types.py
{ "start": 55681, "end": 57412 }
class ____(SingleContinuousDistribution): _argnames = ('b', 'eta') set = Interval(0, oo) @staticmethod def check(b, eta): _value_check(b > 0, "b must be positive") _value_check(eta > 0, "eta must be positive") def pdf(self, x): eta, b = self.eta, self.b return b*eta*exp(b*x)*exp(eta)*exp(-eta*exp(b*x)) def _cdf(self, x): eta, b = self.eta, self.b return 1 - exp(eta)*exp(-eta*exp(b*x)) def _moment_generating_function(self, t): eta, b = self.eta, self.b return eta * exp(eta) * expint(t/b, eta) def Gompertz(name, b, eta): r""" Create a Continuous Random Variable with Gompertz distribution. Explanation =========== The density of the Gompertz distribution is given by .. math:: f(x) := b \eta e^{b x} e^{\eta} \exp \left(-\eta e^{bx} \right) with :math:`x \in [0, \infty)`. Parameters ========== b : Real number, `b > 0`, a scale eta : Real number, `\eta > 0`, a shape Returns ======= RandomSymbol Examples ======== >>> from sympy.stats import Gompertz, density >>> from sympy import Symbol >>> b = Symbol("b", positive=True) >>> eta = Symbol("eta", positive=True) >>> z = Symbol("z") >>> X = Gompertz("x", b, eta) >>> density(X)(z) b*eta*exp(eta)*exp(b*z)*exp(-eta*exp(b*z)) References ========== .. [1] https://en.wikipedia.org/wiki/Gompertz_distribution """ return rv(name, GompertzDistribution, (b, eta)) #------------------------------------------------------------------------------- # Kumaraswamy distribution -----------------------------------------------------
GompertzDistribution
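A short sketch, assuming the public sympy.stats API, showing the CDF implied by the _cdf method above; symbol names mirror the docstring.

from sympy import Symbol
from sympy.stats import Gompertz, cdf

b = Symbol("b", positive=True)
eta = Symbol("eta", positive=True)
z = Symbol("z", positive=True)

X = Gompertz("x", b, eta)
cdf(X)(z)  # 1 - exp(eta)*exp(-eta*exp(b*z)), matching the _cdf method above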
python
scipy__scipy
scipy/interpolate/tests/test_interpnd.py
{ "start": 8790, "end": 15527 }
class ____: def _check_accuracy(self, func, x=None, tol=1e-6, alternate=False, rescale=False, **kw): rng = np.random.RandomState(1234) # np.random.seed(1234) if x is None: x = np.array([(0, 0), (0, 1), (1, 0), (1, 1), (0.25, 0.75), (0.6, 0.8), (0.5, 0.2)], dtype=float) if not alternate: ip = interpnd.CloughTocher2DInterpolator(x, func(x[:,0], x[:,1]), tol=1e-6, rescale=rescale) else: ip = interpnd.CloughTocher2DInterpolator((x[:,0], x[:,1]), func(x[:,0], x[:,1]), tol=1e-6, rescale=rescale) p = rng.rand(50, 2) if not alternate: a = ip(p) else: a = ip(p[:,0], p[:,1]) b = func(p[:,0], p[:,1]) try: xp_assert_close(a, b, **kw) except AssertionError: print("_check_accuracy: abs(a-b):", abs(a - b)) print("ip.grad:", ip.grad) raise def test_linear_smoketest(self): # Should be exact for linear functions, independent of triangulation funcs = [ lambda x, y: 0*x + 1, lambda x, y: 0 + x, lambda x, y: -2 + y, lambda x, y: 3 + 3*x + 14.15*y, ] for j, func in enumerate(funcs): self._check_accuracy( func, tol=1e-13, atol=1e-7, rtol=1e-7, err_msg=f"Function {j}" ) self._check_accuracy( func, tol=1e-13, atol=1e-7, rtol=1e-7, alternate=True, err_msg=f"Function (alternate) {j}" ) # check rescaling self._check_accuracy( func, tol=1e-13, atol=1e-7, rtol=1e-7, err_msg=f"Function (rescaled) {j}", rescale=True ) self._check_accuracy( func, tol=1e-13, atol=1e-7, rtol=1e-7, alternate=True, rescale=True, err_msg=f"Function (alternate, rescaled) {j}" ) def test_quadratic_smoketest(self): # Should be reasonably accurate for quadratic functions funcs = [ lambda x, y: x**2, lambda x, y: y**2, lambda x, y: x**2 - y**2, lambda x, y: x*y, ] for j, func in enumerate(funcs): self._check_accuracy( func, tol=1e-9, atol=0.22, rtol=0, err_msg=f"Function {j}" ) self._check_accuracy( func, tol=1e-9, atol=0.22, rtol=0, err_msg=f"Function {j}", rescale=True ) def test_tri_input(self): # Test at single points x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)], dtype=np.float64) y = np.arange(x.shape[0], dtype=np.float64) y = y - 3j*y tri = qhull.Delaunay(x) yi = interpnd.CloughTocher2DInterpolator(tri, y)(x) assert_almost_equal(y, yi) def test_tri_input_rescale(self): # Test at single points x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)], dtype=np.float64) y = np.arange(x.shape[0], dtype=np.float64) y = y - 3j*y tri = qhull.Delaunay(x) match = ("Rescaling is not supported when passing a " "Delaunay triangulation as ``points``.") with pytest.raises(ValueError, match=match): interpnd.CloughTocher2DInterpolator(tri, y, rescale=True)(x) def test_tripoints_input_rescale(self): # Test at single points x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)], dtype=np.float64) y = np.arange(x.shape[0], dtype=np.float64) y = y - 3j*y tri = qhull.Delaunay(x) yi = interpnd.CloughTocher2DInterpolator(tri.points, y)(x) yi_rescale = interpnd.CloughTocher2DInterpolator(tri.points, y, rescale=True)(x) assert_almost_equal(yi, yi_rescale) @pytest.mark.fail_slow(5) def test_dense(self): # Should be more accurate for dense meshes funcs = [ lambda x, y: x**2, lambda x, y: y**2, lambda x, y: x**2 - y**2, lambda x, y: x*y, lambda x, y: np.cos(2*np.pi*x)*np.sin(2*np.pi*y) ] rng = np.random.RandomState(4321) # use a different seed than the check! 
grid = np.r_[np.array([(0,0), (0,1), (1,0), (1,1)], dtype=float), rng.rand(30*30, 2)] for j, func in enumerate(funcs): self._check_accuracy( func, x=grid, tol=1e-9, atol=5e-3, rtol=1e-2, err_msg=f"Function {j}" ) self._check_accuracy( func, x=grid, tol=1e-9, atol=5e-3, rtol=1e-2, err_msg=f"Function {j}", rescale=True ) def test_wrong_ndim(self): x = np.random.randn(30, 3) y = np.random.randn(30) assert_raises(ValueError, interpnd.CloughTocher2DInterpolator, x, y) def test_pickle(self): # Test at single points rng = np.random.RandomState(1234) x = rng.rand(30, 2) y = rng.rand(30) + 1j*rng.rand(30) ip = interpnd.CloughTocher2DInterpolator(x, y) ip2 = pickle.loads(pickle.dumps(ip)) assert_almost_equal(ip(0.5, 0.5), ip2(0.5, 0.5)) def test_boundary_tri_symmetry(self): # Interpolation at neighbourless triangles should retain # symmetry with mirroring the triangle. # Equilateral triangle points = np.array([(0, 0), (1, 0), (0.5, np.sqrt(3)/2)]) values = np.array([1, 0, 0]) ip = interpnd.CloughTocher2DInterpolator(points, values) # Set gradient to zero at vertices ip.grad[...] = 0 # Interpolation should be symmetric vs. bisector alpha = 0.3 p1 = np.array([0.5 * np.cos(alpha), 0.5 * np.sin(alpha)]) p2 = np.array([0.5 * np.cos(np.pi/3 - alpha), 0.5 * np.sin(np.pi/3 - alpha)]) v1 = ip(p1) v2 = ip(p2) xp_assert_close(v1, v2) # ... and affine invariant rng = np.random.RandomState(1) A = rng.randn(2, 2) b = rng.randn(2) points = A.dot(points.T).T + b[None,:] p1 = A.dot(p1) + b p2 = A.dot(p2) + b ip = interpnd.CloughTocher2DInterpolator(points, values) ip.grad[...] = 0 w1 = ip(p1) w2 = ip(p2) xp_assert_close(w1, v1) xp_assert_close(w2, v2)
TestCloughTocher2DInterpolator
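A small usage sketch of the interpolator under test, assuming the public scipy.interpolate.CloughTocher2DInterpolator; the sample points and surface are illustrative.

import numpy as np
from scipy.interpolate import CloughTocher2DInterpolator

rng = np.random.default_rng(0)
points = rng.random((30, 2))
values = points[:, 0] ** 2 - points[:, 1] ** 2  # smooth test surface
interp = CloughTocher2DInterpolator(points, values)
print(interp(0.5, 0.5))  # close to 0.5**2 - 0.5**2 == 0.0 inside the hull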
python
rapidsai__cudf
python/cudf/cudf/core/indexing_utils.py
{ "start": 1224, "end": 19581 }
class ____: """An indexer for a scalar value.""" key: GatherMap IndexingSpec: TypeAlias = ( EmptyIndexer | MapIndexer | MaskIndexer | ScalarIndexer | SliceIndexer ) # Helpers for code-sharing between loc and iloc paths def expand_key( key: Any, frame: DataFrame | Series, method_type: Literal["iloc", "loc"] ) -> tuple[Any, ...]: """Slice-expand key to match dimension of the frame being indexed. Parameters ---------- key Key to expand frame DataFrame or Series to expand to the dimension of. Returns ------- tuple New key of length equal to the dimension of the frame. Raises ------ IndexError If the provided key is a tuple and has more entries than the frame dimension. Notes ----- If any individual entry in the key is a callable, it is called with the provided frame as argument and is required to be converted into a supported indexing type. """ dim = len(frame.shape) if ( isinstance(key, bool) or ( isinstance(key, Series) and key.dtype.kind == "b" and method_type == "loc" and len(key) != len(frame) ) or ( isinstance(key, Series) and key.dtype.kind == "b" and method_type == "iloc" ) ) and not ( frame.index.dtype.kind == "b" or isinstance(frame.index, MultiIndex) and frame.index.get_level_values(0).dtype.kind == "b" ): raise KeyError( f"{key}: boolean label can not be used without a boolean index" ) if isinstance(key, slice) and ( isinstance(key.start, bool) or isinstance(key.stop, bool) ): raise TypeError(f"{key}: boolean values can not be used in a slice") if isinstance(key, tuple): # Key potentially indexes rows and columns, slice-expand to # shape of frame if len(key) > 1 and sum(k is Ellipsis for k in key) > 1: raise IndexError("indexer may only contain one '...' entry") indexers = key + (slice(None),) * (dim - len(key)) if len(indexers) > dim: raise IndexError( f"Too many indexers: got {len(indexers)} expected {dim}" ) else: # Key indexes rows, slice-expand to shape of frame indexers = (key, *(slice(None),) * (dim - 1)) return tuple(k(frame) if callable(k) else k for k in indexers) def destructure_dataframe_indexer( key: Any, frame: DataFrame, destructure: Callable[[Any, DataFrame], tuple[Any, Any]], is_scalar: Callable[[Any, ColumnAccessor], bool], get_ca: str, ): """ Pick apart an indexing key for a DataFrame into constituent pieces. Parameters ---------- key The key to unpick. frame The DataFrame being indexed. destructure Callable to split the key into a two-tuple of row keys and column keys. is_scalar Callable to report if the column indexer produces a single column. get_ca Method name to obtain the column accessor from the frame. Returns ------- rows Indexing expression for the rows tuple Two-tuple indicating if the column indexer produces a scalar and a subsetted ColumnAccessor. Raises ------ TypeError If the column indexer is invalid. """ rows, cols = destructure(key, frame) from cudf.core.series import Series if cols is Ellipsis: cols = slice(None) elif isinstance(cols, (Index, Series)): cols = cols.to_pandas() try: ca = getattr(frame._data, get_ca)(cols) except TypeError as e: raise TypeError( "Column indices must be names, slices, " "list-like of names, or boolean mask" ) from e scalar = is_scalar(cols, ca) if scalar: assert len(ca) == 1, ( "Scalar column indexer should not produce more than one column" ) return rows, (scalar, ca) def destructure_iloc_key( key: Any, frame: Series | DataFrame ) -> tuple[Any, ...]: """ Destructure a potentially tuple-typed key into row and column indexers. Tuple arguments to iloc indexing are treated specially. 
They are picked apart into indexers for the row and column. If the number of entries is less than the number of modes of the frame, missing entries are slice-expanded. If the user-provided key is not a tuple, it is treated as if it were a singleton tuple, and then slice-expanded. Once this destructuring has occurred, any entries that are callables are then called with the indexed frame. This should return a valid indexing object for the rows (respectively columns), namely one of: - A boolean mask of the same length as the frame in the given dimension - A scalar integer that indexes the frame - An array-like of integers that index the frame - A slice that indexes the frame Integer and slice-based indexing follows usual Python conventions. Parameters ---------- key The key to destructure frame DataFrame or Series to provide context Returns ------- tuple Indexers with length equal to the dimension of the frame Raises ------ IndexError If there are too many indexers, or any individual indexer is a tuple. """ indexers = expand_key(key, frame, "iloc") if any(isinstance(k, tuple) for k in indexers): raise IndexError( "Too many indexers: can't have nested tuples in iloc indexing" ) return indexers def destructure_dataframe_iloc_indexer( key: Any, frame: DataFrame ) -> tuple[Any, tuple[bool, ColumnAccessor]]: """Destructure an index key for DataFrame iloc getitem. Parameters ---------- key Key to destructure frame DataFrame to provide context context Returns ------- tuple 2-tuple of a key for the rows and tuple of (column_index_is_scalar, ColumnAccessor) for the columns Raises ------ TypeError If the column indexer is invalid IndexError If the provided key does not destructure correctly NotImplementedError If the requested column indexer repeats columns """ return destructure_dataframe_indexer( key, frame, destructure_iloc_key, lambda col, _ca: is_integer(col), "select_by_index", ) def destructure_series_iloc_indexer(key: Any, frame: Series) -> Any: """Destructure an index key for Series iloc getitem. Parameters ---------- key Key to destructure frame Series for unpacking context Returns ------- Single key that will index the rows """ (rows,) = destructure_iloc_key(key, frame) return rows def parse_row_iloc_indexer(key: Any, n: int) -> IndexingSpec: """ Normalize and produce structured information about a row indexer. Given a row indexer that has already been destructured by :func:`destructure_iloc_key`, inspect further and produce structured information for indexing operations to act upon. Parameters ---------- key Suitably destructured key for row indexing n Length of frame to index Returns ------- IndexingSpec Structured data for indexing. A tag + parsed data. Raises ------ IndexError If a valid type of indexer is provided, but it is out of bounds TypeError If the indexing key is otherwise invalid. 
""" if key is Ellipsis: return SliceIndexer(slice(None)) elif isinstance(key, slice): return SliceIndexer(key) elif _is_scalar_or_zero_d_array(key): return ScalarIndexer(GatherMap(key, n, nullify=False)) else: key = as_column(key) if isinstance(key.dtype, CategoricalDtype): key = key.astype(key.codes.dtype) if key.dtype.kind == "b": return MaskIndexer(BooleanMask(key, n)) elif len(key) == 0: return EmptyIndexer() elif key.dtype.kind in "iu": return MapIndexer(GatherMap(key, n, nullify=False)) else: raise TypeError( "Cannot index by location " f"with non-integer key of type {type(key)}" ) def destructure_loc_key( key: Any, frame: Series | DataFrame ) -> tuple[Any, ...]: """ Destructure a potentially tuple-typed key into row and column indexers Tuple arguments to loc indexing are treated specially. They are picked apart into indexers for the row and column. If the number of entries is less than the number of modes of the frame, missing entries are slice-expanded. If the user-provided key is not a tuple, it is treated as if it were a singleton tuple, and then slice-expanded. Once this destructuring has occurred, any entries that are callables are then called with the indexed frame. This should return a valid indexing object for the rows (respectively columns), namely one of: - A boolean mask of the same length as the frame in the given dimension - A scalar label looked up in the index - A scalar integer that indexes the frame - An array-like of labels looked up in the index - A slice of the index - For multiindices, a tuple of per level indexers Slice-based indexing is on the closed interval [start, end], rather than the semi-open interval [start, end) Parameters ---------- key The key to destructure frame DataFrame or Series to provide context Returns ------- tuple of indexers with length equal to the dimension of the frame Raises ------ IndexError If there are too many indexers. """ return expand_key(key, frame, "loc") def destructure_dataframe_loc_indexer( key: Any, frame: DataFrame ) -> tuple[Any, tuple[bool, ColumnAccessor]]: """Destructure an index key for DataFrame loc getitem. Parameters ---------- key Key to destructure frame DataFrame to provide context context Returns ------- tuple 2-tuple of a key for the rows and tuple of (column_index_is_scalar, ColumnAccessor) for the columns Raises ------ TypeError If the column indexer is invalid IndexError If the provided key does not destructure correctly NotImplementedError If the requested column indexer repeats columns """ def is_scalar(name: Any, ca: ColumnAccessor) -> bool: try: return name in ca except TypeError: return False return destructure_dataframe_indexer( key, frame, destructure_loc_key, is_scalar, "select_by_label" ) def destructure_series_loc_indexer(key: Any, frame: Series) -> Any: """Destructure an index key for Series loc getitem. Parameters ---------- key Key to destructure frame Series for unpacking context Returns ------- Single key that will index the rows """ (rows,) = destructure_loc_key(key, frame) return rows def ordered_find(needles: ColumnBase, haystack: ColumnBase) -> GatherMap: """Find locations of needles in a haystack preserving order Parameters ---------- needles Labels to look for haystack Haystack to search in Returns ------- NumericalColumn Integer gather map of locations needles were found in haystack Raises ------ KeyError If not all needles were found in the haystack. If needles cannot be converted to the dtype of haystack. 
Notes ----- This sorts the gather map so that the result comes back in the order the needles were specified (and are found in the haystack). """ # Pre-process to match dtypes needle_kind = needles.dtype.kind haystack_kind = haystack.dtype.kind if haystack_kind == "O" and not isinstance(haystack.dtype, IntervalDtype): try: needles = needles.astype(haystack.dtype) except ValueError: # Pandas raise KeyError here raise KeyError("Dtype mismatch in label lookup") elif needle_kind == haystack_kind or { haystack_kind, needle_kind, }.issubset({"i", "u", "f"}): needles = needles.astype(haystack.dtype) elif needles.dtype != haystack.dtype: # Pandas raise KeyError here raise KeyError("Dtype mismatch in label lookup") # Can't always do an inner join because then we can't check if we # had missing keys (can't check the length because the entries in # the needle might appear multiple times in the haystack). left_rows, right_rows = plc.join.left_join( plc.Table([needles.to_pylibcudf(mode="read")]), plc.Table([haystack.to_pylibcudf(mode="read")]), plc.types.NullEquality.EQUAL, ) right_order = plc.copying.gather( plc.Table( [ plc.filling.sequence( len(haystack), plc.Scalar.from_py(0), plc.Scalar.from_py(1) ) ] ), right_rows, plc.copying.OutOfBoundsPolicy.NULLIFY, ).columns()[0] if right_order.null_count() > 0: raise KeyError("Not all keys in index") left_order = plc.copying.gather( plc.Table( [ plc.filling.sequence( len(needles), plc.Scalar.from_py(0), plc.Scalar.from_py(1) ) ] ), left_rows, plc.copying.OutOfBoundsPolicy.DONT_CHECK, ).columns()[0] right_rows = plc.sorting.stable_sort_by_key( plc.Table([right_rows]), plc.Table([left_order, right_order]), [plc.types.Order.ASCENDING] * 2, [plc.types.NullOrder.AFTER] * 2, ).columns()[0] return GatherMap.from_column_unchecked( type(haystack).from_pylibcudf(right_rows), # type: ignore[arg-type] len(haystack), nullify=False, ) def find_label_range_or_mask( key: slice, index: Index ) -> EmptyIndexer | SliceIndexer: """ Convert a slice of labels into a slice of positions Parameters ---------- key Slice to convert index Index to look up in Returns ------- IndexingSpec Structured data for indexing (but never a :class:`ScalarIndexer`) Raises ------ KeyError If the index is unsorted and not a DatetimeIndex """ parsed_key = index.find_label_range(key) if len(range(len(index))[parsed_key]) == 0: return EmptyIndexer() else: return SliceIndexer(parsed_key) def parse_single_row_loc_key( key: Any, index: Index, ) -> IndexingSpec: """ Turn a single label-based row indexer into structured information. This converts label-based lookups into structured positional lookups. Valid values for the key are - a slice (endpoints are looked up) - a scalar label - a boolean mask of the same length as the index - a column of labels to look up (may be empty) Parameters ---------- key Key for label-based row indexing index Index to act as haystack for labels Returns ------- IndexingSpec Structured information for indexing Raises ------ KeyError If any label is not found ValueError If labels cannot be coerced to index dtype """ n = len(index) if isinstance(key, slice): return find_label_range_or_mask(key, index) else: is_scalar = _is_scalar_or_zero_d_array(key) if is_scalar and isinstance(key, np.ndarray): key = as_column(key.item()) else: key = as_column(key) if ( isinstance(key.dtype, CategoricalDtype) and index.dtype != key.dtype ): # TODO: is this right? 
key = key._get_decategorized_column() if len(key) == 0: return EmptyIndexer() else: # TODO: promote to Index objects, so this can handle # categoricals correctly? if key.dtype.kind == "b": if is_scalar and index.dtype.kind != "b": raise KeyError( "boolean label cannot be used without a boolean index" ) else: return MaskIndexer(BooleanMask(key, n)) elif index.dtype.kind == "M": # Try to turn strings into datetimes key = as_column(key, dtype=index.dtype) haystack = index._column gather_map = ordered_find(key, haystack) if is_scalar and len(gather_map.column) == 1: return ScalarIndexer(gather_map) else: return MapIndexer(gather_map) def parse_row_loc_indexer(key: Any, index: Index) -> IndexingSpec: """ Normalize to return structured information for a label-based row indexer. Given a label-based row indexer that has already been destructured by :func:`destructure_loc_key`, inspect further and produce structured information for indexing operations to act upon. Parameters ---------- key Suitably destructured key for row indexing index Index to provide context Returns ------- IndexingSpec Structured data for indexing. A tag + parsed data. Raises ------ KeyError If a valid type of indexer is provided, but not all keys are found TypeError If the indexing key is otherwise invalid. """ if isinstance(index, MultiIndex): raise NotImplementedError( "This code path is not designed for MultiIndex" ) # TODO: multiindices need to be treated separately if key is Ellipsis: # Ellipsis is handled here because multiindex level-based # indices don't handle ellipsis in pandas. return SliceIndexer(slice(None)) else: return parse_single_row_loc_key(key, index)
ScalarIndexer
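A standalone illustration (not cudf code) of the slice-expansion rule that expand_key documents: a key with fewer entries than the frame's dimension is treated as a tuple and right-padded with full slices. The helper name pad_key is hypothetical.

def pad_key(key, ndim):
    # Treat a non-tuple key as a singleton tuple, then pad with slice(None)
    # until the key has one entry per dimension of the frame.
    key = key if isinstance(key, tuple) else (key,)
    return key + (slice(None),) * (ndim - len(key))

print(pad_key(3, 2))         # (3, slice(None, None, None))
print(pad_key((3, "a"), 2))  # (3, 'a')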
python
ansible__ansible
test/lib/ansible_test/_internal/cli/argparsing/argcompletion.py
{ "start": 3503, "end": 5167 }
class ____(CompletionFinder): """ Custom completion finder for argcomplete. It provides support for running completion in list mode, which argcomplete natively handles the same as standard completion. """ enabled = bool(argcomplete) def __init__(self, *args, validator=None, **kwargs) -> None: if validator: raise ValueError() self.comp_type = get_comp_type() self.list_mode = self.comp_type.list_mode if self.comp_type else False self.disable_completion_mangling = False finder = self def custom_validator(completion, prefix): """Completion validator used to optionally bypass validation.""" if finder.disable_completion_mangling: return True return default_validator(completion, prefix) super().__init__( *args, validator=custom_validator, **kwargs, ) def __call__(self, *args, **kwargs): if self.enabled: super().__call__(*args, **kwargs) def quote_completions(self, completions, cword_prequote, last_wordbreak_pos): """Intercept default quoting behavior to optionally block mangling of completion entries.""" if self.disable_completion_mangling: # Word breaks have already been handled when generating completions, don't mangle them further. # This is needed in many cases when returning completion lists which lack the existing completion prefix. last_wordbreak_pos = None return super().quote_completions(completions, cword_prequote, last_wordbreak_pos)
OptionCompletionFinder
python
getsentry__sentry
src/sentry/sentry_apps/api/endpoints/sentry_app_webhook_requests.py
{ "start": 2158, "end": 5724 }
class ____(SentryAppBaseEndpoint): owner = ApiOwner.ECOSYSTEM publish_status = { "GET": ApiPublishStatus.EXPERIMENTAL, } permission_classes = (SentryAppStatsPermission,) def get(self, request: Request, sentry_app: SentryApp) -> Response: """ :qparam string eventType: Optionally specify a specific event type to filter requests :qparam bool errorsOnly: If this is true, only return error/warning requests (300-599) :qparam string organizationSlug: Optionally specify an org slug to filter requests :qparam string start: Optionally specify a date to begin at. Format must be YYYY-MM-DD HH:MM:SS :qparam string end: Optionally specify a date to end at. Format must be YYYY-MM-DD HH:MM:SS """ serializer = IncomingRequestSerializer(data=request.GET) if not serializer.is_valid(): return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) serialized = serializer.validated_data event_type = serialized.get("eventType") errors_only = serialized.get("errorsOnly") org_slug = serialized.get("organizationSlug") start_time = serialized.get("start") end_time = serialized.get("end") organization = None if org_slug: try: organization = OrganizationMapping.objects.get(slug=org_slug) except OrganizationMapping.DoesNotExist: return Response({"detail": "Invalid organization."}, status=400) requests: list[BufferedRequest] = [] control_filter: SentryAppRequestFilterArgs = {} region_filter: SentryAppRequestFilterArgs = {} control_filter["errors_only"] = region_filter["errors_only"] = errors_only datetime_org_filter: DatetimeOrganizationFilterArgs = { "start_time": start_time, "end_time": end_time, "organization": organization, } # If event type is installation.created or installation.deleted, we only need to fetch requests from the control buffer if event_type == "installation.created" or event_type == "installation.deleted": control_filter["event"] = event_type requests.extend( get_buffer_requests_from_control(sentry_app, control_filter, datetime_org_filter) ) # If event type has been specified, we only need to fetch requests from region buffers elif event_type: region_filter["event"] = event_type requests.extend( get_buffer_requests_from_regions(sentry_app.id, region_filter, datetime_org_filter) ) else: control_filter["event"] = [ "installation.created", "installation.deleted", ] requests.extend( get_buffer_requests_from_control(sentry_app, control_filter, datetime_org_filter) ) region_filter["event"] = list( set(EXTENDED_VALID_EVENTS) - { "installation.created", "installation.deleted", } ) requests.extend( get_buffer_requests_from_regions(sentry_app.id, region_filter, datetime_org_filter) ) requests.sort(key=lambda x: parse_date(x.data.date), reverse=True) return Response( serialize(requests, request.user, SentryAppWebhookRequestSerializer(sentry_app)) )
SentryAppWebhookRequestsEndpoint
python
imageio__imageio
imageio/plugins/_tifffile.py
{ "start": 176347, "end": 182175 }
class ____(object): """Sequence of TIFF files. The image data in all files must match shape, dtype, etc. Attributes ---------- files : list List of file names. shape : tuple Shape of image sequence. Excludes shape of image array. axes : str Labels of axes in shape. Examples -------- >>> # read image stack from sequence of TIFF files >>> imsave('temp_C001T001.tif', numpy.random.rand(64, 64)) >>> imsave('temp_C001T002.tif', numpy.random.rand(64, 64)) >>> tifs = TiffSequence('temp_C001*.tif') >>> tifs.shape (1, 2) >>> tifs.axes 'CT' >>> data = tifs.asarray() >>> data.shape (1, 2, 64, 64) """ _patterns = { "axes": r""" # matches Olympus OIF and Leica TIFF series _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4})) _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? """ } class ParseError(Exception): pass def __init__(self, files, imread=TiffFile, pattern="axes", *args, **kwargs): """Initialize instance from multiple files. Parameters ---------- files : str, pathlib.Path, or sequence thereof Glob pattern or sequence of file names. Binary streams are not supported. imread : function or class Image read function or class with asarray function returning numpy array from single file. pattern : str Regular expression pattern that matches axes names and sequence indices in file names. By default, the pattern matches Olympus OIF and Leica TIFF series. """ if isinstance(files, pathlib.Path): files = str(files) if isinstance(files, basestring): files = natural_sorted(glob.glob(files)) files = list(files) if not files: raise ValueError("no files found") if isinstance(files[0], pathlib.Path): files = [str(pathlib.Path(f)) for f in files] elif not isinstance(files[0], basestring): raise ValueError("not a file name") self.files = files if hasattr(imread, "asarray"): # redefine imread _imread = imread def imread(fname, *args, **kwargs): with _imread(fname) as im: return im.asarray(*args, **kwargs) self.imread = imread self.pattern = self._patterns.get(pattern, pattern) try: self._parse() if not self.axes: self.axes = "I" except self.ParseError: self.axes = "I" self.shape = (len(files),) self._startindex = (0,) self._indices = tuple((i,) for i in range(len(files))) def __str__(self): """Return string with information about image sequence.""" return "\n".join( [ self.files[0], " size: %i" % len(self.files), " axes: %s" % self.axes, " shape: %s" % str(self.shape), ] ) def __len__(self): return len(self.files) def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def close(self): pass def asarray(self, out=None, *args, **kwargs): """Read image data from all files and return as numpy array. The args and kwargs parameters are passed to the imread function. Raise IndexError or ValueError if image shapes do not match. 
""" im = self.imread(self.files[0], *args, **kwargs) shape = self.shape + im.shape result = create_output(out, shape, dtype=im.dtype) result = result.reshape(-1, *im.shape) for index, fname in zip(self._indices, self.files): index = [i - j for i, j in zip(index, self._startindex)] index = numpy.ravel_multi_index(index, self.shape) im = self.imread(fname, *args, **kwargs) result[index] = im result.shape = shape return result def _parse(self): """Get axes and shape from file names.""" if not self.pattern: raise self.ParseError("invalid pattern") pattern = re.compile(self.pattern, re.IGNORECASE | re.VERBOSE) matches = pattern.findall(self.files[0]) if not matches: raise self.ParseError("pattern does not match file names") matches = matches[-1] if len(matches) % 2: raise self.ParseError("pattern does not match axis name and index") axes = "".join(m for m in matches[::2] if m) if not axes: raise self.ParseError("pattern does not match file names") indices = [] for fname in self.files: matches = pattern.findall(fname)[-1] if axes != "".join(m for m in matches[::2] if m): raise ValueError("axes do not match within the image sequence") indices.append([int(m) for m in matches[1::2] if m]) shape = tuple(numpy.max(indices, axis=0)) startindex = tuple(numpy.min(indices, axis=0)) shape = tuple(i - j + 1 for i, j in zip(shape, startindex)) if product(shape) != len(self.files): warnings.warn("files are missing. Missing data are zeroed") self.axes = axes.upper() self.shape = shape self._indices = indices self._startindex = startindex
TiffSequence
python
scipy__scipy
scipy/optimize/tests/test_direct.py
{ "start": 237, "end": 13267 }
class ____: def setup_method(self): self.fun_calls = threading.local() self.bounds_sphere = 4*[(-2, 3)] self.optimum_sphere_pos = np.zeros((4, )) self.optimum_sphere = 0.0 self.bounds_stylinski_tang = Bounds([-4., -4.], [4., 4.]) self.maxiter = 1000 # test functions def sphere(self, x): if not hasattr(self.fun_calls, 'c'): self.fun_calls.c = 0 self.fun_calls.c += 1 return np.square(x).sum() def inv(self, x): if np.sum(x) == 0: raise ZeroDivisionError() return 1/np.sum(x) def nan_fun(self, x): return np.nan def inf_fun(self, x): return np.inf def styblinski_tang(self, pos): x, y = pos return 0.5 * (x**4 - 16 * x**2 + 5 * x + y**4 - 16 * y**2 + 5 * y) @pytest.mark.parametrize("locally_biased", [True, False]) def test_direct(self, locally_biased): res = direct(self.sphere, self.bounds_sphere, locally_biased=locally_biased) # test accuracy assert_allclose(res.x, self.optimum_sphere_pos, rtol=1e-3, atol=1e-3) assert_allclose(res.fun, self.optimum_sphere, atol=1e-5, rtol=1e-5) # test that result lies within bounds _bounds = np.asarray(self.bounds_sphere) assert_array_less(_bounds[:, 0], res.x) assert_array_less(res.x, _bounds[:, 1]) # test number of function evaluations. Original DIRECT overshoots by # up to 500 evaluations in last iteration assert res.nfev <= 1000 * (len(self.bounds_sphere) + 1) # test that number of function evaluations is correct assert res.nfev == self.fun_calls.c # test that number of iterations is below supplied maximum assert res.nit <= self.maxiter @pytest.mark.parametrize("locally_biased", [True, False]) def test_direct_callback(self, locally_biased): # test that callback does not change the result res = direct(self.sphere, self.bounds_sphere, locally_biased=locally_biased) def callback(x): x = 2*x dummy = np.square(x) print("DIRECT minimization algorithm callback test") return dummy res_callback = direct(self.sphere, self.bounds_sphere, locally_biased=locally_biased, callback=callback) assert_allclose(res.x, res_callback.x) assert res.nit == res_callback.nit assert res.nfev == res_callback.nfev assert res.status == res_callback.status assert res.success == res_callback.success assert res.fun == res_callback.fun assert_allclose(res.x, res_callback.x) assert res.message == res_callback.message # test accuracy assert_allclose(res_callback.x, self.optimum_sphere_pos, rtol=1e-3, atol=1e-3) assert_allclose(res_callback.fun, self.optimum_sphere, atol=1e-5, rtol=1e-5) @pytest.mark.parametrize("locally_biased", [True, False]) def test_exception(self, locally_biased): bounds = 4*[(-10, 10)] with pytest.raises(ZeroDivisionError): direct(self.inv, bounds=bounds, locally_biased=locally_biased) @pytest.mark.parametrize("locally_biased", [True, False]) def test_nan(self, locally_biased): bounds = 4*[(-10, 10)] direct(self.nan_fun, bounds=bounds, locally_biased=locally_biased) @pytest.mark.parametrize("len_tol", [1e-3, 1e-4]) @pytest.mark.parametrize("locally_biased", [True, False]) def test_len_tol(self, len_tol, locally_biased): bounds = 4*[(-10., 10.)] res = direct(self.sphere, bounds=bounds, len_tol=len_tol, vol_tol=1e-30, locally_biased=locally_biased) assert res.status == 5 assert res.success assert_allclose(res.x, np.zeros((4, ))) message = ("The side length measure of the hyperrectangle containing " "the lowest function value found is below " f"len_tol={len_tol}") assert res.message == message @pytest.mark.parametrize("vol_tol", [1e-6, 1e-8]) @pytest.mark.parametrize("locally_biased", [True, False]) def test_vol_tol(self, vol_tol, locally_biased): bounds = 4*[(-10., 10.)] res 
= direct(self.sphere, bounds=bounds, vol_tol=vol_tol, len_tol=0., locally_biased=locally_biased) assert res.status == 4 assert res.success assert_allclose(res.x, np.zeros((4, ))) message = ("The volume of the hyperrectangle containing the lowest " f"function value found is below vol_tol={vol_tol}") assert res.message == message @pytest.mark.parametrize("f_min_rtol", [1e-3, 1e-5, 1e-7]) @pytest.mark.parametrize("locally_biased", [True, False]) def test_f_min(self, f_min_rtol, locally_biased): # test that desired function value is reached within # relative tolerance of f_min_rtol f_min = 1. bounds = 4*[(-2., 10.)] res = direct(self.sphere, bounds=bounds, f_min=f_min, f_min_rtol=f_min_rtol, locally_biased=locally_biased) assert res.status == 3 assert res.success assert res.fun < f_min * (1. + f_min_rtol) message = ("The best function value found is within a relative " f"error={f_min_rtol} of the (known) global optimum f_min") assert res.message == message def circle_with_args(self, x, a, b): return np.square(x[0] - a) + np.square(x[1] - b).sum() @pytest.mark.parametrize("locally_biased", [True, False]) def test_f_circle_with_args(self, locally_biased): bounds = 2*[(-2.0, 2.0)] res = direct(self.circle_with_args, bounds, args=(1, 1), maxfun=1250, locally_biased=locally_biased) assert_allclose(res.x, np.array([1., 1.]), rtol=1e-5) @pytest.mark.parametrize("locally_biased", [True, False]) def test_failure_maxfun(self, locally_biased): # test that if optimization runs for the maximal number of # evaluations, success = False is returned maxfun = 100 result = direct(self.styblinski_tang, self.bounds_stylinski_tang, maxfun=maxfun, locally_biased=locally_biased) assert result.success is False assert result.status == 1 assert result.nfev >= maxfun message = ("Number of function evaluations done is " f"larger than maxfun={maxfun}") assert result.message == message @pytest.mark.parametrize("locally_biased", [True, False]) def test_failure_maxiter(self, locally_biased): # test that if optimization runs for the maximal number of # iterations, success = False is returned maxiter = 10 result = direct(self.styblinski_tang, self.bounds_stylinski_tang, maxiter=maxiter, locally_biased=locally_biased) assert result.success is False assert result.status == 2 assert result.nit >= maxiter message = f"Number of iterations is larger than maxiter={maxiter}" assert result.message == message @pytest.mark.parametrize("locally_biased", [True, False]) def test_bounds_variants(self, locally_biased): # test that new and old bounds yield same result lb = [-6., 1., -5.] ub = [-1., 3., 5.] 
x_opt = np.array([-1., 1., 0.]) bounds_old = list(zip(lb, ub)) bounds_new = Bounds(lb, ub) res_old_bounds = direct(self.sphere, bounds_old, locally_biased=locally_biased) res_new_bounds = direct(self.sphere, bounds_new, locally_biased=locally_biased) assert res_new_bounds.nfev == res_old_bounds.nfev assert res_new_bounds.message == res_old_bounds.message assert res_new_bounds.success == res_old_bounds.success assert res_new_bounds.nit == res_old_bounds.nit assert_allclose(res_new_bounds.x, res_old_bounds.x) assert_allclose(res_new_bounds.x, x_opt, rtol=1e-2) @pytest.mark.parametrize("locally_biased", [True, False]) @pytest.mark.parametrize("eps", [1e-5, 1e-4, 1e-3]) def test_epsilon(self, eps, locally_biased): result = direct(self.styblinski_tang, self.bounds_stylinski_tang, eps=eps, vol_tol=1e-6, locally_biased=locally_biased) assert result.status == 4 assert result.success @pytest.mark.xslow @pytest.mark.parametrize("locally_biased", [True, False]) def test_no_segmentation_fault(self, locally_biased): # test that an excessive number of function evaluations # does not result in segmentation fault bounds = [(-5., 20.)] * 100 result = direct(self.sphere, bounds, maxfun=10000000, maxiter=1000000, locally_biased=locally_biased) assert result is not None @pytest.mark.parametrize("locally_biased", [True, False]) def test_inf_fun(self, locally_biased): # test that an objective value of infinity does not crash DIRECT bounds = [(-5., 5.)] * 2 result = direct(self.inf_fun, bounds, locally_biased=locally_biased) assert result is not None @pytest.mark.parametrize("len_tol", [-1, 2]) def test_len_tol_validation(self, len_tol): error_msg = "len_tol must be between 0 and 1." with pytest.raises(ValueError, match=error_msg): direct(self.styblinski_tang, self.bounds_stylinski_tang, len_tol=len_tol) @pytest.mark.parametrize("vol_tol", [-1, 2]) def test_vol_tol_validation(self, vol_tol): error_msg = "vol_tol must be between 0 and 1." with pytest.raises(ValueError, match=error_msg): direct(self.styblinski_tang, self.bounds_stylinski_tang, vol_tol=vol_tol) @pytest.mark.parametrize("f_min_rtol", [-1, 2]) def test_fmin_rtol_validation(self, f_min_rtol): error_msg = "f_min_rtol must be between 0 and 1." with pytest.raises(ValueError, match=error_msg): direct(self.styblinski_tang, self.bounds_stylinski_tang, f_min_rtol=f_min_rtol, f_min=0.) @pytest.mark.parametrize("maxfun", [1.5, "string", (1, 2)]) def test_maxfun_wrong_type(self, maxfun): error_msg = "maxfun must be of type int." with pytest.raises(ValueError, match=error_msg): direct(self.styblinski_tang, self.bounds_stylinski_tang, maxfun=maxfun) @pytest.mark.parametrize("maxiter", [1.5, "string", (1, 2)]) def test_maxiter_wrong_type(self, maxiter): error_msg = "maxiter must be of type int." with pytest.raises(ValueError, match=error_msg): direct(self.styblinski_tang, self.bounds_stylinski_tang, maxiter=maxiter) def test_negative_maxiter(self): error_msg = "maxiter must be > 0." with pytest.raises(ValueError, match=error_msg): direct(self.styblinski_tang, self.bounds_stylinski_tang, maxiter=-1) def test_negative_maxfun(self): error_msg = "maxfun must be > 0." 
with pytest.raises(ValueError, match=error_msg): direct(self.styblinski_tang, self.bounds_stylinski_tang, maxfun=-1) @pytest.mark.parametrize("bounds", ["bounds", 2., 0]) def test_invalid_bounds_type(self, bounds): error_msg = ("bounds must be a sequence or " "instance of Bounds class") with pytest.raises(ValueError, match=error_msg): direct(self.styblinski_tang, bounds) @pytest.mark.parametrize("bounds", [Bounds([-1., -1], [-2, 1]), Bounds([-np.nan, -1], [-2, np.nan]), ] ) def test_incorrect_bounds(self, bounds): error_msg = 'Bounds are not consistent min < max' with pytest.raises(ValueError, match=error_msg): direct(self.styblinski_tang, bounds) def test_inf_bounds(self): error_msg = 'Bounds must not be inf.' bounds = Bounds([-np.inf, -1], [-2, np.inf]) with pytest.raises(ValueError, match=error_msg): direct(self.styblinski_tang, bounds) @pytest.mark.parametrize("locally_biased", ["bias", [0, 0], 2.]) def test_locally_biased_validation(self, locally_biased): error_msg = 'locally_biased must be True or False.' with pytest.raises(ValueError, match=error_msg): direct(self.styblinski_tang, self.bounds_stylinski_tang, locally_biased=locally_biased)
TestDIRECT
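A minimal sketch of calling the optimizer under test, assuming SciPy's public scipy.optimize.direct; the bounds mirror the sphere set-up used in the tests.

import numpy as np
from scipy.optimize import direct, Bounds

def sphere(x):
    return np.square(x).sum()

res = direct(sphere, Bounds([-2.0] * 4, [3.0] * 4))
print(res.x, res.fun, res.success)  # minimum near the origin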
python
apache__airflow
providers/google/tests/unit/google/cloud/triggers/test_dataproc.py
{ "start": 17670, "end": 21399 }
class ____: def test_async_cluster_trigger_serialization_should_execute_successfully(self, operation_trigger): classpath, kwargs = operation_trigger.serialize() assert classpath == "airflow.providers.google.cloud.triggers.dataproc.DataprocOperationTrigger" assert kwargs == { "name": TEST_OPERATION_NAME, "project_id": TEST_PROJECT_ID, "operation_type": None, "region": TEST_REGION, "gcp_conn_id": TEST_GCP_CONN_ID, "impersonation_chain": None, "polling_interval_seconds": TEST_POLL_INTERVAL, } @pytest.mark.asyncio @mock.patch("airflow.providers.google.cloud.triggers.dataproc.DataprocOperationTrigger.get_async_hook") async def test_async_operation_triggers_on_success_should_execute_successfully( self, mock_get_async_hook, operation_trigger ): mock_operation = mock.MagicMock() mock_operation.name = TEST_OPERATION_NAME mock_operation.done = True mock_operation.response = {} mock_operation.error = Status(message="") future = asyncio.Future() future.set_result(mock_operation) mock_get_async_hook.return_value.get_operation.return_value = future expected_event = TriggerEvent( { "operation_name": TEST_OPERATION_NAME, "operation_done": True, "status": "success", "message": "Operation is successfully ended.", } ) actual_event = await operation_trigger.run().asend(None) assert expected_event == actual_event @pytest.mark.asyncio @mock.patch("airflow.providers.google.cloud.triggers.dataproc.DataprocOperationTrigger.get_async_hook") async def test_async_diagnose_operation_triggers_on_success_should_execute_successfully( self, mock_get_async_hook, diagnose_operation_trigger ): gcs_uri = "gs://test-tarball-gcs-dir-bucket" mock_operation = mock.MagicMock() mock_operation.name = TEST_OPERATION_NAME mock_operation.done = True mock_operation.response = Any(value=gcs_uri.encode("utf-8")) mock_operation.error = Status(message="") future = asyncio.Future() future.set_result(mock_operation) mock_get_async_hook.return_value.get_operation.return_value = future expected_event = TriggerEvent( { "output_uri": gcs_uri, "status": "success", "message": "Operation is successfully ended.", } ) actual_event = await diagnose_operation_trigger.run().asend(None) assert expected_event == actual_event @pytest.mark.asyncio @mock.patch("airflow.providers.google.cloud.triggers.dataproc.DataprocOperationTrigger.get_async_hook") async def test_async_operation_triggers_on_error(self, mock_get_async_hook, operation_trigger): mock_operation = mock.MagicMock() mock_operation.name = TEST_OPERATION_NAME mock_operation.done = True mock_operation.response = {} mock_operation.error = Status(message="test_error") future = asyncio.Future() future.set_result(mock_operation) mock_get_async_hook.return_value.get_operation.return_value = future expected_event = TriggerEvent( { "operation_name": TEST_OPERATION_NAME, "operation_done": True, "status": "error", "message": "test_error", } ) actual_event = await operation_trigger.run().asend(None) assert expected_event == actual_event
TestDataprocOperationTrigger
python
getsentry__sentry
src/sentry/seer/explorer/custom_tool_utils.py
{ "start": 871, "end": 976 }
class ____(BaseModel): """Simple boolean type.""" kind: Literal["boolean"] = "boolean"
BooleanType
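A tiny sketch of the discriminated-union pattern a kind field like this supports, assuming Pydantic v2; the StringType and Parameter models are hypothetical companions added for illustration.

from typing import Literal, Union
from pydantic import BaseModel, Field

class BooleanType(BaseModel):
    kind: Literal["boolean"] = "boolean"

class StringType(BaseModel):  # hypothetical sibling type
    kind: Literal["string"] = "string"

class Parameter(BaseModel):  # hypothetical container using "kind" as discriminator
    type: Union[BooleanType, StringType] = Field(discriminator="kind")

print(Parameter(type={"kind": "boolean"}).type)  # -> kind='boolean'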
python
tensorflow__tensorflow
tensorflow/python/training/monitored_session_test.py
{ "start": 35107, "end": 35650 }
class ____: """A creator that counts the number of created sessions.""" def __init__(self, session): self._initial_session = session # We only have one session per test case. We can't re-create it, thus # it shouldn't be closed. self._initial_session.close = lambda *args: None self._create_session_calls = 0 @property def number_of_sessions_created(self): return self._create_session_calls def create_session(self): self._create_session_calls += 1 return self._initial_session
CountingSessionCreator
python
getsentry__sentry
src/sentry/integrations/slack/message_builder/notifications/base.py
{ "start": 505, "end": 2563 }
class ____(BlockSlackMessageBuilder): def __init__( self, notification: BaseNotification, context: Mapping[str, Any], recipient: Actor, ) -> None: super().__init__() self.notification = notification self.context = context self.recipient = recipient def build(self) -> SlackBlock: callback_id_raw = self.notification.get_callback_data() title = self.notification.build_attachment_title(self.recipient) title_link = self.notification.get_title_link(self.recipient, ExternalProviders.SLACK) text = self.notification.get_message_description(self.recipient, ExternalProviders.SLACK) footer = self.notification.build_notification_footer( self.recipient, ExternalProviders.SLACK ) actions = self.notification.get_message_actions(self.recipient, ExternalProviders.SLACK) block_id = orjson.dumps(callback_id_raw).decode() if callback_id_raw else None first_block_text = "" if title_link: if title: first_block_text += f"<{title_link}|*{escape_slack_text(title)}*> \n" else: first_block_text += f"<{title_link}|*{escape_slack_text(title_link)}*> \n" elif title: # ie. "ZeroDivisionError", first_block_text += f"*{escape_slack_text(title)}* \n" if text: # ie. "division by zero", comments first_block_text += text blocks = [] if first_block_text: blocks.append(self.get_markdown_block(text=first_block_text)) if footer: blocks.append(self.get_context_block(text=footer)) actions_block = [] for action in actions: actions_block.append(self.get_button_action(action)) if actions_block: blocks.append({"type": "actions", "elements": [action for action in actions_block]}) return self._build_blocks(*blocks, fallback_text=text if text else None, block_id=block_id)
SlackNotificationsMessageBuilder
python
tensorflow__tensorflow
tensorflow/compiler/mlir/tfr/python/tfr_gen.py
{ "start": 2160, "end": 7860 }
class ____(enum.Enum): """All the supported types. 1-3: tfr types 4-99: mlir built-in types 100-199: TF related translator internal types 200- : Python related translator internal types """ TENSOR = 1 TENSOR_LIST = 2 ATTR = 3 NONE = 4 SHAPE = 5 # shape -> !shape.shape I1 = 21 I8 = 22 I16 = 23 I32 = 24 I64 = 25 F32 = 26 INDEX = 27 AG_UNDEFINED_VAL = 100 AG_BUILTIN_FUNC = 101 TF_RAW_OP = 102 TF_REGION = 103 TF_TENSOR_SHAPE_FUNC = 104 # shape.as_list TF_TENSOR_SHAPE_LIST = 105 # shape.as_list() PY_BUILTIN_FUNC = 200 TFR_BUILTIN_FUNC = 201 # As these are not real types, __getattribute__ helps them appear more like # actual types (i.e. class definitions). def __getattribute__(self, name): if name == 'shape' and object.__getattribute__(self, 'value') == 1: return TFRTypes.SHAPE if name == 'as_list' and object.__getattribute__(self, 'value') == 5: return TFRTypes.TF_TENSOR_SHAPE_FUNC return object.__getattribute__(self, name) def __str__(self): if self.value < 4: # pylint: disable=comparison-with-callable return '!tfr.' + self.name.lower() elif self.value < 10: # pylint: disable=comparison-with-callable return '!shape.' + self.name.lower() else: return self.name.lower() _ATTRIBUTE_TYPES = ( TFRTypes.I1, TFRTypes.I32, TFRTypes.I64, TFRTypes.F32, TFRTypes.INDEX, TFRTypes.ATTR ) # TODO(b/203493652): implement the "rename_to" for the customization in # tensorflow/core/api_def/base_api/* # {op_name: {API's attribute name: OpDef's attribute name}} _ATTRIBUTE_RENAMES = { 'Mean': {'axis': 'reduction_indices'}, 'Split': {'axis': 'split_dim'}, 'SplitV': {'axis': 'split_dim'}, } def _get_type_from_proto(arg_def=None, attr_def=None): if not arg_def: if attr_def.type == 'bool': return TFRTypes.I1 elif attr_def.type == 'int32': return TFRTypes.I32 elif attr_def.type == 'int' or attr_def.type == 'int64': return TFRTypes.I64 elif attr_def.type == 'float': return TFRTypes.F32 else: return TFRTypes.ATTR if arg_def.number_attr or arg_def.type_list_attr: return TFRTypes.TENSOR_LIST else: return TFRTypes.TENSOR def _get_type_info_from_proto(arg_def=None, attr_def=None): attr_type = _get_type_from_proto(arg_def, attr_def) if not arg_def: return '{}{{tfr.name="{}",tfr.type="{}"}}'.format( attr_type, attr_def.name, attr_def.type) else: attr_names = [] if arg_def.number_attr: attr_names.append(arg_def.number_attr) if arg_def.type_attr: attr_names.append(arg_def.type_attr) if arg_def.type_list_attr: attr_names.append(arg_def.type_list_attr) # TODO(fengliuai): currently we don't support backward type inference, so we # have to store these non-derivable type in the signatures, and then they # can be used to cast the values when raising to tf ops. 
if arg_def.type == types_pb2.DT_FLOAT: attr_names.append('f32_') elif arg_def.type == types_pb2.DT_INT32: attr_names.append('i32_') elif arg_def.type == types_pb2.DT_INT64: attr_names.append('i64_') elif arg_def.type == types_pb2.DT_BOOL: attr_names.append('i1_') if not attr_names: return str(attr_type) else: return '{}<{}>'.format(attr_type, ','.join(attr_names)) def _get_val_from_proto(attr_type, attr_val): if attr_type == TFRTypes.I1: return 'true' if attr_val.b else 'false' elif attr_type == TFRTypes.I32 or attr_type == TFRTypes.I64: return attr_val.i elif attr_type == TFRTypes.F32: return attr_val.f elif attr_type == TFRTypes.ATTR: # string if attr_val.HasField('s'): return '"{}"'.format(attr_val.s.decode()) # type if attr_val.HasField('type'): if attr_val.type == types_pb2.DT_FLOAT: return 'f32' elif attr_val.type == types_pb2.DT_INT32: return 'i32' elif attr_val.type == types_pb2.DT_INT64: return 'i64' elif attr_val.type == types_pb2.DT_BOOL: return 'i1' # list if attr_val.HasField('list'): if attr_val.list.f: elt_ty = TFRTypes.F32 values = attr_val.list.f elif attr_val.list.i: elt_ty = TFRTypes.I64 values = attr_val.list.i else: elt_ty = TFRTypes.NONE values = [] array_attr_elts = ['{}:{}'.format(val, elt_ty) for val in values] return '[{}]'.format(','.join(array_attr_elts)) raise NotImplementedError( 'Proto AttrValue not recognized. type: {}, value: {}'.format( attr_type, attr_val)) def _collect_derived_attrs_from_proto(op_def): derived_attrs = set() for arg in op_def.input_arg: if arg.type_attr: derived_attrs.add(arg.type_attr) if arg.number_attr: derived_attrs.add(arg.number_attr) if arg.type_list_attr: derived_attrs.add(arg.type_list_attr) # TODO(fengliuai): currently we don't support backward type inference, so we # have to store these non-derivable type in the signatures, and then they # can be used to cast the values when raising to tf ops. if arg.type == types_pb2.DT_FLOAT: derived_attrs.add('f32_') elif arg.type == types_pb2.DT_INT32: derived_attrs.add('i32_') elif arg.type == types_pb2.DT_INT64: derived_attrs.add('i64_') elif arg.type == types_pb2.DT_BOOL: derived_attrs.add('i1_') return derived_attrs def _require_tensor_list(arg_def): return arg_def.type_list_attr or arg_def.number_attr def _camel_to_snake(name): s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
TFRTypes
python
celery__celery
t/unit/utils/test_time.py
{ "start": 11850, "end": 12088 }
class ____: def test_repr(self): x = ffwd(year=2012) assert repr(x) def test_radd_with_unknown_gives_NotImplemented(self): x = ffwd(year=2012) assert x.__radd__(object()) == NotImplemented
test_ffwd
python
getsentry__sentry
src/sentry/migrations/0965_gzippeddict_big_tables.py
{ "start": 188, "end": 1892 }
class ____(CheckedMigration): # This flag is used to mark that a migration shouldn't be automatically run in production. # This should only be used for operations where it's safe to run the migration after your # code has deployed. So this should not be used for most operations that alter the schema # of a table. # Here are some things that make sense to mark as post deployment: # - Large data migrations. Typically we want these to be run manually so that they can be # monitored and not block the deploy for a long period of time while they run. # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to # run this outside deployments so that we don't block them. Note that while adding an index # is a schema change, it's completely safe to run the operation after the code has deployed. # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment is_post_deployment = False dependencies = [ ("sentry", "0964_add_commitcomparison_table"), ] operations = [ migrations.AlterField( model_name="activity", name="data", field=sentry.db.models.fields.jsonfield.LegacyTextJSONField(default=dict, null=True), ), migrations.AlterField( model_name="group", name="data", field=sentry.db.models.fields.jsonfield.LegacyTextJSONField(null=True), ), migrations.AlterField( model_name="rule", name="data", field=sentry.db.models.fields.jsonfield.LegacyTextJSONField(default=dict), ), ]
Migration
python
astropy__astropy
astropy/coordinates/builtin_frames/hadec.py
{ "start": 3562, "end": 5830 }
class ____(BaseCoordinateFrame):
    """
    A coordinate or frame in the Hour Angle-Declination system (Equatorial
    coordinates) with respect to the WGS84 ellipsoid.

    Hour Angle is oriented with respect to upper culmination such that the
    hour angle is negative to the East and positive to the West.

    This frame is assumed to *include* refraction effects if the ``pressure``
    frame attribute is non-zero.

    The frame attributes are listed under **Other Parameters**, which are
    necessary for transforming from HADec to some other system.
    """

    frame_specific_representation_info = {
        r.SphericalRepresentation: [
            RepresentationMapping("lon", "ha", u.hourangle),
            RepresentationMapping("lat", "dec"),
        ]
    }

    default_representation = r.SphericalRepresentation
    default_differential = r.SphericalCosLatDifferential

    obstime = TimeAttribute(
        default=None, doc="The reference time (e.g., time of observation)"
    )
    location = EarthLocationAttribute(
        default=None, doc="The location on Earth of the observer"
    )
    pressure = QuantityAttribute(default=0, unit=u.hPa, doc="The atmospheric pressure")
    temperature = QuantityAttribute(
        default=0, unit=u.deg_C, doc="The ground-level temperature"
    )
    relative_humidity = QuantityAttribute(
        default=0, unit=u.dimensionless_unscaled, doc="The relative humidity"
    )
    obswl = QuantityAttribute(
        default=1 * u.micron,
        unit=u.micron,
        doc="The average wavelength of observations",
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.has_data:
            self._set_data_lon_wrap_angle(self.data)

    @staticmethod
    def _set_data_lon_wrap_angle(data):
        if hasattr(data, "lon"):
            data.lon.wrap_angle = 180.0 * u.deg
        return data

    def represent_as(self, base, s="base", in_frame_units=False):
        """
        Ensure the wrap angle for any spherical representations.
        """
        data = super().represent_as(base, s, in_frame_units=in_frame_units)
        self._set_data_lon_wrap_angle(data)
        return data

    # self-transform defined in icrs_observed_transforms.py
HADec
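For readers unfamiliar with the frame defined above, the sketch below shows a typical round trip from ICRS to hour angle and declination. It assumes a recent astropy release that exports `HADec` from `astropy.coordinates`; the observing site, time, and sky position are made-up values for illustration:

import astropy.units as u
from astropy.coordinates import EarthLocation, HADec, SkyCoord
from astropy.time import Time

# Hypothetical observer location and observation time.
site = EarthLocation(lat=28.76 * u.deg, lon=-17.88 * u.deg, height=2326 * u.m)
obstime = Time("2024-01-01T03:00:00")

# Transform a hypothetical ICRS position; refraction is ignored because the
# default pressure attribute is zero.
icrs = SkyCoord(ra=83.63 * u.deg, dec=22.01 * u.deg, frame="icrs")
hadec = icrs.transform_to(HADec(obstime=obstime, location=site))

# Hour angle comes back in hourangle units, wrapped to the (-12h, +12h] range.
print(hadec.ha, hadec.dec)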
python
TheAlgorithms__Python
graphs/multi_heuristic_astar.py
{ "start": 70, "end": 8564 }
class ____: def __init__(self): self.elements = [] self.set = set() def minkey(self): if not self.empty(): return self.elements[0][0] else: return float("inf") def empty(self): return len(self.elements) == 0 def put(self, item, priority): if item not in self.set: heapq.heappush(self.elements, (priority, item)) self.set.add(item) else: # update # print("update", item) temp = [] (pri, x) = heapq.heappop(self.elements) while x != item: temp.append((pri, x)) (pri, x) = heapq.heappop(self.elements) temp.append((priority, item)) for pro, xxx in temp: heapq.heappush(self.elements, (pro, xxx)) def remove_element(self, item): if item in self.set: self.set.remove(item) temp = [] (pro, x) = heapq.heappop(self.elements) while x != item: temp.append((pro, x)) (pro, x) = heapq.heappop(self.elements) for prito, yyy in temp: heapq.heappush(self.elements, (prito, yyy)) def top_show(self): return self.elements[0][1] def get(self): (priority, item) = heapq.heappop(self.elements) self.set.remove(item) return (priority, item) def consistent_heuristic(p: TPos, goal: TPos): # euclidean distance a = np.array(p) b = np.array(goal) return np.linalg.norm(a - b) def heuristic_2(p: TPos, goal: TPos): # integer division by time variable return consistent_heuristic(p, goal) // t def heuristic_1(p: TPos, goal: TPos): # manhattan distance return abs(p[0] - goal[0]) + abs(p[1] - goal[1]) def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]): ans = g_function[start] + W1 * heuristics[i](start, goal) return ans def do_something(back_pointer, goal, start): grid = np.char.chararray((n, n)) for i in range(n): for j in range(n): grid[i][j] = "*" for i in range(n): for j in range(n): if (j, (n - 1) - i) in blocks: grid[i][j] = "#" grid[0][(n - 1)] = "-" x = back_pointer[goal] while x != start: (x_c, y_c) = x # print(x) grid[(n - 1) - y_c][x_c] = "-" x = back_pointer[x] grid[(n - 1)][0] = "-" for i in range(n): for j in range(n): if (i, j) == (0, n - 1): print(grid[i][j], end=" ") print("<-- End position", end=" ") else: print(grid[i][j], end=" ") print() print("^") print("Start position") print() print("# is an obstacle") print("- is the path taken by algorithm") print("PATH TAKEN BY THE ALGORITHM IS:-") x = back_pointer[goal] while x != start: print(x, end=" ") x = back_pointer[x] print(x) sys.exit() def valid(p: TPos): if p[0] < 0 or p[0] > n - 1: return False return not (p[1] < 0 or p[1] > n - 1) def expand_state( s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, ): for itera in range(n_heuristic): open_list[itera].remove_element(s) # print("s", s) # print("j", j) (x, y) = s left = (x - 1, y) right = (x + 1, y) up = (x, y + 1) down = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(neighbours) and neighbours not in visited: # print("neighbour", neighbours) visited.add(neighbours) back_pointer[neighbours] = -1 g_function[neighbours] = float("inf") if valid(neighbours) and g_function[neighbours] > g_function[s] + 1: g_function[neighbours] = g_function[s] + 1 back_pointer[neighbours] = s if neighbours not in close_list_anchor: open_list[0].put(neighbours, key(neighbours, 0, goal, g_function)) if neighbours not in close_list_inad: for var in range(1, n_heuristic): if key(neighbours, var, goal, g_function) <= W2 * key( neighbours, 0, goal, g_function ): open_list[j].put( neighbours, key(neighbours, var, goal, g_function) ) def make_common_ground(): some_list = [] for x in range(1, 5): for y in range(1, 6): some_list.append((x, y)) for 
x in range(15, 20): some_list.append((x, 17)) for x in range(10, 19): for y in range(1, 15): some_list.append((x, y)) # L block for x in range(1, 4): for y in range(12, 19): some_list.append((x, y)) for x in range(3, 13): for y in range(16, 19): some_list.append((x, y)) return some_list heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2} blocks_blk = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] blocks_all = make_common_ground() blocks = blocks_blk # hyper parameters W1 = 1 W2 = 1 n = 20 n_heuristic = 3 # one consistent and two other inconsistent # start and end destination start = (0, 0) goal = (n - 1, n - 1) t = 1 def multi_a_star(start: TPos, goal: TPos, n_heuristic: int): g_function = {start: 0, goal: float("inf")} back_pointer = {start: -1, goal: -1} open_list = [] visited = set() for i in range(n_heuristic): open_list.append(PriorityQueue()) open_list[i].put(start, key(start, i, goal, g_function)) close_list_anchor: list[int] = [] close_list_inad: list[int] = [] while open_list[0].minkey() < float("inf"): for i in range(1, n_heuristic): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= W2 * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float("inf"): do_something(back_pointer, goal, start) else: _, get_s = open_list[i].top_show() visited.add(get_s) expand_state( get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, ) close_list_inad.append(get_s) elif g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float("inf"): do_something(back_pointer, goal, start) else: get_s = open_list[0].top_show() visited.add(get_s) expand_state( get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, ) close_list_anchor.append(get_s) print("No path found to goal") print() for i in range(n - 1, -1, -1): for j in range(n): if (j, i) in blocks: print("#", end=" ") elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print("*", end=" ") else: print("-", end=" ") else: print("*", end=" ") if (j, i) == (n - 1, n - 1): print("<-- End position", end=" ") print() print("^") print("Start position") print() print("# is an obstacle") print("- is the path taken by algorithm") if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
PriorityQueue
python
getsentry__sentry
src/sentry/preprod/api/models/launchpad.py
{ "start": 490, "end": 710 }
class ____(BaseModel):
    model_config = ConfigDict()

    state: Literal[PreprodArtifactSizeMetrics.SizeAnalysisState.PROCESSING] = (
        PreprodArtifactSizeMetrics.SizeAnalysisState.PROCESSING
    )
PutSizeProcessing
python
huggingface__transformers
src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
{ "start": 66688, "end": 69675 }
class ____(SeamlessM4Tv2PreTrainedModel):
    main_input_name = "input_features"
    input_modalities = "audio"

    def __init__(self, config: SeamlessM4Tv2Config):
        super().__init__(config)

        self.feature_projection = SeamlessM4Tv2ConformerFeatureProjection(config)
        self.encoder = SeamlessM4Tv2ConformerEncoder(config)
        self.intermediate_ffn = SeamlessM4Tv2ConformerFeedForward(config, act_fn="relu", dropout=0.0)
        self.adapter = SeamlessM4Tv2ConformerAdapter(config) if config.add_adapter else None
        self.inner_layer_norm = nn.LayerNorm(config.hidden_size)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_features: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[tuple, Wav2Vec2BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_features is None:
            raise ValueError(
                """Both `input_features` and `inputs_embeds` are `None` in `SeamlessM4Tv2SpeechEncoder.forward`.
                Make sure one of them is not `None`."""
            )

        hidden_states = self.feature_projection(input_features)

        encoder_outputs = self.encoder(
            hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = encoder_outputs[0]

        expanded_hidden_states = self.intermediate_ffn(hidden_states)
        hidden_states = hidden_states + 0.5 * expanded_hidden_states

        if self.adapter is not None:
            hidden_states = self.adapter(hidden_states, attention_mask=attention_mask)

        hidden_states = self.inner_layer_norm(hidden_states)

        if not return_dict:
            return (hidden_states,) + encoder_outputs[1:]

        return Wav2Vec2BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


# inspired from MBart and NllbMoe
@auto_docstring(
    custom_intro="""
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`SeamlessM4Tv2EncoderLayer`].
    """
)
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TEncoder with SeamlessM4T->SeamlessM4Tv2
SeamlessM4Tv2SpeechEncoder
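One detail worth noting in the forward pass above is the Conformer-style half-step residual, `hidden_states + 0.5 * expanded_hidden_states`. The toy module below isolates just that pattern; the layer sizes and names are invented for the example and are not part of the transformers API:

import torch
from torch import nn

class HalfStepFFN(nn.Module):
    """Feed-forward block whose output is added with a 0.5 residual weight."""

    def __init__(self, dim, hidden):
        super().__init__()
        self.ffn = nn.Sequential(nn.Linear(dim, hidden), nn.ReLU(), nn.Linear(hidden, dim))

    def forward(self, x):
        # Shape-preserving: only half of the FFN output is mixed back in.
        return x + 0.5 * self.ffn(x)

x = torch.randn(2, 7, 16)  # (batch, time, features)
assert HalfStepFFN(16, 64)(x).shape == x.shape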
python
getsentry__sentry
tests/sentry/runner/commands/test_backup.py
{ "start": 15325, "end": 19565 }
class ____(TestCase): """ Test success cases of the `sentry sanitize` CLI command on decrypted inputs with encrypted outputs. """ def test_sanitize_with_decryption_and_encryption(self) -> None: with TemporaryDirectory() as tmp_dir: tmp_sanitized_encrypted_path = Path(tmp_dir).joinpath("sanitized_encrypted.tar") ( tmp_priv_key_path, tmp_pub_key_path, tmp_unsanitized_encrypted_path, ) = create_encryption_test_files(tmp_dir) rv = CliRunner().invoke( backup, [ "sanitize", str(tmp_sanitized_encrypted_path), "--src", str(tmp_unsanitized_encrypted_path), "--decrypt-with", str(tmp_priv_key_path), "--encrypt-with", str(tmp_pub_key_path), ], ) assert rv.exit_code == 0, rv.output @patch("sentry.backup.crypto.KeyManagementServiceClient") def test_sanitize_with_gcp_kms_decryption_and_encryption( self, fake_kms_client: mock.Mock ) -> None: with TemporaryDirectory() as tmp_dir: tmp_sanitized_encrypted_path = Path(tmp_dir).joinpath("sanitized_encrypted.tar") ( tmp_priv_key_path, tmp_pub_key_path, tmp_unsanitized_encrypted_path, ) = create_encryption_test_files(tmp_dir) gcp_kms_config_path = mock_gcp_kms_asymmetric_decrypt( tmp_dir, tmp_priv_key_path, tmp_unsanitized_encrypted_path, fake_kms_client ) # Mock out the GCP KMS reply for the public key retrieval. with open(tmp_pub_key_path, "rb") as f: fake_kms_client.return_value.get_public_key.return_value = SimpleNamespace( pem=f.read().decode() ) # needed? fake_kms_client.reset_mock() rv = CliRunner().invoke( backup, [ "sanitize", str(tmp_sanitized_encrypted_path), "--src", str(tmp_unsanitized_encrypted_path), "--decrypt-with-gcp-kms", str(gcp_kms_config_path), "--encrypt-with-gcp-kms", str(gcp_kms_config_path), ], catch_exceptions=False, ) assert rv.exit_code == 0, rv.output assert fake_kms_client.return_value.asymmetric_decrypt.call_count == 1 assert fake_kms_client.return_value.get_public_key.call_count == 1 def cli_import_then_export( scope: str, *, import_args: Sequence[str] = (), export_args: Sequence[str] = () ) -> None: with TemporaryDirectory() as tmp_dir: tmp_in_findings = Path(tmp_dir).joinpath( f"{''.join(choice(ascii_letters)for _ in range(6))}.json" ) rv = CliRunner().invoke( import_, [ scope, GOOD_FILE_PATH, "--no-prompt", "--findings-file", str(tmp_in_findings), *import_args, ], catch_exceptions=False, ) assert rv.exit_code == 0, rv.output with open(tmp_in_findings) as f: findings = json.load(f) assert len(findings) == 0 tmp_out_findings = Path(tmp_dir).joinpath( f"{''.join(choice(ascii_letters)for _ in range(6))}.json" ) tmp_out_path = Path(tmp_dir).joinpath("good.json") rv = CliRunner().invoke( export, [ scope, str(tmp_out_path), "--no-prompt", "--findings-file", str(tmp_out_findings), *export_args, ], catch_exceptions=False, ) assert rv.exit_code == 0, rv.output with open(tmp_out_findings) as f: findings = json.load(f) assert len(findings) == 0
GoodSanitizeCommandEncryptionTests
python
coleifer__peewee
tests/fields.py
{ "start": 49570, "end": 49651 }
class ____(TestModel):
    text_field = TextField()
    char_field = CharField()
SM
python
django__django
tests/urlpatterns_reverse/middleware.py
{ "start": 315, "end": 440 }
class ____(MiddlewareMixin):
    def process_request(self, request):
        request.urlconf = None
NullChangeURLconfMiddleware
python
doocs__leetcode
lcof/面试题45. 把数组排成最小的数/Solution.py
{ "start": 0, "end": 262 }
class ____:
    def minNumber(self, nums: List[int]) -> str:
        def cmp(a, b):
            x, y = a + b, b + a
            return -1 if x < y else 1

        ans = [str(x) for x in nums]
        ans.sort(key=cmp_to_key(cmp))
        return "".join(ans)
Solution
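The solution above orders numbers by their concatenation using `functools.cmp_to_key`; the self-contained sketch below restates the idea with an illustrative input (the helper name is mine, not from the record):

from functools import cmp_to_key

def min_number(nums):
    # a + b < b + a means a should come before b in the smallest concatenation.
    def cmp(a, b):
        return -1 if a + b < b + a else 1

    return "".join(sorted(map(str, nums), key=cmp_to_key(cmp)))

# "30" precedes "3" because "303" < "330", giving the smallest arrangement.
assert min_number([3, 30, 34, 5, 9]) == "3033459"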
python
kamyu104__LeetCode-Solutions
Python/count-odd-numbers-in-an-interval-range.py
{ "start": 29, "end": 224 }
class ____(object):
    def countOdds(self, low, high):
        """
        :type low: int
        :type high: int
        :rtype: int
        """
        return (high+1)//2 - ((low-1)+1)//2
Solution
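The one-liner above is a prefix-count subtraction: [1, n] contains (n + 1) // 2 odd integers, so the count in [low, high] is (high + 1) // 2 - low // 2. A small sketch with illustrative inputs:

def count_odds(low, high):
    # Odd numbers in [1, n] total (n + 1) // 2; subtract the prefix below low.
    return (high + 1) // 2 - low // 2

assert count_odds(3, 7) == 3   # 3, 5, 7
assert count_odds(8, 10) == 1  # 9
assert count_odds(2, 2) == 0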
python
redis__redis-py
redis/connection.py
{ "start": 6375, "end": 21483 }
class ____: """ Abstract class for handling maintenance notifications logic. This class is expected to be used as base class together with ConnectionInterface. This class is intended to be used with multiple inheritance! All logic related to maintenance notifications is encapsulated in this class. """ def __init__( self, maint_notifications_config: Optional[MaintNotificationsConfig], maint_notifications_pool_handler: Optional[ MaintNotificationsPoolHandler ] = None, maintenance_state: "MaintenanceState" = MaintenanceState.NONE, maintenance_notification_hash: Optional[int] = None, orig_host_address: Optional[str] = None, orig_socket_timeout: Optional[float] = None, orig_socket_connect_timeout: Optional[float] = None, parser: Optional[Union[_HiredisParser, _RESP3Parser]] = None, ): """ Initialize the maintenance notifications for the connection. Args: maint_notifications_config (MaintNotificationsConfig): The configuration for maintenance notifications. maint_notifications_pool_handler (Optional[MaintNotificationsPoolHandler]): The pool handler for maintenance notifications. maintenance_state (MaintenanceState): The current maintenance state of the connection. maintenance_notification_hash (Optional[int]): The current maintenance notification hash of the connection. orig_host_address (Optional[str]): The original host address of the connection. orig_socket_timeout (Optional[float]): The original socket timeout of the connection. orig_socket_connect_timeout (Optional[float]): The original socket connect timeout of the connection. parser (Optional[Union[_HiredisParser, _RESP3Parser]]): The parser to use for maintenance notifications. If not provided, the parser from the connection is used. This is useful when the parser is created after this object. """ self.maint_notifications_config = maint_notifications_config self.maintenance_state = maintenance_state self.maintenance_notification_hash = maintenance_notification_hash self._configure_maintenance_notifications( maint_notifications_pool_handler, orig_host_address, orig_socket_timeout, orig_socket_connect_timeout, parser, ) @abstractmethod def _get_parser(self) -> Union[_HiredisParser, _RESP3Parser]: pass @abstractmethod def _get_socket(self) -> Optional[socket.socket]: pass @abstractmethod def get_protocol(self) -> Union[int, str]: """ Returns: The RESP protocol version, or ``None`` if the protocol is not specified, in which case the server default will be used. 
""" pass @property @abstractmethod def host(self) -> str: pass @host.setter @abstractmethod def host(self, value: str): pass @property @abstractmethod def socket_timeout(self) -> Optional[Union[float, int]]: pass @socket_timeout.setter @abstractmethod def socket_timeout(self, value: Optional[Union[float, int]]): pass @property @abstractmethod def socket_connect_timeout(self) -> Optional[Union[float, int]]: pass @socket_connect_timeout.setter @abstractmethod def socket_connect_timeout(self, value: Optional[Union[float, int]]): pass @abstractmethod def send_command(self, *args, **kwargs): pass @abstractmethod def read_response( self, disable_decoding=False, *, disconnect_on_error=True, push_request=False, ): pass @abstractmethod def disconnect(self, *args): pass def _configure_maintenance_notifications( self, maint_notifications_pool_handler: Optional[ MaintNotificationsPoolHandler ] = None, orig_host_address=None, orig_socket_timeout=None, orig_socket_connect_timeout=None, parser: Optional[Union[_HiredisParser, _RESP3Parser]] = None, ): """ Enable maintenance notifications by setting up handlers and storing original connection parameters. Should be used ONLY with parsers that support push notifications. """ if ( not self.maint_notifications_config or not self.maint_notifications_config.enabled ): self._maint_notifications_pool_handler = None self._maint_notifications_connection_handler = None return if not parser: raise RedisError( "To configure maintenance notifications, a parser must be provided!" ) if not isinstance(parser, _HiredisParser) and not isinstance( parser, _RESP3Parser ): raise RedisError( "Maintenance notifications are only supported with hiredis and RESP3 parsers!" ) if maint_notifications_pool_handler: # Extract a reference to a new pool handler that copies all properties # of the original one and has a different connection reference # This is needed because when we attach the handler to the parser # we need to make sure that the handler has a reference to the # connection that the parser is attached to. self._maint_notifications_pool_handler = ( maint_notifications_pool_handler.get_handler_for_connection() ) self._maint_notifications_pool_handler.set_connection(self) else: self._maint_notifications_pool_handler = None self._maint_notifications_connection_handler = ( MaintNotificationsConnectionHandler(self, self.maint_notifications_config) ) # Set up pool handler if available if self._maint_notifications_pool_handler: parser.set_node_moving_push_handler( self._maint_notifications_pool_handler.handle_notification ) # Set up connection handler parser.set_maintenance_push_handler( self._maint_notifications_connection_handler.handle_notification ) # Store original connection parameters self.orig_host_address = orig_host_address if orig_host_address else self.host self.orig_socket_timeout = ( orig_socket_timeout if orig_socket_timeout else self.socket_timeout ) self.orig_socket_connect_timeout = ( orig_socket_connect_timeout if orig_socket_connect_timeout else self.socket_connect_timeout ) def set_maint_notifications_pool_handler_for_connection( self, maint_notifications_pool_handler: MaintNotificationsPoolHandler ): # Deep copy the pool handler to avoid sharing the same pool handler # between multiple connections, because otherwise each connection will override # the connection reference and the pool handler will only hold a reference # to the last connection that was set. 
maint_notifications_pool_handler_copy = ( maint_notifications_pool_handler.get_handler_for_connection() ) maint_notifications_pool_handler_copy.set_connection(self) self._get_parser().set_node_moving_push_handler( maint_notifications_pool_handler_copy.handle_notification ) self._maint_notifications_pool_handler = maint_notifications_pool_handler_copy # Update maintenance notification connection handler if it doesn't exist if not self._maint_notifications_connection_handler: self._maint_notifications_connection_handler = ( MaintNotificationsConnectionHandler( self, maint_notifications_pool_handler.config ) ) self._get_parser().set_maintenance_push_handler( self._maint_notifications_connection_handler.handle_notification ) else: self._maint_notifications_connection_handler.config = ( maint_notifications_pool_handler.config ) def activate_maint_notifications_handling_if_enabled(self, check_health=True): # Send maintenance notifications handshake if RESP3 is active # and maintenance notifications are enabled # and we have a host to determine the endpoint type from # When the maint_notifications_config enabled mode is "auto", # we just log a warning if the handshake fails # When the mode is enabled=True, we raise an exception in case of failure if ( self.get_protocol() not in [2, "2"] and self.maint_notifications_config and self.maint_notifications_config.enabled and self._maint_notifications_connection_handler and hasattr(self, "host") ): self._enable_maintenance_notifications( maint_notifications_config=self.maint_notifications_config, check_health=check_health, ) def _enable_maintenance_notifications( self, maint_notifications_config: MaintNotificationsConfig, check_health=True ): try: host = getattr(self, "host", None) if host is None: raise ValueError( "Cannot enable maintenance notifications for connection" " object that doesn't have a host attribute." ) else: endpoint_type = maint_notifications_config.get_endpoint_type(host, self) self.send_command( "CLIENT", "MAINT_NOTIFICATIONS", "ON", "moving-endpoint-type", endpoint_type.value, check_health=check_health, ) response = self.read_response() if not response or str_if_bytes(response) != "OK": raise ResponseError( "The server doesn't support maintenance notifications" ) except Exception as e: if ( isinstance(e, ResponseError) and maint_notifications_config.enabled == "auto" ): # Log warning but don't fail the connection import logging logger = logging.getLogger(__name__) logger.debug(f"Failed to enable maintenance notifications: {e}") else: raise def get_resolved_ip(self) -> Optional[str]: """ Extract the resolved IP address from an established connection or resolve it from the host. First tries to get the actual IP from the socket (most accurate), then falls back to DNS resolution if needed. 
Args: connection: The connection object to extract the IP from Returns: str: The resolved IP address, or None if it cannot be determined """ # Method 1: Try to get the actual IP from the established socket connection # This is most accurate as it shows the exact IP being used try: conn_socket = self._get_socket() if conn_socket is not None: peer_addr = conn_socket.getpeername() if peer_addr and len(peer_addr) >= 1: # For TCP sockets, peer_addr is typically (host, port) tuple # Return just the host part return peer_addr[0] except (AttributeError, OSError): # Socket might not be connected or getpeername() might fail pass # Method 2: Fallback to DNS resolution of the host # This is less accurate but works when socket is not available try: host = getattr(self, "host", "localhost") port = getattr(self, "port", 6379) if host: # Use getaddrinfo to resolve the hostname to IP # This mimics what the connection would do during _connect() addr_info = socket.getaddrinfo( host, port, socket.AF_UNSPEC, socket.SOCK_STREAM ) if addr_info: # Return the IP from the first result # addr_info[0] is (family, socktype, proto, canonname, sockaddr) # sockaddr[0] is the IP address return str(addr_info[0][4][0]) except (AttributeError, OSError, socket.gaierror): # DNS resolution might fail pass return None @property def maintenance_state(self) -> MaintenanceState: return self._maintenance_state @maintenance_state.setter def maintenance_state(self, state: "MaintenanceState"): self._maintenance_state = state def getpeername(self): """ Returns the peer name of the connection. """ conn_socket = self._get_socket() if conn_socket: return conn_socket.getpeername()[0] return None def update_current_socket_timeout(self, relaxed_timeout: Optional[float] = None): conn_socket = self._get_socket() if conn_socket: timeout = relaxed_timeout if relaxed_timeout != -1 else self.socket_timeout conn_socket.settimeout(timeout) self.update_parser_timeout(timeout) def update_parser_timeout(self, timeout: Optional[float] = None): parser = self._get_parser() if parser and parser._buffer: if isinstance(parser, _RESP3Parser) and timeout: parser._buffer.socket_timeout = timeout elif isinstance(parser, _HiredisParser): parser._socket_timeout = timeout def set_tmp_settings( self, tmp_host_address: Optional[Union[str, object]] = SENTINEL, tmp_relaxed_timeout: Optional[float] = None, ): """ The value of SENTINEL is used to indicate that the property should not be updated. """ if tmp_host_address and tmp_host_address != SENTINEL: self.host = str(tmp_host_address) if tmp_relaxed_timeout != -1: self.socket_timeout = tmp_relaxed_timeout self.socket_connect_timeout = tmp_relaxed_timeout def reset_tmp_settings( self, reset_host_address: bool = False, reset_relaxed_timeout: bool = False, ): if reset_host_address: self.host = self.orig_host_address if reset_relaxed_timeout: self.socket_timeout = self.orig_socket_timeout self.socket_connect_timeout = self.orig_socket_connect_timeout
MaintNotificationsAbstractConnection
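The `get_resolved_ip` fallback in the last record resolves the configured host with `socket.getaddrinfo` when no connected socket is available. The standalone sketch below shows only that fallback path; the function name and default port are illustrative, not part of redis-py's API:

import socket

def resolve_host(host, port=6379):
    """Return the first IP that getaddrinfo yields for a TCP endpoint, or None."""
    try:
        info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM)
    except (socket.gaierror, OSError):
        return None
    # Each entry is (family, socktype, proto, canonname, sockaddr); sockaddr[0] is the IP.
    return str(info[0][4][0]) if info else None

print(resolve_host("localhost"))  # typically "127.0.0.1" or "::1"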