Dataset columns and value statistics:

| column     | type          | stats      |
| ---------- | ------------- | ---------- |
| language   | stringclasses | 1 value    |
| repo       | stringclasses | 346 values |
| path       | stringlengths | 6–201      |
| class_span | dict          |            |
| source     | stringlengths | 21–2.38M   |
| target     | stringlengths | 1–96       |
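A minimal sketch of how a dataset with this schema might be loaded and inspected, assuming it is hosted on the Hugging Face Hub; the dataset ID `org/masked-class-names` below is a hypothetical placeholder, not the real identifier:

```python
# Sketch only: the dataset ID is a hypothetical placeholder.
from datasets import load_dataset

ds = load_dataset("org/masked-class-names", split="train")  # hypothetical ID

for record in ds.select(range(3)):
    # Each record pairs a `source` snippet, whose class name is masked as
    # `____`, with the `target` name that was masked out.
    print(record["repo"], record["path"], record["target"])
```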
language: python
repo: getsentry__sentry
path: src/sentry/integrations/github/issues.py
class_span: {"start": 1188, "end": 13469}
source:

```python
class ____(SourceCodeIssueIntegration):
    def raise_error(self, exc: Exception, identity: Identity | None = None) -> NoReturn:
        if isinstance(exc, ApiError):
            if exc.code == 422:
                invalid_fields = {}
                if exc.json is not None:
                    for e in exc.json.get("errors", []):
                        field = e.get("field", "unknown field")
                        code = e.get("code", "invalid")
                        value = e.get("value", "unknown value")
                        invalid_fields[field] = f"Got {code} value: {value} for field: {field}"
                    raise IntegrationFormError(invalid_fields) from exc
                raise IntegrationFormError(
                    {"detail": "Some given field was misconfigured"}
                ) from exc
            elif exc.code == 410:
                raise IntegrationConfigurationError(
                    "Issues are disabled for this repository, please check your repository permissions"
                ) from exc
            elif exc.code == 404:
                raise IntegrationResourceNotFoundError from exc
            elif exc.code == 403:
                if exc.json is not None:
                    detail = exc.json.get("message")
                    if detail:
                        raise IntegrationConfigurationError(detail) from exc
                raise IntegrationConfigurationError(
                    "You are not authorized to create issues in this repository. Please check your repository permissions."
                ) from exc
        raise super().raise_error(exc=exc, identity=identity)

    def make_external_key(self, data: Mapping[str, Any]) -> str:
        return "{}#{}".format(data["repo"], data["key"])

    def get_issue_url(self, key: str) -> str:
        domain_name, user = self.model.metadata["domain_name"].split("/")
        repo, issue_id = key.split("#")
        return f"https://{domain_name}/{repo}/issues/{issue_id}"

    def get_feedback_issue_body(self, occurrence: IssueOccurrence) -> str:
        messages = [
            evidence for evidence in occurrence.evidence_display if evidence.name == "message"
        ]
        others = [
            evidence for evidence in occurrence.evidence_display if evidence.name != "message"
        ]

        body = ""
        for message in messages:
            body += message.value
            body += "\n\n"

        body += "| | |\n"
        body += "| ------------- | --------------- |\n"
        for evidence in sorted(others, key=attrgetter("important"), reverse=True):
            body += f"| **{evidence.name}** | {evidence.value} |\n"

        return body.rstrip("\n")  # remove the last new line

    def get_generic_issue_body(self, occurrence: IssueOccurrence) -> str:
        body = "| | |\n"
        body += "| ------------- | --------------- |\n"
        for evidence in sorted(
            occurrence.evidence_display, key=attrgetter("important"), reverse=True
        ):
            body += f"| **{evidence.name}** | {truncatechars(evidence.value, MAX_CHAR)} |\n"

        return body[:-2]

    def get_group_description(self, group: Group, event: Event | GroupEvent, **kwargs: Any) -> str:
        output = self.get_group_link(group, **kwargs)

        if isinstance(event, GroupEvent) and event.occurrence is not None:
            body = ""
            if group.issue_category == GroupCategory.FEEDBACK:
                body = self.get_feedback_issue_body(event.occurrence)
            else:
                body = self.get_generic_issue_body(event.occurrence)
            output.extend([body])
        else:
            body = self.get_group_body(group, event)
            if body:
                output.extend(["", "```", body, "```"])
        return "\n".join(output)

    def after_link_issue(self, external_issue: ExternalIssue, **kwargs: Any) -> None:
        data = kwargs["data"]
        client = self.get_client()

        repo, issue_num = external_issue.key.split("#")

        if not repo:
            raise IntegrationFormError({"repo": "Repository is required"})
        if not issue_num:
            raise IntegrationFormError({"externalIssue": "Issue number is required"})

        comment = data.get("comment")
        if comment:
            try:
                client.create_comment(repo=repo, issue_id=issue_num, data={"body": comment})
            except ApiError as e:
                raise IntegrationError(self.message_from_error(e))

    def get_persisted_default_config_fields(self) -> Sequence[str]:
        return ["repo"]

    def create_default_repo_choice(self, default_repo: str) -> tuple[str, str]:
        return default_repo, default_repo.split("/")[1]

    @all_silo_function
    def get_create_issue_config(
        self, group: Group | None, user: User | RpcUser, **kwargs: Any
    ) -> list[dict[str, Any]]:
        """
        We use the `group` to get three things: organization_slug, project
        defaults, and default title and description. In the case where we're
        getting `createIssueConfig` from GitHub for Ticket Rules, we don't know
        the issue group beforehand.

        :param group: (Optional) Group model.
        :param user: User model.
        :param kwargs: (Optional) Object
            * params: (Optional) Object
        :return:
        """
        kwargs["link_referrer"] = "github_integration"

        if group:
            fields = super().get_create_issue_config(group, user, **kwargs)
            org = group.organization
        else:
            fields = []
            org_context = organization_service.get_organization_by_id(
                id=self.organization_id, include_projects=False, include_teams=False
            )
            assert org_context is not None
            org = org_context.organization

        params = kwargs.pop("params", {})
        default_repo, repo_choices = self.get_repository_choices(group, params, PAGE_NUMBER_LIMIT)

        assignees = self.get_allowed_assignees(default_repo) if default_repo else []
        labels: Sequence[tuple[str, str]] = []
        if default_repo:
            owner, repo = default_repo.split("/")
            labels = self.get_repo_labels(owner, repo)

        autocomplete_url = reverse(
            "sentry-integration-github-search", args=[org.slug, self.model.id]
        )

        return [
            {
                "name": "repo",
                "label": "GitHub Repository",
                "type": "select",
                "default": default_repo,
                "choices": repo_choices,
                "url": autocomplete_url,
                "updatesForm": True,
                "required": True,
            },
            *fields,
            {
                "name": "assignee",
                "label": "Assignee",
                "default": "",
                "type": "select",
                "required": False,
                "choices": assignees,
            },
            {
                "name": "labels",
                "label": "Labels",
                "default": [],
                "type": "select",
                "multiple": True,
                "required": False,
                "choices": labels,
            },
        ]

    def create_issue(self, data: Mapping[str, Any], **kwargs: Any) -> Mapping[str, Any]:
        client = self.get_client()

        repo = data.get("repo")
        if not repo:
            raise IntegrationFormError({"repo": "Repository is required"})

        # Create clean issue data with required fields
        if not data.get("title"):
            raise IntegrationFormError({"title": "Title is required"})
        if not data.get("description"):
            raise IntegrationFormError({"description": "Description is required"})

        issue_data = {
            "title": data["title"],
            "body": data["description"],
        }

        # Only include optional fields if they have valid values
        if data.get("assignee"):
            issue_data["assignee"] = data["assignee"]
        if data.get("labels"):
            issue_data["labels"] = data["labels"]

        try:
            issue = client.create_issue(repo=repo, data=issue_data)
        except ApiError as e:
            self.raise_error(e)

        return {
            "key": issue["number"],
            "title": issue["title"],
            "description": issue["body"],
            "url": issue["html_url"],
            "repo": repo,
        }

    def get_link_issue_config(self, group: Group, **kwargs: Any) -> list[dict[str, Any]]:
        params = kwargs.pop("params", {})
        default_repo, repo_choices = self.get_repository_choices(group, params)

        org = group.organization
        autocomplete_url = reverse(
            "sentry-integration-github-search", args=[org.slug, self.model.id]
        )

        def get_linked_issue_comment_prefix(group: Group) -> str:
            if group.issue_category == GroupCategory.FEEDBACK:
                return "Sentry Feedback"
            else:
                return "Sentry Issue"

        def get_default_comment(group: Group) -> str:
            prefix = get_linked_issue_comment_prefix(group)
            url = group.get_absolute_url(params={"referrer": "github_integration"})
            issue_short_id = group.qualified_short_id
            return f"{prefix}: [{issue_short_id}]({absolute_uri(url)})"

        return [
            {
                "name": "repo",
                "label": "GitHub Repository",
                "type": "select",
                "default": default_repo,
                "choices": repo_choices,
                "url": autocomplete_url,
                "required": True,
                "updatesForm": True,
            },
            {
                "name": "externalIssue",
                "label": "Issue Number or Title",
                "default": "",
                "choices": [],
                "type": "select",
                "url": autocomplete_url,
                "required": True,
            },
            {
                "name": "comment",
                "label": "Comment",
                "default": get_default_comment(group),
                "type": "textarea",
                "required": False,
                "autosize": True,
                "help": "Leave blank if you don't want to add a comment to the GitHub issue.",
            },
        ]

    def get_issue(self, issue_id: str, **kwargs: Any) -> Mapping[str, Any]:
        data = kwargs["data"]
        repo = data.get("repo")
        issue_num = data.get("externalIssue")
        client = self.get_client()

        if not repo:
            raise IntegrationFormError({"repo": "Repository is required"})
        if not issue_num:
            raise IntegrationFormError({"externalIssue": "Issue number is required"})

        try:
            issue = client.get_issue(repo, issue_num)
        except ApiError as e:
            raise IntegrationError(self.message_from_error(e))

        return {
            "key": issue["number"],
            "title": issue["title"],
            "description": issue["body"],
            "url": issue["html_url"],
            "repo": repo,
        }

    def get_allowed_assignees(self, repo: str) -> Sequence[tuple[str, str]]:
        client = self.get_client()
        try:
            response = client.get_assignees(repo)
        except Exception as e:
            self.raise_error(e)

        users = tuple((u["login"], u["login"]) for u in response)

        return (("", "Unassigned"),) + users

    def get_repo_labels(self, owner: str, repo: str) -> Sequence[tuple[str, str]]:
        client = self.get_client()
        try:
            response = client.get_labels(owner, repo)
        except Exception as e:
            self.raise_error(e)

        def natural_sort_pair(pair: tuple[str, str]) -> list[str | int]:
            return [
                int(text) if text.isdecimal() else text.lower()
                for text in re.split("([0-9]+)", pair[0])
            ]

        # sort alphabetically
        labels = tuple(
            sorted([(label["name"], label["name"]) for label in response], key=natural_sort_pair)
        )

        return labels
```

target: GitHubIssuesSpec
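Each record appears to support a class-name infilling task: `source` holds the class with its name replaced by `____`, `target` holds the original name, and `class_span` looks like the character offsets of the class within the repository file. A small sketch under those assumptions; none of these helpers come from the dataset itself:

```python
# Hypothetical helpers; the field semantics are inferred from the records.
def restore_class_name(record: dict) -> str:
    # Substitute the ground-truth name back into the masked snippet.
    return record["source"].replace("____", record["target"], 1)

def slice_class_span(file_text: str, record: dict) -> str:
    # `class_span` appears to index into the original repository file.
    span = record["class_span"]
    return file_text[span["start"]:span["end"]]
```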
language: python
repo: mamba-org__mamba
path: micromamba/test-server/reposerver.py
class_span: {"start": 1502, "end": 9817}
source:

```python
class ____:
    keys = {
        "root": [],
        "key_mgr": [
            {
                "private": "c9c2060d7e0d93616c2654840b4983d00221d8b6b69c850107da74b42168f937",
                "public": "013ddd714962866d12ba5bae273f14d48c89cf0773dee2dbf6d4561e521c83f7",
            },
        ],
        "pkg_mgr": [
            {
                "private": "f3cdab14740066fb277651ec4f96b9f6c3e3eb3f812269797b9656074cd52133",
                "public": "f46b5a7caa43640744186564c098955147daa8bac4443887bc64d8bfee3d3569",
            }
        ],
    }

    def __init__(self, in_folder: str) -> None:
        self.in_folder = Path(in_folder).resolve()
        self.folder = self.in_folder.parent / (str(self.in_folder.name) + "_signed")

        self.keys["root"] = [
            get_fingerprint(os.environ["KEY1"]),
            get_fingerprint(os.environ["KEY2"]),
        ]
        self.keys = normalize_keys(self.keys)

    def make_signed_repo(self) -> Path:
        print("[reposigner] Using keys:", self.keys)
        print("[reposigner] Using folder:", self.folder)

        self.folder.mkdir(exist_ok=True)
        self.create_root(self.keys)
        self.create_key_mgr(self.keys)
        self.create_pkg_mgr(self.keys)
        for f in glob.glob(str(self.in_folder / "**" / "repodata.json")):
            self.sign_repodata(Path(f), self.keys)
        self.copy_signing_root_file()
        return self.folder

    def create_root(self, keys):
        root_keys = keys["root"]
        root_pubkeys = [k["public"] for k in root_keys]
        key_mgr_pubkeys = [k["public"] for k in keys["key_mgr"]]

        root_version = 1

        root_md = cct_metadata_construction.build_root_metadata(
            root_pubkeys=root_pubkeys[0:1],
            root_threshold=1,
            root_version=root_version,
            key_mgr_pubkeys=key_mgr_pubkeys,
            key_mgr_threshold=1,
        )

        # Wrap the metadata in a signing envelope.
        root_md = cct_signing.wrap_as_signable(root_md)

        root_md_serialized_unsigned = cct_common.canonserialize(root_md)

        root_filepath = self.folder / f"{root_version}.root.json"
        print("Writing out: ", root_filepath)
        # Write unsigned sample root metadata.
        with open(root_filepath, "wb") as fout:
            fout.write(root_md_serialized_unsigned)

        # This overwrites the file with a signed version of the file.
        cct_root_signing.sign_root_metadata_via_gpg(root_filepath, root_keys[0]["fingerprint"])

        # Load untrusted signed root metadata.
        signed_root_md = cct_common.load_metadata_from_file(root_filepath)

        cct_authentication.verify_signable(signed_root_md, root_pubkeys, 1, gpg=True)

        print("[reposigner] Root metadata signed & verified!")

    def create_key_mgr(self, keys):
        private_key_key_mgr = cct_common.PrivateKey.from_hex(keys["key_mgr"][0]["private"])
        pkg_mgr_pub_keys = [k["public"] for k in keys["pkg_mgr"]]
        key_mgr = cct_metadata_construction.build_delegating_metadata(
            metadata_type="key_mgr",  # 'root' or 'key_mgr'
            delegations={"pkg_mgr": {"pubkeys": pkg_mgr_pub_keys, "threshold": 1}},
            version=1,
            # timestamp default: now
            # expiration default: now plus root expiration default duration
        )

        key_mgr = cct_signing.wrap_as_signable(key_mgr)

        # sign dictionary in place
        cct_signing.sign_signable(key_mgr, private_key_key_mgr)

        key_mgr_serialized = cct_common.canonserialize(key_mgr)
        with open(self.folder / "key_mgr.json", "wb") as fobj:
            fobj.write(key_mgr_serialized)

        # let's run a verification
        root_metadata = cct_common.load_metadata_from_file(self.folder / "1.root.json")
        key_mgr_metadata = cct_common.load_metadata_from_file(self.folder / "key_mgr.json")

        cct_common.checkformat_signable(root_metadata)

        if "delegations" not in root_metadata["signed"]:
            raise ValueError('Expected "delegations" entry in root metadata.')

        root_delegations = root_metadata["signed"]["delegations"]  # for brevity
        cct_common.checkformat_delegations(root_delegations)
        if "key_mgr" not in root_delegations:
            raise ValueError('Missing expected delegation to "key_mgr" in root metadata.')
        cct_common.checkformat_delegation(root_delegations["key_mgr"])

        # Doing delegation processing.
        cct_authentication.verify_delegation("key_mgr", key_mgr_metadata, root_metadata)

        print("[reposigner] Success: key_mgr metadata verified based on root metadata.")

        return key_mgr

    # Adding this to be compatible with `mamba` (in `conda` they don't seem to have the `pkg_mgr.json` file)
    # But the signing does use delegation to `pkg_mgr` role in both cases
    def create_pkg_mgr(self, keys):
        private_key_pkg_mgr = cct_common.PrivateKey.from_hex(keys["pkg_mgr"][0]["private"])
        pkg_mgr = cct_metadata_construction.build_delegating_metadata(
            metadata_type="pkg_mgr",
            delegations=None,
            version=1,
            # timestamp default: now
            # expiration default: now plus root expiration default duration
        )

        pkg_mgr = cct_signing.wrap_as_signable(pkg_mgr)

        # sign dictionary in place
        cct_signing.sign_signable(pkg_mgr, private_key_pkg_mgr)

        pkg_mgr_serialized = cct_common.canonserialize(pkg_mgr)
        with open(self.folder / "pkg_mgr.json", "wb") as fobj:
            fobj.write(pkg_mgr_serialized)

        # let's run a verification
        key_mgr_metadata = cct_common.load_metadata_from_file(self.folder / "key_mgr.json")
        pkg_mgr_metadata = cct_common.load_metadata_from_file(self.folder / "pkg_mgr.json")

        cct_common.checkformat_signable(key_mgr_metadata)

        if "delegations" not in key_mgr_metadata["signed"]:
            raise ValueError('Expected "delegations" entry in key_mgr metadata.')

        key_mgr_delegations = key_mgr_metadata["signed"]["delegations"]  # for brevity
        cct_common.checkformat_delegations(key_mgr_delegations)
        if "pkg_mgr" not in key_mgr_delegations:
            raise ValueError('Missing expected delegation to "pkg_mgr" in key_mgr metadata.')
        cct_common.checkformat_delegation(key_mgr_delegations["pkg_mgr"])

        # Doing delegation processing.
        cct_authentication.verify_delegation("pkg_mgr", pkg_mgr_metadata, key_mgr_metadata)

        print("[reposigner] Success: pkg_mgr metadata verified based on key_mgr metadata.")

        return pkg_mgr

    def sign_repodata(self, repodata_fn, keys):
        target_folder = self.folder / repodata_fn.parent.name
        if not target_folder.exists():
            target_folder.mkdir()

        final_fn = target_folder / repodata_fn.name
        print("copy", repodata_fn, final_fn)
        shutil.copyfile(repodata_fn, final_fn)

        pkg_mgr_key = keys["pkg_mgr"][0]["private"]
        cct_signing.sign_all_in_repodata(str(final_fn), pkg_mgr_key)
        print(f"[reposigner] Signed {final_fn}")

        # Copy actual 'test-package-0.1-0.tar.bz2' to serving directory ('repo_signed')
        pkg_bz2_src_fn = repodata_fn.parent / "test-package-0.1-0.tar.bz2"
        pkg_bz2_dst_fn = target_folder / "test-package-0.1-0.tar.bz2"
        print("copy", pkg_bz2_src_fn, pkg_bz2_dst_fn)
        shutil.copyfile(pkg_bz2_src_fn, pkg_bz2_dst_fn)
        print("[reposigner] 'test-package-0.1-0.tar.bz2' copied")

    def copy_signing_root_file(self):
        # Copy root json file to 'ref_path'
        # as this should be available in a safe place locally
        root_prefix = Path(os.environ["MAMBA_ROOT_PREFIX"])
        if not root_prefix:
            fatal_error("MAMBA_ROOT_PREFIX is not set!")
        # '7da7dc10' corresponds to id of channel 'http://localhost:8000/mychannel'
        channel_initial_trusted_root_role = root_prefix / "etc/trusted-repos/7da7dc10"
        if not channel_initial_trusted_root_role.exists():
            os.makedirs(channel_initial_trusted_root_role)
        shutil.copy(
            self.folder / "1.root.json",
            channel_initial_trusted_root_role / "root.json",
        )
        print("Initial trusted root copied")
```

target: RepoSigner
language: python
repo: getsentry__sentry
path: tests/sentry/runner/commands/test_createflag.py
class_span: {"start": 272, "end": 3664}
source:

```python
class ____(CliTestCase):
    command = createflag

    def convert_output_to_feature(self, output: str) -> Feature:
        split_output = output.split("=== GENERATED YAML ===\n")
        assert len(split_output) == 2
        return Feature.from_bulk_yaml(split_output[1])[0]

    def test_blank_options_only(self) -> None:
        rv = self.invoke("--blank", "--name=new flag", "--scope=organizations", "--owner=test")
        assert rv.exit_code == 0
        parsed_feature = self.convert_output_to_feature(rv.output)
        assert parsed_feature.name == "feature.organizations:new-flag"
        assert parsed_feature.segments == []
        assert parsed_feature.owner == "test"

    def test_no_segments(self) -> None:
        cli_input = ["new Flag", "Test Owner", "projects", "n"]
        rv = self.invoke(input="\n".join(cli_input))
        assert rv.exit_code == 0
        parsed_feature = self.convert_output_to_feature(rv.output)
        assert parsed_feature.name == "feature.projects:new-flag"
        assert parsed_feature.segments == []
        assert parsed_feature.owner == "Test Owner"

    def test_no_conditions_in_segment(self) -> None:
        cli_input = ["y", "New segment", "50", "n", "n"]
        rv = self.invoke(
            "--name=new flag",
            "--scope=organizations",
            "--owner=Test Owner",
            input="\n".join(cli_input),
            catch_exceptions=False,
        )
        assert rv.exit_code == 0
        parsed_feature = self.convert_output_to_feature(rv.output)
        assert parsed_feature.name == "feature.organizations:new-flag"
        assert parsed_feature.owner == "Test Owner"
        assert len(parsed_feature.segments) == 1
        new_segment = parsed_feature.segments[0]
        assert new_segment.name == "New segment"
        assert new_segment.rollout == 50
        assert new_segment.conditions == []

    def test_all_condition_types(self) -> None:
        cli_input = ["", "New segment", "", "y"]
        conditions_tuples = []
        for condition_type in ConditionOperatorKind:
            condition_data = (f"c_prop_{condition_type.value}", f"{condition_type.value}", "y")
            conditions_tuples.append(condition_data)
            cli_input.extend(condition_data)

        # Change last input to No to discontinue creating conditions
        cli_input[len(cli_input) - 1] = "n"
        # Skip creating more segments
        cli_input.append("n")

        rv = self.invoke(
            "--name=new flag",
            "--scope=organizations",
            "--owner=Test Owner",
            input="\n".join(cli_input),
        )
        assert rv.exit_code == 0, rv.output
        parsed_feature = self.convert_output_to_feature(rv.output)
        assert parsed_feature.name == "feature.organizations:new-flag"
        assert parsed_feature.owner == "Test Owner"
        assert len(parsed_feature.segments) == 1
        new_segment = parsed_feature.segments[0]
        assert new_segment.name == "New segment"
        assert new_segment.rollout == 100
        assert len(new_segment.conditions) == 6
        for c_idx in range(len(conditions_tuples)):
            condition_tuple = conditions_tuples[c_idx]
            condition = new_segment.conditions[c_idx]
            assert condition.property == condition_tuple[0]
            assert condition.operator == condition_tuple[1]
```

target: TestCreateFlag
language: python
repo: pydata__xarray
path: xarray/tests/test_conventions.py
class_span: {"start": 24457, "end": 27105}
source:

```python
class ____:
    def test_decode_cf_variable_with_array_units(self) -> None:
        v = Variable(["t"], [1, 2, 3], {"units": np.array(["foobar"], dtype=object)})
        v_decoded = conventions.decode_cf_variable("test2", v)
        assert_identical(v, v_decoded)


def test_decode_cf_variable_timedelta64():
    variable = Variable(["time"], pd.timedelta_range("1D", periods=2))
    decoded = conventions.decode_cf_variable("time", variable)
    assert decoded.encoding == {}
    assert_identical(decoded, variable)


def test_decode_cf_variable_datetime64():
    variable = Variable(["time"], pd.date_range("2000", periods=2))
    decoded = conventions.decode_cf_variable("time", variable)
    assert decoded.encoding == {}
    assert_identical(decoded, variable)


@requires_cftime
def test_decode_cf_variable_cftime():
    variable = Variable(["time"], date_range("2000", periods=2, use_cftime=True))
    decoded = conventions.decode_cf_variable("time", variable)
    assert decoded.encoding == {}
    assert_identical(decoded, variable)


def test_scalar_units() -> None:
    # test that scalar units does not raise an exception
    var = Variable(["t"], [np.nan, np.nan, 2], {"units": np.nan})
    actual = conventions.decode_cf_variable("t", var)
    assert_identical(actual, var)


def test_decode_cf_error_includes_variable_name():
    ds = Dataset({"my_invalid_var": ([], 1e36, {"units": "days since 2000-01-01"})})
    with pytest.raises(
        ValueError,
        match=r"unable to decode(?s:.*)my_invalid_var",
    ):
        decode_cf(ds)


def test_encode_cf_variable_with_vlen_dtype() -> None:
    v = Variable(
        ["x"], np.array(["a", "b"], dtype=coding.strings.create_vlen_dtype(str))
    )
    encoded_v = conventions.encode_cf_variable(v)
    assert encoded_v.data.dtype.kind == "O"
    assert coding.strings.check_vlen_dtype(encoded_v.data.dtype) is str

    # empty array
    v = Variable(["x"], np.array([], dtype=coding.strings.create_vlen_dtype(str)))
    encoded_v = conventions.encode_cf_variable(v)
    assert encoded_v.data.dtype.kind == "O"
    assert coding.strings.check_vlen_dtype(encoded_v.data.dtype) is str


def test_decode_cf_variables_decode_timedelta_warning() -> None:
    v = Variable(["time"], [1, 2], attrs={"units": "seconds"})
    variables = {"a": v}

    with warnings.catch_warnings():
        warnings.filterwarnings("error", "decode_timedelta", FutureWarning)
        conventions.decode_cf_variables(variables, {}, decode_timedelta=True)

    with pytest.warns(FutureWarning, match="decode_timedelta"):
        conventions.decode_cf_variables(variables, {})
```

target: TestDecodeCFVariableWithArrayUnits
language: python
repo: spack__spack
path: lib/spack/spack/util/package_hash.py
class_span: {"start": 14597, "end": 14713}
source:

```python
class ____(spack.error.SpackError):
    """Raised for all errors encountered during package hashing."""
```

target: PackageHashError
language: python
repo: walkccc__LeetCode
path: solutions/3241. Time Taken to Mark All Nodes/3241.py
class_span: {"start": 176, "end": 537}
source:

```python
class ____:
    def __init__(self, top1: Node = Node(), top2: Node = Node()):
        # the direct child node, where the time taken to mark the entire subtree
        # rooted at the node is the maximum
        self.top1 = top1
        # the direct child node, where the time taken to mark the entire subtree
        # rooted at the node is the second maximum
        self.top2 = top2
```

target: Top2
language: python
repo: numba__numba
path: numba/core/typeinfer.py
class_span: {"start": 10640, "end": 13964}
source:

```python
class ____(object):
    def __init__(self, target, items, special_value, value_indexes, loc):
        self.target = target
        self.items = items
        self.special_value = special_value
        self.value_indexes = value_indexes
        self.loc = loc

    def __call__(self, typeinfer):
        with new_error_context("typing of dict at {loc}", loc=self.loc):
            typevars = typeinfer.typevars
            # figure out what sort of dict is being dealt with
            tsets = [(typevars[k.name].getone(), typevars[v.name].getone())
                     for k, v in self.items]
            if not tsets:
                typeinfer.add_type(self.target,
                                   types.DictType(types.undefined,
                                                  types.undefined,
                                                  self.special_value),
                                   loc=self.loc)
            else:
                # all the info is known about the dict, if its
                # str keys -> random heterogeneous values treat as literalstrkey
                ktys = [x[0] for x in tsets]
                vtys = [x[1] for x in tsets]
                strkey = all([isinstance(x, types.StringLiteral) for x in ktys])
                literalvty = all([isinstance(x, types.Literal) for x in vtys])
                vt0 = types.unliteral(vtys[0])

                # homogeneous values comes in the form of being able to cast
                # all the other values in the ctor to the type of the first.
                # The order is important as `typed.Dict` takes it's type from
                # the first element.
                def check(other):
                    conv = typeinfer.context.can_convert(other, vt0)
                    return conv is not None and conv < Conversion.unsafe

                homogeneous = all([check(types.unliteral(x)) for x in vtys])

                # Special cases:
                # Single key:value in ctor, key is str, value is an otherwise
                # illegal container type, e.g. LiteralStrKeyDict or
                # List, there's no way to put this into a typed.Dict, so make it
                # a LiteralStrKeyDict, same goes for LiteralList.
                if len(vtys) == 1:
                    valty = vtys[0]
                    if isinstance(valty, (types.LiteralStrKeyDict,
                                          types.List,
                                          types.LiteralList)):
                        homogeneous = False

                if strkey and not homogeneous:
                    resolved_dict = {x: y for x, y in zip(ktys, vtys)}
                    ty = types.LiteralStrKeyDict(resolved_dict,
                                                 self.value_indexes)
                    typeinfer.add_type(self.target, ty, loc=self.loc)
                else:
                    init_value = self.special_value if literalvty else None
                    key_type, value_type = tsets[0]
                    typeinfer.add_type(self.target,
                                       types.DictType(key_type,
                                                      value_type,
                                                      init_value),
                                       loc=self.loc)
```

target: BuildMapConstraint
language: python
repo: spack__spack
path: var/spack/test_repos/spack_repo/builtin_mock/packages/some_virtual_preferred/package.py
class_span: {"start": 217, "end": 518}
source:

```python
class ____(Package):
    """Package providing a virtual dependency with a preference in packages.yaml"""

    homepage = "http://www.example.com"
    url = "http://www.example.com/foo-1.0.tar.gz"

    version("1.0", md5="0123456789abcdef0123456789abcdef")

    provides("somevirtual")
```

target: SomeVirtualPreferred
language: python
repo: django__django
path: tests/admin_views/models.py
class_span: {"start": 9139, "end": 9474}
source:

```python
class ____(models.Model):
    """
    Used to check autocomplete to_field resolution when ForeignKey is PK.
    """

    parent = models.ForeignKey(Parent, models.CASCADE, primary_key=True)
    name = models.CharField(max_length=128)

    class Meta:
        ordering = ["parent"]

    def __str__(self):
        return self.name
```

target: PKChild
language: python
repo: kamyu104__LeetCode-Solutions
path: Python/toeplitz-matrix.py
class_span: {"start": 345, "end": 800}
source:

```python
class ____(object):
    def isToeplitzMatrix(self, matrix):
        """
        :type matrix: List[List[int]]
        :rtype: bool
        """
        for row_index, row in enumerate(matrix):
            for digit_index, digit in enumerate(row):
                if not row_index or not digit_index:
                    continue
                if matrix[row_index - 1][digit_index - 1] != digit:
                    return False
        return True
```

target: Solution2
language: python
repo: PyCQA__pylint
path: tests/functional/u/used/used_before_assignment_typing.py
class_span: {"start": 3422, "end": 3781}
source:

```python
class ____:
    """Class to test self referential variable typing within conditionals.

    This regressed, reported in:
    https://github.com/pylint-dev/pylint/issues/5499
    """

    def function(self, var: int) -> None:
        if var < 0.5:
            _x: MyThirdClass = self

    def other_function(self) -> None:
        _x: MyThirdClass = self
```

target: MyThirdClass
language: python
repo: astropy__astropy
path: astropy/table/tests/test_subclass.py
class_span: {"start": 1293, "end": 1901}
source:

```python
class ____(table.Row):
    """
    Row class that allows access to an arbitrary dict of parameters
    stored as a dict object in the ``params`` column.
    """

    def __getitem__(self, item):
        if item not in self.colnames:
            return super().__getitem__("params")[item]
        else:
            return super().__getitem__(item)

    def keys(self):
        out = [name for name in self.colnames if name != "params"]
        params = [key.lower() for key in sorted(self["params"])]
        return out + params

    def values(self):
        return [self[key] for key in self.keys()]
```

target: ParamsRow
language: python
repo: tensorflow__tensorflow
path: tensorflow/python/framework/extension_type.py
class_span: {"start": 30532, "end": 40574}
source:

```python
class ____(ExtensionType):
  """An ExtensionType that can be batched and unbatched.

  `BatchableExtensionType`s can be used with APIs that require batching or
  unbatching, including `Keras`, `tf.data.Dataset`, and `tf.map_fn`.  E.g.:

  >>> class Vehicle(tf.experimental.BatchableExtensionType):
  ...   top_speed: tf.Tensor
  ...   mpg: tf.Tensor
  >>> batch = Vehicle([120, 150, 80], [30, 40, 12])
  >>> tf.map_fn(lambda vehicle: vehicle.top_speed * vehicle.mpg, batch,
  ...           fn_output_signature=tf.int32).numpy()
  array([3600, 6000,  960], dtype=int32)

  An `ExtensionTypeBatchEncoder` is used by these APIs to encode
  `ExtensionType` values. The default encoder assumes that values can be
  stacked, unstacked, or concatenated by simply stacking, unstacking, or
  concatenating every nested `Tensor`, `ExtensionType`, `CompositeTensor`,
  or `TensorShape` field.  Extension types where this is not the case will
  need to override `__batch_encoder__` with a custom
  `ExtensionTypeBatchEncoder`.  See `tf.experimental.ExtensionTypeBatchEncoder`
  for more details.
  """

  # Let the metaclass know that it should *not* transform this class (since
  # this class is part of the ExtensionType framework, and not a user class).
  _tf_extension_type_do_not_transform_this_class = True


# For Pickle __reduce__ protocol:
def _deserialize_for_reduce(value_type, serialization):
  return value_type.Spec._deserialize(serialization)  # pylint: disable=protected-access


def _replace_tensor_with_spec(value):
  if isinstance(value, tensor.Tensor):
    # Note: we intentionally exclude `value.name` from the `TensorSpec`.
    return tensor.TensorSpec(value.shape, value.dtype)
  if hasattr(value, '_type_spec'):
    return value._type_spec  # pylint: disable=protected-access
  return value


def _change_nested_mappings_to(value, new_type):
  """Recursively replace mappings with `new_type`."""
  if isinstance(value, (dict, immutable_dict.ImmutableDict)):
    return new_type(
        [
            (k, _change_nested_mappings_to(v, new_type))
            for (k, v) in value.items()
        ]
    )
  elif isinstance(value, tuple):
    return tuple(_change_nested_mappings_to(elt, new_type) for elt in value)
  else:
    return value


# ==============================================================================
# Helper methods for tf.ExtensionTypeMetaclass
# ==============================================================================


def _check_field_annotations(cls):
  """Validates the field annotations for tf.ExtensionType subclass `cls`."""
  annotations = getattr(cls, '__annotations__', {})

  # Check that no fields use reserved names.
  for name, value in cls.__dict__.items():
    if name == 'Spec':
      if not isinstance(value, type):
        raise ValueError(
            f'{cls.__qualname__}.Spec must be a nested class; got {value}.'
        )
      if value.__bases__ != (type_spec.TypeSpec,) and value.__bases__ != (
          object,
      ):
        raise ValueError(
            f'{cls.__qualname__}.Spec must be directly subclassed '
            'from tf.TypeSpec.'
        )
    elif extension_type_field.ExtensionTypeField.is_reserved_name(name):
      raise ValueError(
          f'The field annotations for {cls.__name__} are '
          f"invalid. Field '{name}' is reserved."
      )
  for name in annotations:
    if extension_type_field.ExtensionTypeField.is_reserved_name(name):
      raise ValueError(
          f'The field annotations for {cls.__name__} are '
          f"invalid. Field '{name}' is reserved."
      )

  # Check that all fields have type annotaitons.
  for key, value in cls.__dict__.items():
    if not (
        key in annotations
        or callable(value)
        or key.startswith('_abc_')
        or key == '_tf_extension_type_fields'
        or key.startswith('__')
        and key.endswith('__')
        or isinstance(value, (property, classmethod, staticmethod))
    ):
      raise ValueError(
          f'The field annotations for {cls.__name__} are '
          f'invalid. Field {key} is missing a type annotation.'
      )


def _add_extension_type_constructor(cls):
  """Creates a constructor for a ExtensionType or ExtensionTypeSpec subclass."""
  if '__init__' in cls.__dict__:
    _wrap_user_constructor(cls)
  else:
    _build_extension_type_constructor(cls)


def _wrap_user_constructor(cls):
  """Wraps a user-defined constructor for tf.ExtensionType subclass `cls`."""
  user_constructor = cls.__init__

  def wrapped_init(self, *args, **kwargs):
    self.__dict__[_IN_CONSTRUCTOR] = True
    user_constructor(self, *args, **kwargs)
    del self.__dict__[_IN_CONSTRUCTOR]

    self._tf_extension_type_convert_fields()  # pylint: disable=protected-access
    self.__validate__()

  cls.__init__ = tf_decorator.make_decorator(user_constructor, wrapped_init)


_NO_DEFAULT = extension_type_field.ExtensionTypeField.NO_DEFAULT


def _build_extension_type_constructor(cls):
  """Builds a constructor for tf.ExtensionType subclass `cls`."""
  fields = cls._tf_extension_type_fields()  # pylint: disable=protected-access

  # Mark any no-default fields that follow default fields as keyword_only.
  got_default = False
  keyword_only_start = len(fields)
  for i in range(len(fields)):
    if got_default:
      if fields[i].default is _NO_DEFAULT:
        keyword_only_start = i
        break
    elif fields[i].default is not _NO_DEFAULT:
      got_default = True

  params = []
  for i, field in enumerate(fields):
    if i < keyword_only_start:
      kind = tf_inspect.Parameter.POSITIONAL_OR_KEYWORD
    else:
      kind = tf_inspect.Parameter.KEYWORD_ONLY
    if field.default is _NO_DEFAULT:
      default = tf_inspect.Parameter.empty
    else:
      default = field.default
    params.append(
        tf_inspect.Parameter(
            field.name, kind, default=default, annotation=field.value_type
        )
    )

  signature = tf_inspect.Signature(params, return_annotation=cls.__name__)

  def __init__(self, *args, **kwargs):  # pylint: disable=invalid-name
    bound_args = signature.bind(*args, **kwargs)
    bound_args.apply_defaults()
    self.__dict__.update(bound_args.arguments)
    self._tf_extension_type_convert_fields()  # pylint: disable=protected-access
    self.__validate__()

  # __signature__ is supported by some inspection/documentation tools
  # (but note: typing.get_type_hints does not respect __signature__).
  __init__.__signature__ = tf_inspect.Signature(
      [tf_inspect.Parameter('self', tf_inspect.Parameter.POSITIONAL_OR_KEYWORD)]
      + params,
      return_annotation=cls,
  )

  cls.__init__ = __init__


def _build_spec_constructor(cls):
  """Builds a constructor for ExtensionTypeSpec subclass `cls`."""
  params = []
  kind = tf_inspect.Parameter.POSITIONAL_OR_KEYWORD
  for field in cls._tf_extension_type_fields():  # pylint: disable=protected-access
    params.append(tf_inspect.Parameter(field.name, kind))

  signature = tf_inspect.Signature(params, return_annotation=cls.__name__)

  def __init__(self, *args, **kwargs):  # pylint: disable=invalid-name
    bound_args = signature.bind(*args, **kwargs)
    bound_args.apply_defaults()
    self.__dict__.update(bound_args.arguments)
    self._tf_extension_type_convert_fields()  # pylint: disable=protected-access
    self.__validate__()

  # __signature__ is supported by some inspection/documentation tools.
  __init__.__signature__ = tf_inspect.Signature(
      [tf_inspect.Parameter('self', tf_inspect.Parameter.POSITIONAL_OR_KEYWORD)]
      + params,
      return_annotation=cls,
  )

  cls.__init__ = __init__


def _add_type_spec(cls):
  """Creates a nested TypeSpec class for tf.ExtensionType subclass `cls`."""
  spec_name = cls.__name__ + '.Spec'
  spec_qualname = cls.__qualname__ + '.Spec'

  # Set __module__ explicitly as a dynamic created class has module='abc'
  # by default.
  spec_dict = {'value_type': cls, '__module__': cls.__module__}

  # Copy user-supplied customizations into the TypeSpec.
  user_spec = cls.__dict__.get('Spec', None)
  if user_spec is not None:
    for name, value in user_spec.__dict__.items():
      if extension_type_field.ExtensionTypeField.is_reserved_name(name):
        raise ValueError(
            f"TypeSpec {spec_qualname} uses reserved name '{name}'."
        )
      if cls._tf_extension_type_has_field(name):  # pylint: disable=protected-access
        raise ValueError(
            f"TypeSpec {spec_qualname} defines a variable '{name}'"
            f' which shadows a field in {cls.__qualname__}'
        )
      if name in ('__module__', '__dict__', '__weakref__'):
        continue

      spec_dict[name] = value

  if issubclass(cls, BatchableExtensionType):
    type_spec_base = BatchableExtensionTypeSpec
    if (
        hasattr(cls, '__batch_encoder__')
        and '__batch_encoder__' not in spec_dict
    ):
      spec_dict['__batch_encoder__'] = cls.__batch_encoder__
  else:
    type_spec_base = ExtensionTypeSpec
    if hasattr(cls, '__batch_encoder__') or '__batch_encoder__' in spec_dict:
      raise ValueError(
          '__batch_encoder__ should only be defined for '
          'BatchableExtensionType classes.'
      )

  # Build the TypeSpec and store it as a nested class inside `cls`.
  spec = type(spec_name, (type_spec_base,), spec_dict)
  spec.__qualname__ = spec_qualname
  setattr(cls, 'Spec', spec)

  # Build a constructor for the TypeSpec class.
  if '__init__' in spec.__dict__:
    _wrap_user_constructor(spec)
  else:
    _build_spec_constructor(spec)

  cls.__abstractmethods__ -= {'_type_spec'}

  # If the user included an explicit `__name__` attribute, then use that to
  # register the TypeSpec (so it can be used in SavedModel signatures).
  if '__name__' in cls.__dict__:
    type_spec_registry.register(cls.__dict__['__name__'] + '.Spec')(spec)


# ==============================================================================
# Anonymous ExtensionType
# ==============================================================================
```

target: BatchableExtensionType
language: python
repo: viewflow__viewflow
path: tests/workflow/test_fields__token.py
class_span: {"start": 869, "end": 998}
source:

```python
class ____(forms.ModelForm):  # noqa: D101
    class Meta:
        model = TokenTestModel
        fields = ('token', )
```

target: TokenTestForm
language: python
repo: run-llama__llama_index
path: llama-index-finetuning/llama_index/finetuning/azure_openai/base.py
class_span: {"start": 444, "end": 4470}
source:

```python
class ____(OpenAIFinetuneEngine):
    """AzureOpenAI Finetuning Engine."""

    def __init__(
        self,
        base_model: str,
        data_path: str,
        verbose: bool = False,
        start_job_id: Optional[str] = None,
        validate_json: bool = True,
    ) -> None:
        """Init params."""
        self.base_model = base_model
        self.data_path = data_path
        self._verbose = verbose
        self._validate_json = validate_json
        self._start_job: Optional[Any] = None
        self._client = SyncAzureOpenAI(
            azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
            api_key=os.getenv("AZURE_OPENAI_API_KEY", None),
            api_version=os.getenv("OPENAI_API_VERSION", "2024-02-01"),
        )
        if start_job_id is not None:
            self._start_job = self._client.fine_tuning.jobs.retrieve(start_job_id)

    @classmethod
    def from_finetuning_handler(
        cls,
        finetuning_handler: OpenAIFineTuningHandler,
        base_model: str,
        data_path: str,
        **kwargs: Any,
    ) -> "AzureOpenAIFinetuneEngine":
        """
        Initialize from finetuning handler.

        Used to finetune an AzureOpenAI model into another AzureOpenAI model
        (e.g. gpt-4o-mini on top of gpt-4o).
        """
        finetuning_handler.save_finetuning_events(data_path)
        return cls(base_model=base_model, data_path=data_path, **kwargs)

    def deploy_finetuned_model(
        self,
        token: str,
        subscription_id: str,
        resource_group: str,
        resource_name: str,
        model_deployment_name: Optional[str] = None,
    ) -> LLM:
        """
        Deploy finetuned model.

        - token: Azure AD token.
        - subscription_id: The subscription ID for the associated Azure OpenAI resource.
        - resource_group: The resource group name for your Azure OpenAI resource.
        - resource_name: The Azure OpenAI resource name.
        - model_deployment_name: Custom deployment name that you will use to
          reference the model when making inference calls.
        """
        current_job = self.get_current_job()
        job_id = current_job.id
        status = current_job.status
        model_id = current_job.fine_tuned_model

        if model_id is None:
            raise ValueError(
                f"Job {job_id} does not have a finetuned model id ready yet."
            )
        if status != "succeeded":
            raise ValueError(f"Job {job_id} has status {status}, cannot get model")

        fine_tuned_model = current_job.fine_tuned_model

        if model_deployment_name is None:
            model_deployment_name = fine_tuned_model

        deploy_params = {"api-version": os.getenv("OPENAI_API_VERSION", "2024-02-01")}
        deploy_headers = {
            "Authorization": f"Bearer {token}",
            "Content-Type": "application/json",
        }
        deploy_data = {
            "sku": {"name": "standard", "capacity": 1},
            "properties": {
                "model": {"format": "OpenAI", "name": fine_tuned_model, "version": "1"}
            },
        }
        deploy_data = json.dumps(deploy_data)

        request_url = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group}/providers/Microsoft.CognitiveServices/accounts/{resource_name}/deployments/{model_deployment_name}"

        print("Creating a new deployment...")

        response = requests.put(
            request_url, params=deploy_params, headers=deploy_headers, data=deploy_data
        )

        return response.json()

    def get_finetuned_model(self, engine: str, **model_kwargs: Any) -> LLM:
        """
        Get finetuned model.

        - engine: This will correspond to the custom name you chose for your
          deployment when you deployed a model.
        """
        current_job = self.get_current_job()

        return AzureOpenAI(
            engine=engine or current_job.fine_tuned_model, **model_kwargs
        )
```

target: AzureOpenAIFinetuneEngine
language: python
repo: apache__airflow
path: providers/google/tests/unit/google/cloud/operators/test_vision.py
class_span: {"start": 3996, "end": 4869}
source:

```python
class ____:
    @mock.patch("airflow.providers.google.cloud.operators.vision.CloudVisionHook")
    def test_minimal_green_path(self, mock_hook):
        mock_hook.return_value.update_product_set.return_value = {}
        op = CloudVisionUpdateProductSetOperator(
            location=LOCATION_TEST, product_set=PRODUCTSET_TEST, task_id="id"
        )
        op.execute(context=None)
        mock_hook.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=None,
        )
        mock_hook.return_value.update_product_set.assert_called_once_with(
            location=LOCATION_TEST,
            product_set=PRODUCTSET_TEST,
            product_set_id=None,
            project_id=None,
            retry=DEFAULT,
            timeout=None,
            metadata=(),
            update_mask=None,
        )
```

target: TestCloudVisionProductSetUpdate
language: python
repo: getsentry__sentry
path: src/sentry/api/endpoints/project_profiling_profile.py
class_span: {"start": 2435, "end": 2993}
source:

```python
class ____(ProjectProfilingBaseEndpoint):
    def get(self, request: Request, project: Project, profile_id: str) -> HttpResponse:
        if not features.has("organizations:profiling", project.organization, actor=request.user):
            return Response(status=404)
        kwargs: dict[str, Any] = {
            "method": "GET",
            "path": f"/organizations/{project.organization_id}/projects/{project.id}/raw_profiles/{profile_id}",
        }
        return proxy_profiling_service(**kwargs)


@region_silo_endpoint
```

target: ProjectProfilingRawProfileEndpoint
language: python
repo: networkx__networkx
path: networkx/algorithms/centrality/tests/test_group.py
class_span: {"start": 6839, "end": 8685}
source:

```python
class ____:
    def test_group_degree_centrality_single_node(self):
        """
        Group degree centrality for a single node group
        """
        G = nx.path_graph(4)
        d = nx.group_degree_centrality(G, [1])
        d_answer = nx.degree_centrality(G)[1]
        assert d == d_answer

    def test_group_degree_centrality_multiple_node(self):
        """
        Group degree centrality for group with more than 1 node
        """
        G = nx.Graph()
        G.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8])
        G.add_edges_from(
            [(1, 2), (1, 3), (1, 6), (1, 7), (1, 8), (2, 3), (2, 4), (2, 5)]
        )
        d = nx.group_degree_centrality(G, [1, 2])
        d_answer = 1
        assert d == d_answer

    def test_group_in_degree_centrality(self):
        """
        Group in-degree centrality in a DiGraph
        """
        G = nx.DiGraph()
        G.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8])
        G.add_edges_from(
            [(1, 2), (1, 3), (1, 6), (1, 7), (1, 8), (2, 3), (2, 4), (2, 5)]
        )
        d = nx.group_in_degree_centrality(G, [1, 2])
        d_answer = 0
        assert d == d_answer

    def test_group_out_degree_centrality(self):
        """
        Group out-degree centrality in a DiGraph
        """
        G = nx.DiGraph()
        G.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8])
        G.add_edges_from(
            [(1, 2), (1, 3), (1, 6), (1, 7), (1, 8), (2, 3), (2, 4), (2, 5)]
        )
        d = nx.group_out_degree_centrality(G, [1, 2])
        d_answer = 1
        assert d == d_answer

    def test_group_degree_centrality_node_not_in_graph(self):
        """
        Node(s) in S not in graph, raises NetworkXError
        """
        with pytest.raises(nx.NetworkXError):
            nx.group_degree_centrality(nx.path_graph(5), [6, 7, 8])
```

target: TestGroupDegreeCentrality
language: python
repo: sympy__sympy
path: sympy/series/sequences.py
class_span: {"start": 1025, "end": 11521}
source:

```python
class ____(Basic):
    """Base class for sequences"""

    is_commutative = True
    _op_priority = 15

    @staticmethod
    def _start_key(expr):
        """Return start (if possible) else S.Infinity.

        adapted from Set._infimum_key
        """
        try:
            start = expr.start
        except NotImplementedError:
            start = S.Infinity
        return start

    def _intersect_interval(self, other):
        """Returns start and stop.

        Takes intersection over the two intervals.
        """
        interval = Intersection(self.interval, other.interval)
        return interval.inf, interval.sup

    @property
    def gen(self):
        """Returns the generator for the sequence"""
        raise NotImplementedError("(%s).gen" % self)

    @property
    def interval(self):
        """The interval on which the sequence is defined"""
        raise NotImplementedError("(%s).interval" % self)

    @property
    def start(self):
        """The starting point of the sequence. This point is included"""
        raise NotImplementedError("(%s).start" % self)

    @property
    def stop(self):
        """The ending point of the sequence. This point is included"""
        raise NotImplementedError("(%s).stop" % self)

    @property
    def length(self):
        """Length of the sequence"""
        raise NotImplementedError("(%s).length" % self)

    @property
    def variables(self):
        """Returns a tuple of variables that are bounded"""
        return ()

    @property
    def free_symbols(self):
        """
        This method returns the symbols in the object, excluding those
        that take on a specific value (i.e. the dummy symbols).

        Examples
        ========

        >>> from sympy import SeqFormula
        >>> from sympy.abc import n, m
        >>> SeqFormula(m*n**2, (n, 0, 5)).free_symbols
        {m}
        """
        return ({j for i in self.args for j in i.free_symbols
                .difference(self.variables)})

    @cacheit
    def coeff(self, pt):
        """Returns the coefficient at point pt"""
        if pt < self.start or pt > self.stop:
            raise IndexError("Index %s out of bounds %s" % (pt, self.interval))
        return self._eval_coeff(pt)

    def _eval_coeff(self, pt):
        raise NotImplementedError("The _eval_coeff method should be added to"
                                  "%s to return coefficient so it is available"
                                  "when coeff calls it."
                                  % self.func)

    def _ith_point(self, i):
        """Returns the i'th point of a sequence.

        Explanation
        ===========

        If start point is negative infinity, point is returned from the end.
        Assumes the first point to be indexed zero.

        Examples
        =========

        >>> from sympy import oo
        >>> from sympy.series.sequences import SeqPer

        bounded

        >>> SeqPer((1, 2, 3), (-10, 10))._ith_point(0)
        -10
        >>> SeqPer((1, 2, 3), (-10, 10))._ith_point(5)
        -5

        End is at infinity

        >>> SeqPer((1, 2, 3), (0, oo))._ith_point(5)
        5

        Starts at negative infinity

        >>> SeqPer((1, 2, 3), (-oo, 0))._ith_point(5)
        -5
        """
        if self.start is S.NegativeInfinity:
            initial = self.stop
        else:
            initial = self.start

        if self.start is S.NegativeInfinity:
            step = -1
        else:
            step = 1

        return initial + i*step

    def _add(self, other):
        """
        Should only be used internally.

        Explanation
        ===========

        self._add(other) returns a new, term-wise added sequence if self
        knows how to add with other, otherwise it returns ``None``.

        ``other`` should only be a sequence object.

        Used within :class:`SeqAdd` class.
        """
        return None

    def _mul(self, other):
        """
        Should only be used internally.

        Explanation
        ===========

        self._mul(other) returns a new, term-wise multiplied sequence if self
        knows how to multiply with other, otherwise it returns ``None``.

        ``other`` should only be a sequence object.

        Used within :class:`SeqMul` class.
        """
        return None

    def coeff_mul(self, other):
        """
        Should be used when ``other`` is not a sequence. Should be
        defined to define custom behaviour.

        Examples
        ========

        >>> from sympy import SeqFormula
        >>> from sympy.abc import n
        >>> SeqFormula(n**2).coeff_mul(2)
        SeqFormula(2*n**2, (n, 0, oo))

        Notes
        =====

        '*' defines multiplication of sequences with sequences only.
        """
        return Mul(self, other)

    def __add__(self, other):
        """Returns the term-wise addition of 'self' and 'other'.

        ``other`` should be a sequence.

        Examples
        ========

        >>> from sympy import SeqFormula
        >>> from sympy.abc import n
        >>> SeqFormula(n**2) + SeqFormula(n**3)
        SeqFormula(n**3 + n**2, (n, 0, oo))
        """
        if not isinstance(other, SeqBase):
            raise TypeError('cannot add sequence and %s' % type(other))
        return SeqAdd(self, other)

    @call_highest_priority('__add__')
    def __radd__(self, other):
        return self + other

    def __sub__(self, other):
        """Returns the term-wise subtraction of ``self`` and ``other``.

        ``other`` should be a sequence.

        Examples
        ========

        >>> from sympy import SeqFormula
        >>> from sympy.abc import n
        >>> SeqFormula(n**2) - (SeqFormula(n))
        SeqFormula(n**2 - n, (n, 0, oo))
        """
        if not isinstance(other, SeqBase):
            raise TypeError('cannot subtract sequence and %s' % type(other))
        return SeqAdd(self, -other)

    @call_highest_priority('__sub__')
    def __rsub__(self, other):
        return (-self) + other

    def __neg__(self):
        """Negates the sequence.

        Examples
        ========

        >>> from sympy import SeqFormula
        >>> from sympy.abc import n
        >>> -SeqFormula(n**2)
        SeqFormula(-n**2, (n, 0, oo))
        """
        return self.coeff_mul(-1)

    def __mul__(self, other):
        """Returns the term-wise multiplication of 'self' and 'other'.

        ``other`` should be a sequence. For ``other`` not being a
        sequence see :func:`coeff_mul` method.

        Examples
        ========

        >>> from sympy import SeqFormula
        >>> from sympy.abc import n
        >>> SeqFormula(n**2) * (SeqFormula(n))
        SeqFormula(n**3, (n, 0, oo))
        """
        if not isinstance(other, SeqBase):
            raise TypeError('cannot multiply sequence and %s' % type(other))
        return SeqMul(self, other)

    @call_highest_priority('__mul__')
    def __rmul__(self, other):
        return self * other

    def __iter__(self):
        for i in range(self.length):
            pt = self._ith_point(i)
            yield self.coeff(pt)

    def __getitem__(self, index):
        if isinstance(index, int):
            index = self._ith_point(index)
            return self.coeff(index)
        elif isinstance(index, slice):
            start, stop = index.start, index.stop
            if start is None:
                start = 0
            if stop is None:
                stop = self.length
            return [self.coeff(self._ith_point(i)) for i in
                    range(start, stop, index.step or 1)]

    def find_linear_recurrence(self, n, d=None, gfvar=None):
        r"""
        Finds the shortest linear recurrence that satisfies the first n
        terms of sequence of order `\leq` ``n/2`` if possible.
        If ``d`` is specified, find shortest linear recurrence of order
        `\leq` min(d, n/2) if possible.
        Returns list of coefficients ``[b(1), b(2), ...]`` corresponding to the
        recurrence relation ``x(n) = b(1)*x(n-1) + b(2)*x(n-2) + ...``
        Returns ``[]`` if no recurrence is found.
        If gfvar is specified, also returns ordinary generating function as a
        function of gfvar.

        Examples
        ========

        >>> from sympy import sequence, sqrt, oo, lucas
        >>> from sympy.abc import n, x, y
        >>> sequence(n**2).find_linear_recurrence(10, 2)
        []
        >>> sequence(n**2).find_linear_recurrence(10)
        [3, -3, 1]
        >>> sequence(2**n).find_linear_recurrence(10)
        [2]
        >>> sequence(23*n**4+91*n**2).find_linear_recurrence(10)
        [5, -10, 10, -5, 1]
        >>> sequence(sqrt(5)*(((1 + sqrt(5))/2)**n - (-(1 + sqrt(5))/2)**(-n))/5).find_linear_recurrence(10)
        [1, 1]
        >>> sequence(x+y*(-2)**(-n), (n, 0, oo)).find_linear_recurrence(30)
        [1/2, 1/2]
        >>> sequence(3*5**n + 12).find_linear_recurrence(20,gfvar=x)
        ([6, -5], 3*(5 - 21*x)/((x - 1)*(5*x - 1)))
        >>> sequence(lucas(n)).find_linear_recurrence(15,gfvar=x)
        ([1, 1], (x - 2)/(x**2 + x - 1))
        """
        from sympy.simplify import simplify
        x = [simplify(expand(t)) for t in self[:n]]
        lx = len(x)
        if d is None:
            r = lx//2
        else:
            r = min(d, lx//2)
        coeffs = []
        for l in range(1, r+1):
            l2 = 2*l
            mlist = []
            for k in range(l):
                mlist.append(x[k:k+l])
            m = Matrix(mlist)
            if m.det() != 0:
                y = simplify(m.LUsolve(Matrix(x[l:l2])))
                if lx == l2:
                    coeffs = flatten(y[::-1])
                    break
                mlist = []
                for k in range(l, lx-l):
                    mlist.append(x[k:k+l])
                m = Matrix(mlist)
                if m*y == Matrix(x[l2:]):
                    coeffs = flatten(y[::-1])
                    break
        if gfvar is None:
            return coeffs
        else:
            l = len(coeffs)
            if l == 0:
                return [], None
            else:
                n, d = x[l-1]*gfvar**(l-1), 1 - coeffs[l-1]*gfvar**l
                for i in range(l-1):
                    n += x[i]*gfvar**i
                    for j in range(l-i-1):
                        n -= coeffs[i]*x[j]*gfvar**(i+j+1)
                    d -= coeffs[i]*gfvar**(i+1)
                return coeffs, simplify(factor(n)/factor(d))
```

target: SeqBase
language: python
repo: scipy__scipy
path: scipy/special/tests/test_ndtr.py
class_span: {"start": 475, "end": 2680}
source:

```python
class ____:
    # The expected values in these tests were computed with mpmath:
    #
    #   def log_ndtr_mp(x):
    #       return mpmath.log(mpmath.ncdf(x))
    #
    def test_log_ndtr_moderate_le8(self):
        x = np.array([-0.75, -0.25, 0, 0.5, 1.5, 2.5, 3, 4, 5, 7, 8])
        expected = np.array([-1.4844482299196562,
                             -0.9130617648111351,
                             -0.6931471805599453,
                             -0.3689464152886564,
                             -0.06914345561223398,
                             -0.006229025485860002,
                             -0.0013508099647481938,
                             -3.167174337748927e-05,
                             -2.866516129637636e-07,
                             -1.279812543886654e-12,
                             -6.220960574271786e-16])
        y = sc.log_ndtr(x)
        assert_allclose(y, expected, rtol=1e-14)

    def test_log_ndtr_values_8_16(self):
        x = np.array([8.001, 8.06, 8.15, 8.5, 10, 12, 14, 16])
        expected = [-6.170639424817055e-16,
                    -3.814722443652823e-16,
                    -1.819621363526629e-16,
                    -9.479534822203318e-18,
                    -7.619853024160525e-24,
                    -1.776482112077679e-33,
                    -7.7935368191928e-45,
                    -6.388754400538087e-58]
        y = sc.log_ndtr(x)
        assert_allclose(y, expected, rtol=5e-14)

    def test_log_ndtr_values_16_31(self):
        x = np.array([16.15, 20.3, 21.4, 26.2, 30.9])
        expected = [-5.678084565148492e-59,
                    -6.429244467698346e-92,
                    -6.680402412553295e-102,
                    -1.328698078458869e-151,
                    -5.972288641838264e-210]
        y = sc.log_ndtr(x)
        assert_allclose(y, expected, rtol=2e-13)

    def test_log_ndtr_values_gt31(self):
        x = np.array([31.6, 32.8, 34.9, 37.1])
        expected = [-1.846036234858162e-219,
                    -2.9440539964066835e-236,
                    -3.71721649450857e-267,
                    -1.4047119663106221e-301]
        y = sc.log_ndtr(x)
        assert_allclose(y, expected, rtol=3e-13)
```

target: TestLogNdtr
language: python
repo: EpistasisLab__tpot
path: tpot/search_spaces/pipelines/dynamic_linear.py
class_span: {"start": 1738, "end": 6307}
source:

```python
class ____(SklearnIndividual):
    # takes in a single search space.
    # will produce a pipeline of variable length. Each step in the pipeline
    # will be pulled from the search space provided.
    def __init__(self, search_space: SearchSpace, max_length: int, rng=None) -> None:
        super().__init__()
        rng = np.random.default_rng(rng)

        self.search_space = search_space
        self.min_length = 1
        self.max_length = max_length

        self.pipeline = self._generate_pipeline(rng)

    def _generate_pipeline(self, rng=None):
        rng = np.random.default_rng(rng)
        pipeline = []
        length = rng.integers(self.min_length, self.max_length)
        length = min(length, 3)

        for _ in range(length):
            pipeline.append(self.search_space.generate(rng))
        return pipeline

    def mutate(self, rng=None):
        rng = np.random.default_rng(rng)
        options = []
        if len(self.pipeline) > self.min_length:
            options.append(self._mutate_remove_node)
        if len(self.pipeline) < self.max_length:
            options.append(self._mutate_add_node)
        options.append(self._mutate_step)
        return rng.choice(options)(rng)

    def _mutate_add_node(self, rng=None):
        rng = np.random.default_rng(rng)
        new_node = self.search_space.generate(rng)
        idx = rng.integers(len(self.pipeline))
        self.pipeline.insert(idx, new_node)

    def _mutate_remove_node(self, rng=None):
        rng = np.random.default_rng(rng)
        idx = rng.integers(len(self.pipeline))
        self.pipeline.pop(idx)

    def _mutate_step(self, rng=None):
        # choose a random step in the pipeline and mutate it
        rng = np.random.default_rng(rng)
        step = rng.choice(self.pipeline)
        return step.mutate(rng)

    def crossover(self, other, rng=None):
        # swap a random step in the pipeline with the corresponding step in the other pipeline
        rng = np.random.default_rng(rng)

        cx_funcs = [self._crossover_swap_multiple_nodes, self._crossover_node]
        rng.shuffle(cx_funcs)
        for cx_func in cx_funcs:
            if cx_func(other, rng):
                return True

        return False

    def _crossover_swap_multiple_nodes(self, other, rng):
        rng = np.random.default_rng(rng)

        max_steps = int(min(len(self.pipeline), len(other.pipeline))/2)
        max_steps = max(max_steps, 1)

        if max_steps == 1:
            n_steps_to_swap = 1
        else:
            n_steps_to_swap = rng.integers(1, max_steps)

        other_indexes_to_take = rng.choice(len(other.pipeline), n_steps_to_swap, replace=False)
        self_indexes_to_replace = rng.choice(len(self.pipeline), n_steps_to_swap, replace=False)

        # self.pipeline[self_indexes_to_replace], other.pipeline[other_indexes_to_take] = other.pipeline[other_indexes_to_take], self.pipeline[self_indexes_to_replace]

        for self_idx, other_idx in zip(self_indexes_to_replace, other_indexes_to_take):
            self.pipeline[self_idx], other.pipeline[other_idx] = other.pipeline[other_idx], self.pipeline[self_idx]

        return True

    def _crossover_swap_node(self, other, rng):
        if len(self.pipeline) != len(other.pipeline):
            return False
        if len(self.pipeline) < 2:
            return False

        rng = np.random.default_rng(rng)
        idx = rng.integers(1, len(self.pipeline))

        self.pipeline[idx], other.pipeline[idx] = other.pipeline[idx], self.pipeline[idx]
        return True

    def _crossover_node(self, other, rng):
        rng = np.random.default_rng(rng)

        pipeline1_indexes = list(range(len(self.pipeline)))
        pipeline2_indexes = list(range(len(other.pipeline)))

        rng.shuffle(pipeline1_indexes)
        rng.shuffle(pipeline2_indexes)

        crossover_success = False
        for idx1, idx2 in zip(pipeline1_indexes, pipeline2_indexes):
            if self.pipeline[idx1].crossover(other.pipeline[idx2], rng):
                crossover_success = True

        return crossover_success

    def export_pipeline(self, memory=None, **kwargs):
        return sklearn.pipeline.make_pipeline(
            *[step.export_pipeline(memory=memory, **kwargs) for step in self.pipeline],
            memory=memory)

    def unique_id(self):
        l = [step.unique_id() for step in self.pipeline]
        l = ["DynamicLinearPipeline"] + l
        return TupleIndex(tuple(l))
```

target: DynamicLinearPipelineIndividual
language: python
repo: apache__thrift
path: test/py/TestClient.py
class_span: {"start": 19638, "end": 22668}
source:

```python
class ____(unittest.TestProgram):
    def parseArgs(self, argv):
        if args:
            self.testNames = args
        else:
            self.testNames = ([self.defaultTest])
        self.createTests()


if __name__ == "__main__":
    parser = OptionParser()
    parser.add_option('--libpydir', type='string', dest='libpydir',
                      help='include this directory in sys.path for locating library code')
    parser.add_option('--genpydir', type='string', dest='genpydir',
                      help='include this directory in sys.path for locating generated code')
    parser.add_option("--port", type="int", dest="port",
                      help="connect to server at port")
    parser.add_option("--host", type="string", dest="host",
                      help="connect to server")
    parser.add_option("--zlib", action="store_true", dest="zlib",
                      help="use zlib wrapper for compressed transport")
    parser.add_option("--ssl", action="store_true", dest="ssl",
                      help="use SSL for encrypted transport")
    parser.add_option("--http", dest="http_path",
                      help="Use the HTTP transport with the specified path")
    parser.add_option('-v', '--verbose', action="store_const",
                      dest="verbose", const=2,
                      help="verbose output")
    parser.add_option('-q', '--quiet', action="store_const",
                      dest="verbose", const=0,
                      help="minimal output")
    parser.add_option('--protocol', dest="proto", type="string",
                      help="protocol to use, one of: accel, accelc, binary, compact, header, json, multi, multia, multiac, multic, multih, multij")
    parser.add_option('--transport', dest="trans", type="string",
                      help="transport to use, one of: buffered, framed, http")
    parser.add_option('--domain-socket', dest="domain_socket", type="string",
                      help="Unix domain socket path")
    parser.set_defaults(framed=False, http_path=None, verbose=1, host='localhost',
                        port=9090, proto='binary')
    options, args = parser.parse_args()

    if options.genpydir:
        sys.path.insert(0, os.path.join(SCRIPT_DIR, options.genpydir))

    if options.http_path:
        options.trans = 'http'

    from ThriftTest import SecondService
    from ThriftTest import ThriftTest
    from ThriftTest.ttypes import Xtruct, Xtruct2, Numberz, Xception, Xception2
    from thrift.Thrift import TException
    from thrift.transport import TTransport
    from thrift.transport import TSocket
    from thrift.transport import THttpClient
    from thrift.transport import TZlibTransport
    from thrift.protocol import TBinaryProtocol
    from thrift.protocol import TCompactProtocol
    from thrift.protocol import THeaderProtocol
    from thrift.protocol import TJSONProtocol
    from thrift.protocol import TMultiplexedProtocol

    OwnArgsTestProgram(defaultTest="suite", testRunner=unittest.TextTestRunner(verbosity=1))
```

target: OwnArgsTestProgram
python
pyqtgraph__pyqtgraph
pyqtgraph/graphicsItems/GradientLegend.py
{ "start": 136, "end": 4925 }
class ____(UIGraphicsItem): """ Draws a color gradient rectangle along with text labels denoting the value at specific points along the gradient. """ def __init__(self, size, offset): self.size = size self.offset = offset UIGraphicsItem.__init__(self) self.setAcceptedMouseButtons(QtCore.Qt.MouseButton.NoButton) self.brush = QtGui.QBrush(QtGui.QColor(255,255,255,100)) # background color self.pen = QtGui.QPen(QtGui.QColor(0,0,0)) self.textPen = QtGui.QPen(QtGui.QColor(0,0,0)) self.labels = {'max': 1, 'min': 0} self.gradient = QtGui.QLinearGradient() self.gradient.setColorAt(0, QtGui.QColor(0,0,0)) self.gradient.setColorAt(1, QtGui.QColor(255,0,0)) self.setZValue(100) # draw on top of ordinary plots def setGradient(self, g): self.gradient = g self.update() def setColorMap(self, colormap): """ Set displayed gradient from a :class:`~pyqtgraph.ColorMap` object. """ self.gradient = colormap.getGradient() def setIntColorScale(self, minVal, maxVal, *args, **kargs): colors = [fn.intColor(i, maxVal-minVal, *args, **kargs) for i in range(minVal, maxVal)] g = QtGui.QLinearGradient() for i in range(len(colors)): x = float(i)/len(colors) g.setColorAt(x, colors[i]) self.setGradient(g) if 'labels' not in kargs: self.setLabels({str(minVal): 0, str(maxVal): 1}) else: self.setLabels({kargs['labels'][0]:0, kargs['labels'][1]:1}) def setLabels(self, l): """Defines labels to appear next to the color scale. Accepts a dict of {text: value} pairs""" self.labels = l self.update() def paint(self, p, opt, widget): UIGraphicsItem.paint(self, p, opt, widget) view = self.getViewBox() if view is None: return p.save() # save painter state before we change transformation trans = view.sceneTransform() p.setTransform( trans ) # draw in ViewBox pixel coordinates rect = view.rect() ## determine max width of all labels labelWidth = 0 labelHeight = 0 for k in self.labels: b = p.boundingRect(QtCore.QRectF(0, 0, 0, 0), QtCore.Qt.AlignmentFlag.AlignLeft | QtCore.Qt.AlignmentFlag.AlignVCenter, str(k)) labelWidth = max(labelWidth, b.width()) labelHeight = max(labelHeight, b.height()) textPadding = 2 # in px xR = rect.right() xL = rect.left() yT = rect.top() yB = rect.bottom() # coordinates describe edges of text and bar, additional margins will be added for background if self.offset[0] < 0: x3 = xR + self.offset[0] # right edge from right edge of view, offset is negative! x2 = x3 - labelWidth - 2*textPadding # right side of color bar x1 = x2 - self.size[0] # left side of color bar else: x1 = xL + self.offset[0] # left edge from left edge of view x2 = x1 + self.size[0] x3 = x2 + labelWidth + 2*textPadding # leave room for 2x textpadding between bar and text if self.offset[1] < 0: y2 = yB + self.offset[1] # bottom edge from bottom of view, offset is negative! 
y1 = y2 - self.size[1] else: y1 = yT + self.offset[1] # top edge from top of view y2 = y1 + self.size[1] self.b = [x1,x2,x3,y1,y2,labelWidth] ## Draw background p.setPen(self.pen) p.setBrush(self.brush) # background color rect = QtCore.QRectF( QtCore.QPointF(x1 - textPadding, y1-labelHeight/2 - textPadding), # extra left/top padding QtCore.QPointF(x3 + textPadding, y2+labelHeight/2 + textPadding) # extra bottom/right padding ) p.drawRect(rect) ## Draw color bar self.gradient.setStart(0, y2) self.gradient.setFinalStop(0, y1) p.setBrush(self.gradient) rect = QtCore.QRectF( QtCore.QPointF(x1, y1), QtCore.QPointF(x2, y2) ) p.drawRect(rect) ## draw labels p.setPen(self.textPen) tx = x2 + 2 * textPadding # margin between bar and text lh = labelHeight lw = labelWidth for k in self.labels: y = y2 - self.labels[k] * (y2-y1) p.drawText(QtCore.QRectF(tx, y - lh/2, lw, lh), QtCore.Qt.AlignmentFlag.AlignLeft | QtCore.Qt.AlignmentFlag.AlignVCenter, str(k)) p.restore() # restore QPainter transform to original state
GradientLegend
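A hedged usage sketch (requires a Qt environment; assumes `GradientLegend` is exported at the `pyqtgraph` top level and that `addItem` on a plot is an acceptable way to place a `UIGraphicsItem` in a ViewBox — `pg.colormap.get` is the modern colormap accessor):

```python
import pyqtgraph as pg

app = pg.mkQApp()
plot = pg.plot()                                      # PlotWidget with a ViewBox
legend = pg.GradientLegend(size=(10, 100), offset=(-10, 10))
legend.setColorMap(pg.colormap.get("viridis"))
legend.setLabels({"low": 0.0, "high": 1.0})
plot.addItem(legend)                                  # draws on top (zValue=100)
# app.exec() would start the event loop in a real script
```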
python
dagster-io__dagster
python_modules/dagster/dagster/_core/remote_representation/external_data.py
{ "start": 12444, "end": 13209 }
class ____: """Stores sensor metadata which is available in the Dagster UI. This is an unfortunate legacy class that is out of line with our preferred pattern of storing standard `Mapping[str, MetadataValue]` under the metadata field. Because this class already existed when adding this standard metadata to sensors, we stash it on here as a field under `standard_metadata`. """ asset_keys: Optional[Sequence[AssetKey]] standard_metadata: Optional[Mapping[str, MetadataValue]] = None @whitelist_for_serdes( storage_name="ExternalSensorData", storage_field_names={"job_name": "pipeline_name", "op_selection": "solid_selection"}, skip_when_empty_fields={"default_status", "sensor_type"}, ) @record_custom
SensorMetadataSnap
python
pytorch__pytorch
test/complex_tensor/test_complex_tensor.py
{ "start": 8126, "end": 9731 }
class ____(TestGradients): _default_dtype_check_enabled = True @ops( implemented_op_db, dtypes=OpDTypes.supported_backward, allowed_dtypes=[torch.complex128], ) def test_fn_grad(self, device: str, dtype: torch.dtype, op: OpInfo) -> None: test_info = Descriptor( op=get_overload_packet_from_name(op.name), device_type=torch.device(device).type, dtype=dtype, variant=Variant.GradCheck, ) for xfail_info, reason in SKIPS.items(): if xfail_info.matches(test_info): self.skipTest(reason) if dtype not in op.supported_backward_dtypes(torch.device(device).type): self.skipTest(f"Skipped! {dtype=} is not in supported backward dtypes!") with ComplexTensorMode(): op.gradcheck_fast_mode = False self._grad_test_helper(device, dtype, op, op.get_op()) instantiate_device_type_tests(TestComplexTensor, globals()) instantiate_device_type_tests(TestComplexBwdGradients, globals()) if dist.is_available(): from torch.testing._internal.common_distributed import MultiProcessTestCase @unMarkDynamoStrictTest class TestComplexDistributed(TestCase, MultiProcessTestCase): @ops(implemented_op_db, allowed_dtypes=list(COMPLEX_DTYPES)) def test_distributed(self, device, dtype, op: OpInfo): self.check_consistency(device, dtype, op, Variant.Distributed) instantiate_device_type_tests(TestComplexDistributed, globals()) if __name__ == "__main__": run_tests()
TestComplexBwdGradients
python
encode__django-rest-framework
tests/test_renderers.py
{ "start": 3658, "end": 3801 }
class ____(permissions.BasePermission): def has_permission(self, request, view): return request.method != 'POST'
POSTDeniedPermission
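For context, a sketch of how such a permission is attached to a DRF view so that GET is allowed and POST is rejected with a 403; `ExampleView` is an illustrative name:

```python
from rest_framework.views import APIView
from rest_framework.response import Response

class ExampleView(APIView):
    permission_classes = [POSTDeniedPermission]  # GET allowed, POST -> 403

    def get(self, request):
        return Response({"ok": True})
```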
python
numpy__numpy
numpy/distutils/ccompiler_opt.py
{ "start": 23216, "end": 29727 }
class ____: """A helper class that provides a collection of fundamental methods implemented in a top of Python and NumPy Distutils. The idea behind this class is to gather all methods that it may need to override in case of reuse 'CCompilerOpt' in environment different than of what NumPy has. Parameters ---------- ccompiler : `CCompiler` The generate instance that returned from `distutils.ccompiler.new_compiler()`. """ def __init__(self, ccompiler): self._ccompiler = ccompiler def dist_compile(self, sources, flags, ccompiler=None, **kwargs): """Wrap CCompiler.compile()""" assert(isinstance(sources, list)) assert(isinstance(flags, list)) flags = kwargs.pop("extra_postargs", []) + flags if not ccompiler: ccompiler = self._ccompiler return ccompiler.compile(sources, extra_postargs=flags, **kwargs) def dist_test(self, source, flags, macros=[]): """Return True if 'CCompiler.compile()' able to compile a source file with certain flags. """ assert(isinstance(source, str)) from distutils.errors import CompileError cc = self._ccompiler; bk_spawn = getattr(cc, 'spawn', None) if bk_spawn: cc_type = getattr(self._ccompiler, "compiler_type", "") if cc_type in ("msvc",): setattr(cc, 'spawn', self._dist_test_spawn_paths) else: setattr(cc, 'spawn', self._dist_test_spawn) test = False try: self.dist_compile( [source], flags, macros=macros, output_dir=self.conf_tmp_path ) test = True except CompileError as e: self.dist_log(str(e), stderr=True) if bk_spawn: setattr(cc, 'spawn', bk_spawn) return test def dist_info(self): """ Return a tuple containing info about (platform, compiler, extra_args), required by the abstract class '_CCompiler' for discovering the platform environment. This is also used as a cache factor in order to detect any changes happening from outside. """ if hasattr(self, "_dist_info"): return self._dist_info cc_type = getattr(self._ccompiler, "compiler_type", '') if cc_type in ("intelem", "intelemw"): platform = "x86_64" elif cc_type in ("intel", "intelw", "intele"): platform = "x86" else: from distutils.util import get_platform platform = get_platform() cc_info = getattr(self._ccompiler, "compiler", getattr(self._ccompiler, "compiler_so", '')) if not cc_type or cc_type == "unix": if hasattr(cc_info, "__iter__"): compiler = cc_info[0] else: compiler = str(cc_info) else: compiler = cc_type if hasattr(cc_info, "__iter__") and len(cc_info) > 1: extra_args = ' '.join(cc_info[1:]) else: extra_args = os.environ.get("CFLAGS", "") extra_args += os.environ.get("CPPFLAGS", "") self._dist_info = (platform, compiler, extra_args) return self._dist_info @staticmethod def dist_error(*args): """Raise a compiler error""" from distutils.errors import CompileError raise CompileError(_Distutils._dist_str(*args)) @staticmethod def dist_fatal(*args): """Raise a distutils error""" from distutils.errors import DistutilsError raise DistutilsError(_Distutils._dist_str(*args)) @staticmethod def dist_log(*args, stderr=False): """Print a console message""" from numpy.distutils import log out = _Distutils._dist_str(*args) if stderr: log.warn(out) else: log.info(out) @staticmethod def dist_load_module(name, path): """Load a module from file, required by the abstract class '_Cache'.""" from .misc_util import exec_mod_from_location try: return exec_mod_from_location(name, path) except Exception as e: _Distutils.dist_log(e, stderr=True) return None @staticmethod def _dist_str(*args): """Return a string to print by log and errors.""" def to_str(arg): if not isinstance(arg, str) and hasattr(arg, '__iter__'): ret = [] for a in 
arg: ret.append(to_str(a)) return '('+ ' '.join(ret) + ')' return str(arg) stack = inspect.stack()[2] start = "CCompilerOpt.%s[%d] : " % (stack.function, stack.lineno) out = ' '.join([ to_str(a) for a in (*args,) ]) return start + out def _dist_test_spawn_paths(self, cmd, display=None): """ Fix the msvc SDK ENV path, same as distutils does; without it we get c1: fatal error C1356: unable to find mspdbcore.dll """ if not hasattr(self._ccompiler, "_paths"): self._dist_test_spawn(cmd) return old_path = os.getenv("path") try: os.environ["path"] = self._ccompiler._paths self._dist_test_spawn(cmd) finally: os.environ["path"] = old_path _dist_warn_regex = re.compile( # intel and msvc compilers don't raise # fatal errors when flags are wrong or unsupported ".*(" "warning D9002|" # msvc, it should work with any language. "invalid argument for option" # intel ").*" ) @staticmethod def _dist_test_spawn(cmd, display=None): try: o = subprocess.check_output(cmd, stderr=subprocess.STDOUT, text=True) if o and re.match(_Distutils._dist_warn_regex, o): _Distutils.dist_error( "Flags in command", cmd, "aren't supported by the compiler" ", output -> \n%s" % o ) except subprocess.CalledProcessError as exc: o = exc.output s = exc.returncode except OSError as e: o = e s = 127 else: return None _Distutils.dist_error( "Command", cmd, "failed with exit status %d output -> \n%s" % ( s, o )) _share_cache = {}
_Distutils
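A sketch of exercising the helper directly, under stated assumptions: distutils/numpy.distutils are still importable (they are removed in Python 3.12+), and the class is normally mixed into `CCompilerOpt` rather than used standalone, so only the methods with no extra dependencies are shown:

```python
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler

cc = new_compiler()
customize_compiler(cc)
helper = _Distutils(cc)
# (platform, compiler, extra_args) cached after the first call
platform, compiler, extra_args = helper.dist_info()
helper.dist_log("building for", platform, "with", compiler)
```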
python
instagram__MonkeyType
demo/models.py
{ "start": 1609, "end": 1980 }
class ____(InboxEvent): type = EventType.LIKED def __init__( self, id: InboxEventId, user_id: UserId, published: datetime, feedentry_id: FeedEntryId, liker_id: UserId, ) -> None: super().__init__(id, user_id, published) self.feedentry_id = feedentry_id self.liker_id = liker_id
LikedEvent
python
falconry__falcon
tests/_wsgi_test_app.py
{ "start": 739, "end": 1379 }
class ____: def on_get(self, req, resp): resp.set_header('X-Falcon', 'peregrine') resp.content_type = falcon.MEDIA_TEXT resp.text = 'Hello, World!\n' def on_get_deprecated(self, req, resp): resp.set_header('X-Falcon', 'deprecated') resp.content_type = falcon.MEDIA_TEXT resp.text = 'Hello, World!\n' resp.add_link('/removed-methods/add-link', 'bookmark') app = application = falcon.App() app.add_route('/forms', Forms()) app.add_route('/hello', Hello()) app.add_route('/deprecated', Hello(), suffix='deprecated') app.add_static_route('/tests', HERE, downloadable=True)
Hello
python
arrow-py__arrow
arrow/locales.py
{ "start": 60038, "end": 60670 }
class ____(ArabicLocale): names = ["ar-iq", "ar-jo", "ar-lb", "ar-ps", "ar-sy"] month_names = [ "", "كانون الثاني", "شباط", "آذار", "نيسان", "أيار", "حزيران", "تموز", "آب", "أيلول", "تشرين الأول", "تشرين الثاني", "كانون الأول", ] month_abbreviations = [ "", "كانون الثاني", "شباط", "آذار", "نيسان", "أيار", "حزيران", "تموز", "آب", "أيلول", "تشرين الأول", "تشرين الثاني", "كانون الأول", ]
LevantArabicLocale
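A hedged usage sketch: once a locale subclass declares `names`, arrow resolves those tags through its formatting and humanize APIs, so the Levantine month names above surface like this (output comment reflects `month_names[1]`):

```python
import arrow

dt = arrow.Arrow(2024, 1, 15)
print(dt.format("MMMM", locale="ar-sy"))            # كانون الثاني (January)
print(dt.shift(months=-1).humanize(dt, locale="ar-ps"))
```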
python
django__django
tests/requests_tests/test_data_upload_settings.py
{ "start": 562, "end": 1481 }
class ____(SimpleTestCase): def setUp(self): payload = FakePayload("a=1&a=2&a=3\r\n") self.request = WSGIRequest( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": "application/x-www-form-urlencoded", "CONTENT_LENGTH": len(payload), "wsgi.input": payload, } ) def test_size_exceeded(self): with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=12): with self.assertRaisesMessage(RequestDataTooBig, TOO_MUCH_DATA_MSG): self.request._load_post_and_files() def test_size_not_exceeded(self): with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=13): self.request._load_post_and_files() def test_no_limit(self): with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=None): self.request._load_post_and_files()
DataUploadMaxMemorySizeFormPostTests
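A settings sketch of the knob the tests above exercise: Django compares the form-post body size against `DATA_UPLOAD_MAX_MEMORY_SIZE` and raises `RequestDataTooBig` when it is exceeded. The payload `b"a=1&a=2&a=3\r\n"` is 13 bytes, which explains the 12 (raises) vs. 13 (passes) boundary.

```python
# settings.py sketch
DATA_UPLOAD_MAX_MEMORY_SIZE = 2_621_440  # Django's default: 2.5 MB
# DATA_UPLOAD_MAX_MEMORY_SIZE = None     # disables the check entirely
```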
python
pytorch__pytorch
torch/futures/__init__.py
{ "start": 300, "end": 14435 }
class ____(torch._C.Future, Generic[T]): r""" Wrapper around a ``torch._C.Future`` which encapsulates an asynchronous execution of a callable, e.g. :meth:`~torch.distributed.rpc.rpc_async`. It also exposes a set of APIs to add callback functions and set results. .. warning:: GPU support is a beta feature, subject to changes. """ def __init__( self, *, devices: Optional[list[Union[int, str, torch.device]]] = None ): r""" Create an empty unset ``Future``. If the future is intended to hold values containing CUDA tensors, (a superset of) their CUDA devices must be specified at construction. (This is only supported if ``torch.cuda.is_available()`` returns ``True``). This is needed to ensure proper CUDA stream synchronization. The child futures, returned by the ``then`` method, will inherit these devices. Args: devices(``List[Union[int, str, torch.device]]``, optional): the set of devices on which tensors contained in this future's value are allowed to reside and on which callbacks are allowed to operate. """ if devices is None: devices = [] super().__init__([torch.device(d) for d in devices]) def done(self) -> bool: r""" Return ``True`` if this ``Future`` is done. A ``Future`` is done if it has a result or an exception. If the value contains tensors that reside on GPUs, ``Future.done()`` will return ``True`` even if the asynchronous kernels that are populating those tensors haven't yet completed running on the device, because at such stage the result is already usable, provided one performs the appropriate synchronizations (see :meth:`wait`). """ return super().done() def wait(self) -> T: r""" Block until the value of this ``Future`` is ready. If the value contains tensors that reside on GPUs, then an additional synchronization is performed with the kernels (executing on the device) which may be asynchronously populating those tensors. Such sync is non-blocking, which means that ``wait()`` will insert the necessary instructions in the current streams to ensure that further operations enqueued on those streams will be properly scheduled after the async kernels but, once that is done, ``wait()`` will return, even if those kernels are still running. No further synchronization is required when accessing and using the values, as long as one doesn't change streams. Returns: The value held by this ``Future``. If the function (callback or RPC) creating the value has thrown an error, this ``wait`` method will also throw an error. """ return super().wait() def value(self) -> T: r""" Obtain the value of an already-completed future. This method should only be called after a call to :meth:`wait` has completed, or inside a callback function passed to :meth:`then`. In other cases this ``Future`` may not yet hold a value and calling ``value()`` could fail. If the value contains tensors that reside on GPUs, then this method will *not* perform any additional synchronization. This should be done beforehand, separately, through a call to :meth:`wait` (except within callbacks, for which it's already being taken care of by :meth:`then`). Returns: The value held by this ``Future``. If the function (callback or RPC) creating the value has thrown an error, this ``value()`` method will also throw an error. """ return super().value() def then(self, callback: Callable[[Future[T]], S]) -> Future[S]: r""" Append the given callback function to this ``Future``, which will be run when the ``Future`` is completed. 
Multiple callbacks can be added to the same ``Future``, but the order in which they will be executed cannot be guaranteed (to enforce a certain order consider chaining: ``fut.then(cb1).then(cb2)``). The callback must take one argument, which is the reference to this ``Future``. The callback function can use the :meth:`value` method to get the value. Note that if this ``Future`` is already completed, the given callback will be run immediately inline. If the ``Future``'s value contains tensors that reside on GPUs, the callback might be invoked while the async kernels that are populating those tensors haven't yet finished executing on the device. However, the callback will be invoked with some dedicated streams set as current (fetched from a global pool) which will be synchronized with those kernels. Hence any operation performed by the callback on these tensors will be scheduled on the device after the kernels complete. In other words, as long as the callback doesn't switch streams, it can safely manipulate the result without any additional synchronization. This is similar to the non-blocking behavior of :meth:`wait`. Similarly, if the callback returns a value that contains tensors that reside on a GPU, it can do so even if the kernels that are producing these tensors are still running on the device, as long as the callback didn't change streams during its execution. If one wants to change streams, one must be careful to re-synchronize them with the original streams, that is, those that were current when the callback was invoked. Args: callback(``Callable``): a ``Callable`` that takes this ``Future`` as the only argument. Returns: A new ``Future`` object that holds the return value of the ``callback`` and will be marked as completed when the given ``callback`` finishes. .. note:: Note that if the callback function throws, either through the original future being completed with an exception and calling ``fut.wait()``, or through other code in the callback, the future returned by ``then`` will be marked appropriately with the encountered error. However, if this callback later completes additional futures, those futures are not marked as completed with an error and the user is responsible for handling completion/waiting on those futures independently. Example:: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES) >>> def callback(fut): ... print(f"RPC return value is {fut.wait()}.") >>> fut = torch.futures.Future() >>> # The inserted callback will print the return value when >>> # receiving the response from "worker1" >>> cb_fut = fut.then(callback) >>> chain_cb_fut = cb_fut.then( ... lambda x : print(f"Chained cb done. {x.wait()}") ... ) >>> fut.set_result(5) RPC return value is 5. Chained cb done. None """ return cast(Future[S], super().then(callback)) def add_done_callback(self, callback: Callable[[Future[T]], None]) -> None: r""" Append the given callback function to this ``Future``, which will be run when the ``Future`` is completed. Multiple callbacks can be added to the same ``Future``, but the order in which they will be executed cannot be guaranteed. The callback must take one argument, which is the reference to this ``Future``. The callback function can use the :meth:`value` method to get the value. Note that if this ``Future`` is already completed, the given callback will be run inline. We recommend that you use the :meth:`then` method as it provides a way to synchronize after your callback has completed. ``add_done_callback`` can be cheaper if your callback does not return anything. 
But both :meth:`then` and ``add_done_callback`` use the same callback registration API under the hood. With respect to GPU tensors, this method behaves in the same way as :meth:`then`. Args: callback(``Future``): a ``Callable`` that takes in one argument, which is the reference to this ``Future``. .. note:: Note that if the callback function throws, either through the original future being completed with an exception and calling ``fut.wait()``, or through other code in the callback, error handling must be carefully taken care of. For example, if this callback later completes additional futures, those futures are not marked as completed with an error and the user is responsible for handling completion/waiting on those futures independently. Example:: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES) >>> def callback(fut): ... print("This will run after the future has finished.") ... print(fut.wait()) >>> fut = torch.futures.Future() >>> fut.add_done_callback(callback) >>> fut.set_result(5) This will run after the future has finished. 5 """ super().add_done_callback(callback) def set_result(self, result: T) -> None: r""" Set the result for this ``Future``, which will mark this ``Future`` as completed and trigger all attached callbacks. Note that a ``Future`` cannot be marked completed twice. If the result contains tensors that reside on GPUs, this method can be called even if the asynchronous kernels that are populating those tensors haven't yet completed running on the device, provided that the streams on which those kernels were enqueued are set as the current ones when this method is called. Put simply, it's safe to call this method immediately after launching those kernels, without any additional synchronization, as long as one doesn't change streams in between. This method will record events on all the relevant current streams and will use them to ensure proper scheduling for all the consumers of this ``Future``. Args: result (object): the result object of this ``Future``. Example:: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES) >>> import threading >>> import time >>> def slow_set_future(fut, value): ... time.sleep(0.5) ... fut.set_result(value) >>> fut = torch.futures.Future() >>> t = threading.Thread( ... target=slow_set_future, ... args=(fut, torch.ones(2) * 3) ... ) >>> t.start() >>> print(fut.wait()) tensor([3., 3.]) >>> t.join() """ super().set_result(result) def set_exception(self, result: T) -> None: r""" Set an exception for this ``Future``, which will mark this ``Future`` as completed with an error and trigger all attached callbacks. Note that when calling wait()/value() on this ``Future``, the exception set here will be raised inline. Args: result (BaseException): the exception for this ``Future``. Example:: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES) >>> fut = torch.futures.Future() >>> fut.set_exception(ValueError("foo")) >>> fut.wait() Traceback (most recent call last): ... ValueError: foo """ assert isinstance(result, Exception), ( f"{result} is of type {type(result)}, not an Exception." ) def raise_error(fut_result): raise fut_result super()._set_unwrap_func(raise_error) self.set_result(result) # type: ignore[arg-type] def collect_all(futures: list[Future]) -> Future[list[Future]]: r""" Collects the provided :class:`~torch.futures.Future` objects into a single combined :class:`~torch.futures.Future` that is completed when all of the sub-futures are completed. Args: futures (list): a list of :class:`~torch.futures.Future` objects. 
Returns: Returns a :class:`~torch.futures.Future` object to a list of the passed in Futures. Example:: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES) >>> fut0 = torch.futures.Future() >>> fut1 = torch.futures.Future() >>> fut = torch.futures.collect_all([fut0, fut1]) >>> fut0.set_result(0) >>> fut1.set_result(1) >>> fut_list = fut.wait() >>> print(f"fut0 result = {fut_list[0].wait()}") fut0 result = 0 >>> print(f"fut1 result = {fut_list[1].wait()}") fut1 result = 1 """ return cast( Future[list[Future]], torch._C._collect_all(cast(list[torch._C.Future], futures)), ) def wait_all(futures: list[Future]) -> list: r""" Waits for all provided futures to be complete, and returns the list of completed values. If any of the futures encounters an error, the method will exit early and report the error not waiting for other futures to complete. Args: futures (list): a list of :class:`~torch.futures.Future` object. Returns: A list of the completed :class:`~torch.futures.Future` results. This method will throw an error if ``wait`` on any :class:`~torch.futures.Future` throws. """ return [ fut.wait() for fut in torch._C._collect_all(cast(list[torch._C.Future], futures)).wait() ]
Future
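Complementing the docstring examples above, `wait_all` flattens the `collect_all` + per-future `wait` pattern into a single call; a minimal sketch:

```python
import torch

futs = [torch.futures.Future() for _ in range(3)]
for i, fut in enumerate(futs):
    fut.set_result(i * 10)
print(torch.futures.wait_all(futs))  # [0, 10, 20]
```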
python
tensorflow__tensorflow
tensorflow/python/feature_column/feature_column_test.py
{ "start": 27417, "end": 37303 }
class ____(test.TestCase): def test_defaults(self): a = fc._categorical_column_with_hash_bucket('aaa', 10) self.assertEqual('aaa', a.name) self.assertEqual('aaa', a._var_scope_name) self.assertEqual('aaa', a.key) self.assertEqual(10, a.hash_bucket_size) self.assertEqual(dtypes.string, a.dtype) def test_key_should_be_string(self): with self.assertRaisesRegex(ValueError, 'key must be a string.'): fc._categorical_column_with_hash_bucket(('key',), 10) def test_bucket_size_should_be_given(self): with self.assertRaisesRegex(ValueError, 'hash_bucket_size must be set.'): fc._categorical_column_with_hash_bucket('aaa', None) def test_bucket_size_should_be_positive(self): with self.assertRaisesRegex(ValueError, 'hash_bucket_size must be at least 1'): fc._categorical_column_with_hash_bucket('aaa', 0) def test_dtype_should_be_string_or_integer(self): fc._categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.string) fc._categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.int32) with self.assertRaisesRegex(ValueError, 'dtype must be string or integer'): fc._categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.float32) def test_deep_copy(self): original = fc._categorical_column_with_hash_bucket('aaa', 10) for column in (original, copy.deepcopy(original)): self.assertEqual('aaa', column.name) self.assertEqual(10, column.hash_bucket_size) self.assertEqual(10, column._num_buckets) self.assertEqual(dtypes.string, column.dtype) def test_parse_spec_string(self): a = fc._categorical_column_with_hash_bucket('aaa', 10) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.string) }, a._parse_example_spec) def test_parse_spec_int(self): a = fc._categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.int32) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int32) }, a._parse_example_spec) def test_parse_example(self): a = fc._categorical_column_with_hash_bucket('aaa', 10) data = example_pb2.Example(features=feature_pb2.Features( feature={ 'aaa': feature_pb2.Feature(bytes_list=feature_pb2.BytesList( value=[b'omar', b'stringer'])) })) features = parsing_ops.parse_example( serialized=[data.SerializeToString()], features=fc.make_parse_example_spec([a])) self.assertIn('aaa', features) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=[[0, 0], [0, 1]], values=np.array([b'omar', b'stringer'], dtype=np.object_), dense_shape=[1, 2]), self.evaluate(features['aaa'])) def test_strings_should_be_hashed(self): hashed_sparse = fc._categorical_column_with_hash_bucket('wire', 10) wire_tensor = sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) outputs = _transform_features({'wire': wire_tensor}, [hashed_sparse]) output = outputs[hashed_sparse] # Check exact hashed output. If hashing changes this test will break. 
expected_values = [6, 4, 1] with self.cached_session(): self.assertEqual(dtypes.int64, output.values.dtype) self.assertAllEqual(expected_values, output.values) self.assertAllEqual(wire_tensor.indices, output.indices) self.assertAllEqual(wire_tensor.dense_shape, output.dense_shape) def test_tensor_dtype_should_be_string_or_integer(self): string_fc = fc._categorical_column_with_hash_bucket( 'a_string', 10, dtype=dtypes.string) int_fc = fc._categorical_column_with_hash_bucket( 'a_int', 10, dtype=dtypes.int32) float_fc = fc._categorical_column_with_hash_bucket( 'a_float', 10, dtype=dtypes.string) int_tensor = sparse_tensor.SparseTensor( values=[101], indices=[[0, 0]], dense_shape=[1, 1]) string_tensor = sparse_tensor.SparseTensor( values=['101'], indices=[[0, 0]], dense_shape=[1, 1]) float_tensor = sparse_tensor.SparseTensor( values=[101.], indices=[[0, 0]], dense_shape=[1, 1]) builder = _LazyBuilder({ 'a_int': int_tensor, 'a_string': string_tensor, 'a_float': float_tensor }) builder.get(string_fc) builder.get(int_fc) with self.assertRaisesRegex(ValueError, 'dtype must be string or integer'): builder.get(float_fc) def test_dtype_should_match_with_tensor(self): hashed_sparse = fc._categorical_column_with_hash_bucket( 'wire', 10, dtype=dtypes.int64) wire_tensor = sparse_tensor.SparseTensor( values=['omar'], indices=[[0, 0]], dense_shape=[1, 1]) builder = _LazyBuilder({'wire': wire_tensor}) with self.assertRaisesRegex(ValueError, 'dtype must be compatible'): builder.get(hashed_sparse) def test_ints_should_be_hashed(self): hashed_sparse = fc._categorical_column_with_hash_bucket( 'wire', 10, dtype=dtypes.int64) wire_tensor = sparse_tensor.SparseTensor( values=[101, 201, 301], indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) builder = _LazyBuilder({'wire': wire_tensor}) output = builder.get(hashed_sparse) # Check exact hashed output. If hashing changes this test will break. expected_values = [3, 7, 5] with self.cached_session(): self.assertAllEqual(expected_values, output.values) def test_int32_64_is_compatible(self): hashed_sparse = fc._categorical_column_with_hash_bucket( 'wire', 10, dtype=dtypes.int64) wire_tensor = sparse_tensor.SparseTensor( values=constant_op.constant([101, 201, 301], dtype=dtypes.int32), indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) builder = _LazyBuilder({'wire': wire_tensor}) output = builder.get(hashed_sparse) # Check exact hashed output. If hashing changes this test will break. 
expected_values = [3, 7, 5] self.assertAllEqual(expected_values, output.values) def test_get_sparse_tensors(self): hashed_sparse = fc._categorical_column_with_hash_bucket('wire', 10) builder = _LazyBuilder({ 'wire': sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) }) id_weight_pair = hashed_sparse._get_sparse_tensors(builder) self.assertIsNone(id_weight_pair.weight_tensor) self.assertEqual(builder.get(hashed_sparse), id_weight_pair.id_tensor) def test_get_sparse_tensors_weight_collections(self): column = fc._categorical_column_with_hash_bucket('aaa', 10) inputs = sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) column._get_sparse_tensors( _LazyBuilder({ 'aaa': inputs }), weight_collections=('my_weights',)) self.assertCountEqual([], ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)) self.assertCountEqual([], ops.get_collection('my_weights')) def test_get_sparse_tensors_dense_input(self): hashed_sparse = fc._categorical_column_with_hash_bucket('wire', 10) builder = _LazyBuilder({'wire': (('omar', ''), ('stringer', 'marlo'))}) id_weight_pair = hashed_sparse._get_sparse_tensors(builder) self.assertIsNone(id_weight_pair.weight_tensor) self.assertEqual(builder.get(hashed_sparse), id_weight_pair.id_tensor) def test_linear_model(self): wire_column = fc._categorical_column_with_hash_bucket('wire', 4) self.assertEqual(4, wire_column._num_buckets) with ops.Graph().as_default(): predictions = fc.linear_model({ wire_column.name: sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('marlo', 'skywalker', 'omar'), dense_shape=(2, 2)) }, (wire_column,)) bias = get_linear_model_bias() wire_var = get_linear_model_column_var(wire_column) with _initialized_session(): self.assertAllClose((0.,), self.evaluate(bias)) self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), self.evaluate(wire_var)) self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions)) self.evaluate(wire_var.assign(((1.,), (2.,), (3.,), (4.,)))) # 'marlo' -> 3: wire_var[3] = 4 # 'skywalker' -> 2, 'omar' -> 2: wire_var[2] + wire_var[2] = 3+3 = 6 self.assertAllClose(((4.,), (6.,)), self.evaluate(predictions)) def test_keras_linear_model(self): wire_column = fc._categorical_column_with_hash_bucket('wire', 4) self.assertEqual(4, wire_column._num_buckets) with ops.Graph().as_default(): predictions = get_keras_linear_model_predictions({ wire_column.name: sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('marlo', 'skywalker', 'omar'), dense_shape=(2, 2)) }, (wire_column,)) bias = get_linear_model_bias() wire_var = get_linear_model_column_var(wire_column) with _initialized_session(): self.assertAllClose((0.,), self.evaluate(bias)) self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), self.evaluate(wire_var)) self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions)) self.evaluate(wire_var.assign(((1.,), (2.,), (3.,), (4.,)))) # 'marlo' -> 3: wire_var[3] = 4 # 'skywalker' -> 2, 'omar' -> 2: wire_var[2] + wire_var[2] = 3+3 = 6 self.assertAllClose(((4.,), (6.,)), self.evaluate(predictions))
HashedCategoricalColumnTest
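A hedged sketch of what the tests exercise, via the public (now deprecated) `tf.feature_column` counterpart of the private helper under test: string values are hashed into a fixed number of buckets, and the categorical column is typically wrapped before being fed to a model.

```python
import tensorflow as tf

col = tf.feature_column.categorical_column_with_hash_bucket(
    "wire", hash_bucket_size=10)
ind = tf.feature_column.indicator_column(col)  # or embedding_column(col, dim)
```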
python
sqlalchemy__sqlalchemy
test/sql/test_operators.py
{ "start": 12510, "end": 20909 }
class ____(fixtures.TestBase, testing.AssertsCompiledSQL): __dialect__ = "default" @testing.combinations(True, False, argnames="reverse") @testing.combinations(True, False, argnames="negate") def test_associatives_mismatched_type(self, reverse, negate): """test we get two separate exprs if the types dont match, operator is not lost. the expressions here don't generally make sense from a SQL perspective, we are checking just that the operators / parenthesis / negation works out in the SQL string to reasonably correspond to what the Python structures look like. """ expr1 = column("i1", Integer) + column("i2", Integer) expr2 = column("d1", String) + column("d2", String) if reverse: expr = expr2 + expr1 self.assert_compile( select(expr), "SELECT (d1 || d2) + i1 + i2 AS anon_1" ) else: expr = expr1 + expr2 self.assert_compile( select(expr), "SELECT i1 + i2 + (d1 || d2) AS anon_1" ) @testing.combinations( operators.add, operators.mul, argnames="op", ) @testing.combinations(True, False, argnames="reverse") @testing.combinations(True, False, argnames="negate") def test_parenthesized_exprs(self, op, reverse, negate): t1 = table("t", column("q"), column("p")) inner = lambda: t1.c.q - t1.c.p # noqa E371 expr = op(inner(), inner()) if reverse: for i in range(8): expr = op(inner(), expr) else: for i in range(8): expr = op(expr, inner()) opstring = compiler.OPERATORS[op] exprs = opstring.join("(t.q - t.p)" for i in range(10)) if negate: self.assert_compile( select(~expr), f"SELECT NOT ({exprs}) AS anon_1 FROM t" ) else: self.assert_compile( select(expr), f"SELECT {exprs} AS anon_1 FROM t" ) @testing.combinations( ( lambda p, q: (1 - p) * (2 - q) + 10 * (3 - p) * (4 - q), "(:p_1 - t.p) * (:q_1 - t.q) + " ":param_1 * (:p_2 - t.p) * (:q_2 - t.q)", ), ( lambda p, q: (1 - p) * (2 - q) * (3 - p) * (4 - q), "(:p_1 - t.p) * (:q_1 - t.q) * (:p_2 - t.p) * (:q_2 - t.q)", ), ( lambda p, q: ( (1 + p + 5) * (p * (q - 5) * (p + 8)) * (q + (p - 3) + (q - 5) + (p - 9)) * (4 + q + 9) ), "(:p_1 + t.p + :param_1) * " "t.p * (t.q - :q_1) * (t.p + :p_2) * " "(t.q + (t.p - :p_3) + (t.q - :q_2) + (t.p - :p_4)) * " "(:q_3 + t.q + :param_2)", ), ( lambda p, q: (1 // p) - (2 // q) - (3 // p) - (4 // q), "((:p_1 / t.p - :q_1 / t.q) - :p_2 / t.p) - :q_2 / t.q", ), ( lambda p, q: (1 + p) - (2 + q) - (3 + p) - (4 + q), "(((:p_1 + t.p) - (:q_1 + t.q)) - (:p_2 + t.p)) - (:q_2 + t.q)", ), ( lambda p, q: (1 + p) * 3 * (2 + q) * 4 * (3 + p) - (4 + q), "(:p_1 + t.p) * :param_1 * (:q_1 + t.q) * " ":param_2 * (:p_2 + t.p) - (:q_2 + t.q)", ), argnames="expr, expected", ) def test_other_exprs(self, expr, expected): t = table("t", column("q", Integer), column("p", Integer)) expr = resolve_lambda(expr, p=t.c.p, q=t.c.q) self.assert_compile(expr, expected) @testing.combinations( operators.add, operators.and_, operators.or_, operators.mul, argnames="op", ) @testing.combinations(True, False, argnames="reverse") @testing.combinations(True, False, argnames="negate") def test_associatives(self, op, reverse, negate): t1 = table("t", column("q"), column("p")) num = 500 expr = op(t1.c.q, t1.c.p) if reverse: for i in range(num - 1, -1, -1): expr = op(column(f"d{i}"), expr) else: for i in range(num): expr = op(expr, column(f"d{i}")) opstring = compiler.OPERATORS[op] exprs = opstring.join(f"d{i}" for i in range(num)) if negate: self.assert_compile( select(~expr), ( f"SELECT NOT (t.q{opstring}t.p{opstring}{exprs}) " "AS anon_1 FROM t" if not reverse else f"SELECT NOT ({exprs}{opstring}t.q{opstring}t.p) " "AS anon_1 FROM t" ), ) else: self.assert_compile( 
select(expr), ( f"SELECT t.q{opstring}t.p{opstring}{exprs} " "AS anon_1 FROM t" if not reverse else f"SELECT {exprs}{opstring}t.q{opstring}t.p " "AS anon_1 FROM t" ), ) @testing.combinations( operators.gt, operators.eq, operators.le, operators.sub, argnames="op", ) @testing.combinations(True, False, argnames="reverse") @testing.combinations(True, False, argnames="negate") def test_non_associatives(self, op, reverse, negate): """similar tests as test_associatives but for non-assoc operators. the expressions here don't generally make sense from a SQL perspective, we are checking just that the operators / parenthesis / negation works out in the SQL string to reasonably correspond to what the Python structures look like. """ t1 = table("t", column("q"), column("p")) num = 5 expr = op(t1.c.q, t1.c.p) if reverse: for i in range(num - 1, -1, -1): expr = op(column(f"d{i}"), expr) else: for i in range(num): expr = op(expr, column(f"d{i}")) opstring = compiler.OPERATORS[op] if negate: negate_op = { operators.gt: operators.le, operators.eq: operators.ne, operators.le: operators.gt, }.get(op, None) if negate_op: negate_opstring = compiler.OPERATORS[negate_op] if reverse: str_expr = ( f"d0{negate_opstring}(d1{opstring}(d2{opstring}" f"(d3{opstring}(d4{opstring}(t.q{opstring}t.p)))))" ) else: str_expr = ( f"(((((t.q{opstring}t.p){opstring}d0){opstring}d1)" f"{opstring}d2){opstring}d3){negate_opstring}d4" ) else: if reverse: str_expr = ( f"NOT (d0{opstring}(d1{opstring}(d2{opstring}" f"(d3{opstring}(d4{opstring}(t.q{opstring}t.p))))))" ) else: str_expr = ( f"NOT ((((((t.q{opstring}t.p){opstring}d0)" f"{opstring}d1){opstring}d2){opstring}d3){opstring}d4)" ) self.assert_compile( select(~expr), ( f"SELECT {str_expr} AS anon_1 FROM t" if not reverse else f"SELECT {str_expr} AS anon_1 FROM t" ), ) else: if reverse: str_expr = ( f"d0{opstring}(d1{opstring}(d2{opstring}" f"(d3{opstring}(d4{opstring}(t.q{opstring}t.p)))))" ) else: str_expr = ( f"(((((t.q{opstring}t.p){opstring}d0)" f"{opstring}d1){opstring}d2){opstring}d3){opstring}d4" ) self.assert_compile( select(expr), ( f"SELECT {str_expr} AS anon_1 FROM t" if not reverse else f"SELECT {str_expr} AS anon_1 FROM t" ), )
MultiElementExprTest
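An illustrative sketch of the behavior under test, per the expectations above: associative operators chain without redundant parentheses, non-associative ones keep their grouping, and negation rewrites comparison operators (the output comments mirror the test's expected SQL, not guaranteed across all versions):

```python
from sqlalchemy import column, select

q, p, d0 = column("q"), column("p"), column("d0")
print(select(q + p + d0))    # associative: SELECT q + p + d0 AS anon_1
print(select((q - p) - d0))  # non-associative: (q - p) - d0 keeps grouping
print(select(~(q > p)))      # negation rewrites the operator: q <= p
```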
python
getsentry__sentry
src/sentry/hybridcloud/rpc/caching/service.py
{ "start": 6198, "end": 12470 }
class ____(Generic[_R]): """ Get a multiple records from cache or wrapped function. When cache read returns no or partial data, the wrapped function will be invoked with keys missing data. The result of the wrapped function will then be stored in cache. Ideal for 'get many by id' style methods. """ silo_mode: SiloMode base_key: str cb: Callable[[list[int]], list[_R]] type_: type[_R] timeout: int | None def __init__( self, base_key: str, silo_mode: SiloMode, cb: Callable[[list[int]], list[_R]], t: type[_R], timeout: int | None = None, ): self.base_key = base_key self.silo_mode = silo_mode self.cb = cb self.type_ = t self.timeout = timeout def __call__(self, ids: list[int]) -> list[_R]: if ( SiloMode.get_current_mode() != self.silo_mode and SiloMode.get_current_mode() != SiloMode.MONOLITH ): return self.cb(ids) return self.get_many(ids) def key_from(self, object_id: int) -> str: return f"{self.base_key}:{object_id}" def get_many(self, ids: list[int]) -> list[_R]: from .impl import _consume_generator, _delete_cache, _get_cache, _set_cache keys = {i: self.key_from(i) for i in ids} cache_values = _consume_generator(_get_cache(list(keys.values()), self.silo_mode)) # Mapping between object_id and cache versions missing: dict[int, int] = {} found: dict[int, _R] = {} for object_id, cache_key in keys.items(): version: int | None = None cache_value = cache_values[cache_key] if isinstance(cache_value, str): # Found data in cache try: found[object_id] = self.type_(**json.loads(cache_value)) except (pydantic.ValidationError, JSONDecodeError, TypeError): version = _consume_generator(_delete_cache(cache_key, self.silo_mode)) else: # Data was missing in cache but we have a version for the cache key version = cache_value if version is not None: missing[object_id] = version missing_keys = list(missing.keys()) metrics.incr( "hybridcloud.caching.many.rpc", len(missing_keys), tags={"base_key": self.base_key} ) metrics.incr( "hybridcloud.caching.many.cached", len(found), tags={"base_key": self.base_key} ) # This result could have different order than missing_object_ids, or have gaps cb_result = self.cb(missing_keys) for record in cb_result: # TODO(hybridcloud) The types/interfaces don't make reading this attribute safe. # We rely on a convention of records having `id` for now. In the future # this could become a decorator parameter instead. record_id = getattr(record, "id") if record_id is None: continue cache_key = keys[record_id] record_version = missing[record_id] _consume_generator(_set_cache(cache_key, record.json(), record_version, self.timeout)) found[record_id] = record return [found[id] for id in ids if id in found] def back_with_silo_cache( base_key: str, silo_mode: SiloMode, t: type[_R], timeout: int | None = None ) -> Callable[[Callable[[int], _R | None]], "SiloCacheBackedCallable[_R]"]: """ Decorator for adding local caching to RPC operations on a single record. This decorator can be applied to RPC methods that fetch a single object. If the cache read fails, the decorated function will be called and its result will be stored in cache. The decorator adds helper methods on the wrapped function for generating keys to clear cache entries with region_caching_service and control_caching_service. See user_service.get_user() for an example usage. 
""" def wrapper(cb: Callable[[int], _R | None]) -> "SiloCacheBackedCallable[_R]": return SiloCacheBackedCallable(base_key, silo_mode, cb, t, timeout) return wrapper def back_with_silo_cache_many( base_key: str, silo_mode: SiloMode, t: type[_R], timeout: int | None = None ) -> Callable[[Callable[[list[int]], list[_R]]], "SiloCacheManyBackedCallable[_R]"]: """ Decorator for adding local caching to RPC operations that fetch many records by id. This decorator can be applied to RPC methods that fetch multiple objects. First all ids will be read from cache. Any records that were not available in cache will be forwarded to the wrapped method. The result of the wrapped method will be stored in cache for future use. Like `back_with_silo_cache`, this decorator adds helpers to the wrapped function for generating keys to clear cache. """ def wrapper(cb: Callable[[list[int]], list[_R]]) -> "SiloCacheManyBackedCallable[_R]": return SiloCacheManyBackedCallable(base_key, silo_mode, cb, t, timeout) return wrapper def back_with_silo_cache_list( base_key: str, silo_mode: SiloMode, t: type[_R], timeout: int | None = None ) -> Callable[[Callable[[int], list[_R]]], "SiloCacheBackedListCallable[_R]"]: """ Decorator for adding local caching to RPC operations for list results This decorator can be applied to RPC methods that fetch a list of results based on a single input id. This works well with methods that get a list of results based on an organization or user id. If the cache read for the id value fails, the decorated function will be called and its result will be stored in cache. The decorator also adds method on the wrapped function for generating keys to clear cache entires with with region_caching_service and control_caching_service. See app_service.installations_for_organization() for an example usage. """ def wrapper(cb: Callable[[int], list[_R]]) -> "SiloCacheBackedListCallable[_R]": return SiloCacheBackedListCallable(base_key, silo_mode, cb, t, timeout) return wrapper region_caching_service = RegionCachingService.create_delegation()
SiloCacheManyBackedCallable
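A hedged sketch of the decorator pattern described above; the service, model, and serializer names are illustrative, not Sentry's actual code:

```python
@back_with_silo_cache_many("user_service.get_many", SiloMode.CONTROL, RpcUser)
def get_many(ids: list[int]) -> list[RpcUser]:
    # Only ids missing from cache reach this body; results are written back
    # under f"user_service.get_many:{id}" keys with their captured versions.
    return [serialize_rpc_user(u) for u in User.objects.filter(id__in=ids)]
```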
python
ray-project__ray
doc/source/serve/doc_code/test_service_pb2_grpc.py
{ "start": 2738, "end": 4450 }
class ____(object): """Missing associated documentation comment in .proto file.""" @staticmethod def Ping( request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None, ): return grpc.experimental.unary_unary( request, target, "/ray.rpc.TestService/Ping", src_dot_ray_dot_protobuf_dot_test__service__pb2.PingRequest.SerializeToString, src_dot_ray_dot_protobuf_dot_test__service__pb2.PingReply.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata, ) @staticmethod def PingTimeout( request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None, ): return grpc.experimental.unary_unary( request, target, "/ray.rpc.TestService/PingTimeout", src_dot_ray_dot_protobuf_dot_test__service__pb2.PingTimeoutRequest.SerializeToString, src_dot_ray_dot_protobuf_dot_test__service__pb2.PingTimeoutReply.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata, )
TestService
python
huggingface__transformers
src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
{ "start": 16094, "end": 20140 }
class ____(nn.Module): """Construct a SeamlessM4Tv2ConformerSelfAttention object. Can be enhanced with relative position embeddings. """ def __init__(self, config, use_position_embeddings=True): super().__init__() self.head_size = config.hidden_size // config.speech_encoder_attention_heads self.num_heads = config.speech_encoder_attention_heads self.position_embeddings_type = config.position_embeddings_type if use_position_embeddings else None self.linear_q = nn.Linear(config.hidden_size, config.hidden_size) self.linear_k = nn.Linear(config.hidden_size, config.hidden_size) self.linear_v = nn.Linear(config.hidden_size, config.hidden_size) self.linear_out = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(p=config.speech_encoder_dropout) if self.position_embeddings_type == "relative_key": self.left_max_position_embeddings = config.left_max_position_embeddings self.right_max_position_embeddings = config.right_max_position_embeddings num_positions = self.left_max_position_embeddings + self.right_max_position_embeddings + 1 self.distance_embedding = nn.Embedding(num_positions, self.head_size) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: # self-attention mechanism batch_size, sequence_length, hidden_size = hidden_states.size() # make sure query/key states can be != value states query_key_states = hidden_states value_states = hidden_states # project query_key_states and value_states query = self.linear_q(query_key_states).view(batch_size, -1, self.num_heads, self.head_size) key = self.linear_k(query_key_states).view(batch_size, -1, self.num_heads, self.head_size) value = self.linear_v(value_states).view(batch_size, -1, self.num_heads, self.head_size) # => (batch, head, time1, d_k) query = query.transpose(1, 2) key = key.transpose(1, 2) value = value.transpose(1, 2) attn_weights = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.head_size) if self.position_embeddings_type == "relative_key": query_length, key_length = query.shape[2], key.shape[2] position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_r - position_ids_l distance = torch.clamp(distance, -self.left_max_position_embeddings, self.right_max_position_embeddings) positional_embedding = self.distance_embedding(distance + self.left_max_position_embeddings) positional_embedding = positional_embedding.to(dtype=query.dtype) # fp16 compatibility relative_position_attn_weights = torch.einsum("bhld,lrd->bhlr", query, positional_embedding) attn_weights = attn_weights + (relative_position_attn_weights / math.sqrt(self.head_size)) # apply attention_mask if necessary if attention_mask is not None: attn_weights = attn_weights + attention_mask # => (batch, head, time1, time2) attn_weights = torch.softmax(attn_weights, dim=-1) attn_weights = self.dropout(attn_weights) # => (batch, head, time1, d_k) attn_output = torch.matmul(attn_weights, value) # => (batch, time1, hidden_size) attn_output = attn_output.transpose(1, 2).reshape(batch_size, -1, self.num_heads * self.head_size) attn_output = self.linear_out(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights
SeamlessM4Tv2ConformerSelfAttention
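A standalone sketch of the relative-key bias computed above: pairwise query/key distances are clamped to `[-left_max, right_max]`, shifted to index a learned embedding table, and contracted against the queries (dimensions illustrative):

```python
import torch
import torch.nn as nn

left_max, right_max, head_size = 64, 8, 32
table = nn.Embedding(left_max + right_max + 1, head_size)
q_len = k_len = 5
pos_l = torch.arange(q_len).view(-1, 1)
pos_r = torch.arange(k_len).view(1, -1)
distance = torch.clamp(pos_r - pos_l, -left_max, right_max)
rel = table(distance + left_max)               # (q_len, k_len, head_size)
query = torch.randn(2, 4, q_len, head_size)    # (batch, heads, q_len, d_k)
bias = torch.einsum("bhld,lrd->bhlr", query, rel) / head_size**0.5
```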
python
pennersr__django-allauth
allauth/socialaccount/providers/wahoo/views.py
{ "start": 181, "end": 971 }
class ____(OAuth2Adapter): provider_id = "wahoo" access_token_url = "https://api.wahooligan.com/oauth/token" # nosec authorize_url = "https://api.wahooligan.com/oauth/authorize" profile_url = "https://api.wahooligan.com/v1/user" def complete_login(self, request, app, token, **kwargs): headers = {"Authorization": "Bearer {0}".format(token.token)} resp = ( get_adapter().get_requests_session().get(self.profile_url, headers=headers) ) resp.raise_for_status() extra_data = resp.json() return self.get_provider().sociallogin_from_response(request, extra_data) oauth2_login = OAuth2LoginView.adapter_view(WahooOAuth2Adapter) oauth2_callback = OAuth2CallbackView.adapter_view(WahooOAuth2Adapter)
WahooOAuth2Adapter
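A configuration sketch for wiring the provider into a Django project, following standard django-allauth conventions (credential values are placeholders):

```python
INSTALLED_APPS = [
    # ...
    "allauth",
    "allauth.socialaccount",
    "allauth.socialaccount.providers.wahoo",
]
SOCIALACCOUNT_PROVIDERS = {
    "wahoo": {"APP": {"client_id": "...", "secret": "..."}},
}
```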
python
pytorch__pytorch
torch/onnx/_internal/fx/_pass.py
{ "start": 5493, "end": 8638 }
class ____(abc.ABC): """Base class for FX graph transformations to be used by FX-ONNX exporter. Similar to `FX Interpreter <https://pytorch.org/docs/stable/fx.html#torch.fx.Interpreter>`_, specializations of this class execute the FX graph Node-by-Node. Methods in the `Transform` class can be overridden to customize the behavior of the model. This pattern can be useful for many things, including writing code transformations as well as analysis passes. The following methods can be overridden:: _run() +-- run_node() +-- placeholder() +-- get_attr() +-- call_function() +-- call_method() +-- call_module() +-- output() One important aspect to note is that if the transformation modifies the model input and/or output signature, (e.g. additional inputs/outputs are added to the model), :class:`InputAdaptStep` and/or :class:`OutputAdaptStep` are needed to reconcile :attr:`ONNXProgram.model_proto`. That is, the model signature and the model representation must match. TODO(bowbao): Add more overridable methods in call hierarchy TODO(bowbao): Create an example once more overridable methods are added. """ module: torch.fx.GraphModule """The module to be transformed.""" fake_mode: fake_tensor.FakeTensorMode | None """The existing fake mode detected from `self.module`.""" def __init__( self, module: torch.fx.GraphModule, ) -> None: """Initialize the transform. Args: module: The module to be transformed. """ self.module = module self.fake_mode = self._detect_fake_mode() def _detect_fake_mode(self) -> fake_tensor.FakeTensorMode | None: """Detect fake mode from the graph. Scan through all nodes in graph and their meta['val'] to detect fake mode. """ fake_tensors = [node.meta.get("val") for node in self.module.graph.nodes] with unset_fake_temporarily(): return torch._dynamo.utils.detect_fake_mode(fake_tensors) def _maybe_fakefy_args( self, fake_mode: fake_tensor.FakeTensorMode | None, *args: Any ) -> tuple[Any, ...]: if fake_mode is None: return args # NB: This should hit the cache if tensors were fakefied before. # E.g., when the fx graph is produced by Dynamo. return tuple( fake_mode.from_tensor(t) if isinstance(t, torch.Tensor) else t for t in args ) @abc.abstractmethod def _run(self, *args, **kwargs) -> torch.fx.GraphModule: ... def run(self, *args, **kwargs) -> torch.fx.GraphModule: """Run the transform on `self.module`. Note that this method may or may not mutate `self.module`, and the returned `GraphModule` could be either `self.module` or a new `GraphModule`. Args: *args: Positional arguments for `self.module` to run. **kwargs: Keyword arguments for `self.module` to run. """ return self._run(*args, **kwargs)
Transform
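A minimal sketch of a concrete pass, assuming only the abstract contract above: `_run` receives the module's runtime args and returns a (possibly new) `GraphModule`. The subclass name is illustrative:

```python
import torch

class EraseDeadNodes(Transform):
    """Illustrative pass: drop unused nodes, then regenerate forward()."""
    def _run(self, *args, **kwargs) -> torch.fx.GraphModule:
        self.module.graph.eliminate_dead_code()
        self.module.recompile()
        return self.module

# graph_module = EraseDeadNodes(graph_module).run(*example_inputs)
```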
python
keras-team__keras
keras/src/backend/numpy/export.py
{ "start": 0, "end": 351 }
class ____: def track(self, resource): raise NotImplementedError( "`track` is not implemented in the numpy backend." ) def add_endpoint(self, name, fn, input_signature=None, **kwargs): raise NotImplementedError( "`add_endpoint` is not implemented in the numpy backend." )
NumpyExportArchive
python
huggingface__transformers
src/transformers/models/altclip/modeling_altclip.py
{ "start": 14003, "end": 14717 }
class ____(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.align.modeling_align.AlignTextLayer with AlignText->AltRoberta
AltRobertaOutput
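A shape sketch of the standard BERT-style residual block above: the FFN intermediate activations are projected back to the hidden size, then added to the attention output and normalized (dimensions illustrative, dropout omitted for brevity):

```python
import torch
import torch.nn as nn

dense = nn.Linear(3072, 768)
norm = nn.LayerNorm(768)
x = torch.randn(2, 16, 3072)       # intermediate (FFN) activations
residual = torch.randn(2, 16, 768) # input_tensor from the previous sublayer
out = norm(dense(x) + residual)    # (2, 16, 768)
```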
python
pytorch__pytorch
test/dynamo/cpython/3_13/test_set.py
{ "start": 38862, "end": 39285 }
class ____(_TestBasicOps, __TestCase): def setUp(self): self.case = "bytes set" self.values = [b"a", b"b", b"c"] self.set = set(self.values) self.dup = set(self.values) self.length = 3 super().setUp() def test_repr(self): self.check_repr_against_values() #------------------------------------------------------------------------------
TestBasicOpsBytes
python
streamlit__streamlit
lib/streamlit/navigation/page.py
{ "start": 5347, "end": 11649 }
class ____: """A page within a multipage Streamlit app. Use ``st.Page`` to initialize a ``StreamlitPage`` object. Attributes ---------- icon : str The icon of the page. If no icon was declared in ``st.Page``, this property returns ``""``. title : str The title of the page. Unless declared otherwise in ``st.Page``, the page title is inferred from the filename or callable name. For more information, see `Overview of multipage apps <https://docs.streamlit.io/st.page.automatic-page-labels>`_. url_path : str The page's URL pathname, which is the path relative to the app's root URL. Unless declared otherwise in ``st.Page``, the URL pathname is inferred from the filename or callable name. For more information, see `Overview of multipage apps <https://docs.streamlit.io/st.page.automatic-page-urls>`_. The default page will always have a ``url_path`` of ``""`` to indicate the root URL (e.g. homepage). """ def __init__( self, page: str | Path | Callable[[], None], *, title: str | None = None, icon: str | None = None, url_path: str | None = None, default: bool = False, ) -> None: # Must appear before the return so all pages, even if running in bare Python, # have a _default property. This way we can always tell which script needs to run. self._default: bool = default ctx = get_script_run_ctx() if not ctx: return main_path = ctx.pages_manager.main_script_parent if isinstance(page, str): page = Path(page) if isinstance(page, Path): page = (main_path / page).resolve() if not page.is_file(): raise StreamlitAPIException( f"Unable to create Page. The file `{page.name}` could not be found." ) inferred_name = "" inferred_icon = "" if isinstance(page, Path): inferred_icon, inferred_name = page_icon_and_name(page) elif hasattr(page, "__name__"): inferred_name = str(page.__name__) elif title is None: # At this point, we know the page is not a string or a path, so it # must be a callable. We expect it to have a __name__ attribute, # but in special cases (e.g. a callable class instance), one may # not exist. In that case, we should inform the user the title is # mandatory. raise StreamlitAPIException( "Cannot infer page title for Callable. Set the `title=` keyword argument." ) self._page: Path | Callable[[], None] = page self._title: str = title or inferred_name.replace("_", " ") if icon is not None: # validate user provided icon. validate_icon_or_emoji(icon) self._icon: str = icon or inferred_icon if self._title.strip() == "": raise StreamlitAPIException( "The title of the page cannot be empty or consist of underscores/spaces only" ) self._url_path: str = inferred_name if url_path is not None: if url_path.strip() == "" and not default: raise StreamlitAPIException( "The URL path cannot be an empty string unless the page is the default page." ) self._url_path = url_path.strip("/") if "/" in self._url_path: raise StreamlitAPIException( "The URL path cannot contain a nested path (e.g. foo/bar)." ) if self._icon: validate_icon_or_emoji(self._icon) # used by st.navigation to ordain a page as runnable self._can_be_called: bool = False @property def title(self) -> str: """The title of the page. Unless declared otherwise in ``st.Page``, the page title is inferred from the filename or callable name. For more information, see `Overview of multipage apps <https://docs.streamlit.io/st.page.automatic-page-labels>`_. """ return self._title @property def icon(self) -> str: """The icon of the page. If no icon was declared in ``st.Page``, this property returns ``""``. 
""" return self._icon @property def url_path(self) -> str: """The page's URL pathname, which is the path relative to the app's \ root URL. Unless declared otherwise in ``st.Page``, the URL pathname is inferred from the filename or callable name. For more information, see `Overview of multipage apps <https://docs.streamlit.io/st.page.automatic-page-urls>`_. The default page will always have a ``url_path`` of ``""`` to indicate the root URL (e.g. homepage). """ return "" if self._default else self._url_path def run(self) -> None: """Execute the page. When a page is returned by ``st.navigation``, use the ``.run()`` method within your entrypoint file to render the page. You can only call this method on the page returned by ``st.navigation``. You can only call this method once per run of your entrypoint file. """ if not self._can_be_called: raise StreamlitAPIException( "This page cannot be called directly. Only the page returned from st.navigation can be called once." ) self._can_be_called = False ctx = get_script_run_ctx() if not ctx: return with ctx.run_with_active_hash(self._script_hash): if callable(self._page): self._page() return code = ctx.pages_manager.get_page_script_byte_code(str(self._page)) module = types.ModuleType("__main__") # We want __file__ to be the string path to the script module.__dict__["__file__"] = str(self._page) exec(code, module.__dict__) # noqa: S102 @property def _script_hash(self) -> str: return calc_md5(self._url_path)
StreamlitPage
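A minimal usage sketch for the class above, as it is typically instantiated through ``st.Page`` and run via ``st.navigation`` in an entrypoint file; the page file names here are hypothetical placeholders.

import streamlit as st

# Hypothetical page scripts; st.Page resolves them relative to the
# entrypoint file and returns StreamlitPage instances.
pages = [
    st.Page("dashboard.py", title="Dashboard", icon="📊", default=True),
    st.Page("settings.py", title="Settings", url_path="settings"),
]

# st.navigation selects the page matching the current URL and marks it runnable.
current_page = st.navigation(pages)
current_page.run()  # only the page returned by st.navigation may be run

Note that the default page reports ``url_path == ""`` (the app root), regardless of its inferred name.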
python
python-pillow__Pillow
src/PIL/GdImageFile.py
{ "start": 873, "end": 2788 }
class ____(ImageFile.ImageFile): """ Image plugin for the GD uncompressed format. Note that this format is not supported by the standard :py:func:`PIL.Image.open()` function. To use this plugin, you have to import the :py:mod:`PIL.GdImageFile` module and use the :py:func:`PIL.GdImageFile.open()` function. """ format = "GD" format_description = "GD uncompressed images" def _open(self) -> None: # Header assert self.fp is not None s = self.fp.read(1037) if i16(s) not in [65534, 65535]: msg = "Not a valid GD 2.x .gd file" raise SyntaxError(msg) self._mode = "P" self._size = i16(s, 2), i16(s, 4) true_color = s[6] true_color_offset = 2 if true_color else 0 # transparency index tindex = i32(s, 7 + true_color_offset) if tindex < 256: self.info["transparency"] = tindex self.palette = ImagePalette.raw( "RGBX", s[7 + true_color_offset + 6 : 7 + true_color_offset + 6 + 256 * 4] ) self.tile = [ ImageFile._Tile( "raw", (0, 0) + self.size, 7 + true_color_offset + 6 + 256 * 4, "L", ) ] def open(fp: StrOrBytesPath | IO[bytes], mode: str = "r") -> GdImageFile: """ Load texture from a GD image file. :param fp: GD file name, or an opened file handle. :param mode: Optional mode. In this version, if the mode argument is given, it must be "r". :returns: An image instance. :raises OSError: If the image could not be read. """ if mode != "r": msg = "bad mode" raise ValueError(msg) try: return GdImageFile(fp) except SyntaxError as e: msg = "cannot identify this image file" raise UnidentifiedImageError(msg) from e
GdImageFile
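Because the plugin is not registered with ``PIL.Image.open()``, the module-level ``open()`` above must be called directly. A short sketch, assuming a GD 2.x file exists at the hypothetical path ``example.gd``:

from PIL import GdImageFile

# builtins.open provides the binary file handle; GdImageFile.open parses it.
with open("example.gd", "rb") as fp:
    im = GdImageFile.open(fp)
    print(im.format, im.size, im.mode)  # "GD", (width, height), "P"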
python
weaviate__weaviate-python-client
weaviate/collections/classes/config.py
{ "start": 17765, "end": 18127 }
class ____(_RerankerProvider): reranker: Union[Rerankers, _EnumLikeStr] = Field( default=Rerankers.CONTEXTUALAI, frozen=True, exclude=True ) model: Optional[Union[RerankerContextualAIModel, str]] = Field(default=None) instruction: Optional[str] = Field(default=None) topN: Optional[int] = Field(default=None)
_RerankerContextualAIConfig
python
pytorch__pytorch
benchmarks/instruction_counts/definitions/setup.py
{ "start": 1500, "end": 1636 }
class ____(enum.Enum): TRIVIAL_2D = _TRIVIAL_2D TRIVIAL_3D = _TRIVIAL_3D TRIVIAL_4D = _TRIVIAL_4D TRAINING = _TRAINING
Setup
python
kamyu104__LeetCode-Solutions
Python/most-stones-removed-with-same-row-or-column.py
{ "start": 487, "end": 848 }
class ____(object): def removeStones(self, stones): """ :type stones: List[List[int]] :rtype: int """ MAX_ROW = 10000 union_find = UnionFind(2*MAX_ROW) for r, c in stones: union_find.union_set(r, c+MAX_ROW) return len(stones) - len({union_find.find_set(r) for r, _ in stones})
Solution
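The trick above is that stones sharing a row or a column get unioned into one component (column indices are offset by MAX_ROW so they never collide with row indices), and each component can always be reduced to a single stone. A small worked example, assuming the ``UnionFind`` helper defined earlier in the same file:

stones = [[0, 0], [0, 1], [1, 0], [1, 2], [2, 1], [2, 2]]
# All six stones end up in one connected component, so 6 - 1 = 5 can be removed.
print(Solution().removeStones(stones))  # 5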
python
huggingface__transformers
src/transformers/integrations/executorch.py
{ "start": 17536, "end": 26543 }
class ____(torch.nn.Module): """ A recipe module designed to make a `PreTrainedModel` exportable with `torch.export`, specifically for decoder-only LM to `StaticCache`. This module ensures that the exported model is compatible with further lowering and execution in `ExecuTorch`. Note: This class is specifically designed to support export process using `torch.export` in a way that ensures the model can be further lowered and run efficiently in `ExecuTorch`. """ def __init__( self, model: PreTrainedModel, batch_size: int | None = None, max_cache_len: int | None = None, device: torch.device | None = None, ) -> None: """ Initializes the wrapper module with the pretrained model. Args: model (`PreTrainedModel`): The pretrained model to wrap. The model must have caching enabled and use a 'static' caching implementation. batch_size (`Optional[int]`): The batch size of the model. If not provided, we check if a value can be found in `generation_config.cache_config` and otherwise we raise a ValueError. max_cache_len (`Optional[int]`): The maximum cache length for generation. Same mechanism as `batch_size` if not provided. device (`Optional[torch.device]`): The device to use. If not provided, we check if a value can be found in `generation_config.cache_config` and otherwise we use `model.device` (no error is raised). Raises: AssertionError: If the pretrained model does not have caching enabled or if it does not use a 'static' caching implementation in `model.generation_config`. ValueError: If `batch_size` or `max_cache_len` is not provided, either as an argument or in `cache_config`. """ super().__init__() config = model.config.get_text_config() generation_config = model.generation_config # Sanity checks if generation_config is None: raise AssertionError( "The model must have a generation config to be exported with static caching. " "Please set `generation_config` in `model`." ) if not generation_config.use_cache: raise AssertionError( "The model must have caching enabled to be exported with static caching. " "Please set `generation_config.use_cache=True`." ) if generation_config.cache_implementation != "static": raise AssertionError( "The model must use a 'static' caching implementation to be exported with static caching. " "Please set `generation_config.cache_implementation='static'`." 
) cache_config = {} if generation_config.cache_config is None else generation_config.cache_config # Ensure batch_size and max_cache_len are set if batch_size is None: batch_size = cache_config.get("batch_size", None) if batch_size is None: raise ValueError("batch_size must be provided, either as an argument or in cache_config.") if max_cache_len is None: max_cache_len = cache_config.get("max_cache_len", None) if max_cache_len is None: raise ValueError("max_cache_len must be provided, either as an argument or in cache_config.") # Infer device if not provided if device is None: device = cache_config.get("device", model.device) # Initialize the static cache self.model = model self.static_cache = StaticCache(max_cache_len=max_cache_len, config=config) head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) num_heads = getattr(config, "num_key_value_heads", config.num_attention_heads) dtype = self.model.dtype # We need this call to initialize all the layers (otherwise it's done lazily, which is not exportable) self.static_cache.early_initialization(batch_size, num_heads, head_dim, dtype, device) for i in range(len(self.static_cache)): self.register_buffer(f"key_cache_{i}", self.static_cache.layers[i].keys, persistent=False) self.register_buffer(f"value_cache_{i}", self.static_cache.layers[i].values, persistent=False) def forward( self, input_ids: torch.LongTensor | None = None, inputs_embeds: torch.Tensor | None = None, cache_position: torch.Tensor | None = None, ): """ Forward pass of the module, which is compatible with the ExecuTorch runtime. Args: input_ids (`torch.Tensor`): Tensor representing current input token id to the module. inputs_embeds (`torch.Tensor`): Tensor representing current input embeddings to the module. cache_position (`torch.Tensor`): Tensor representing current input position in the cache. Returns: torch.Tensor: Logits output from the model. This forward adapter serves two primary purposes: 1. **Making the Model `torch.export`-Compatible**: The adapter hides unsupported objects, such as the `Cache`, from the graph inputs and outputs, enabling the model to be exportable using `torch.export` without encountering issues. 2. **Ensuring Compatibility with `ExecuTorch` runtime**: The adapter matches the model's forward signature with that in `executorch/extension/llm/runner`, ensuring that the exported model can be executed in `ExecuTorch` out-of-the-box. """ past_key_values = self.static_cache outs = self.model( input_ids=input_ids, inputs_embeds=inputs_embeds, cache_position=cache_position, attention_mask=None, past_key_values=past_key_values, use_cache=True, ) if hasattr(outs, "logits"): # Returned outputs is `CausalLMOutputWithPast` return outs.logits else: # Returned the `last_hidden_state` from `BaseModelOutputWithPast` return outs.last_hidden_state @staticmethod def generate( exported_program: torch.export.ExportedProgram, prompt_token_ids: torch.Tensor, max_new_tokens: int, ) -> torch.Tensor: """ Generate a sequence of tokens using an exported program. This util function is designed to test exported models by simulating the generation process. It processes the input prompt tokens sequentially (no parallel prefill). This generate function is not intended to replace the original `generate` method, and the support for leveraging the original `generate` is potentially planned! Args: exported_program (`torch.export.ExportedProgram`): The exported program generated via `torch.export`. 
prompt_token_ids (`torch.Tensor`): Tensor representing the input prompt token IDs. max_new_tokens (`int`): Maximum number of new tokens to generate. Note that the total generation length is limited by both `max_new_tokens` and the model's cache size. Returns: torch.Tensor: A tensor containing the generated sequence of token IDs, including the original prompt tokens. """ device = prompt_token_ids.device prompt_token_len = prompt_token_ids.shape[-1] max_generation_length = prompt_token_len + max_new_tokens for buffer_name, buffer in exported_program.named_buffers(): if buffer_name.startswith("key_cache"): max_cache_len = buffer.shape[2] max_generation_length = min(max_generation_length, max_cache_len) break response_tokens = [] for input_pos in range(min(max_generation_length, prompt_token_len)): result = exported_program.module().forward( input_ids=prompt_token_ids[:, input_pos : input_pos + 1], cache_position=torch.tensor([input_pos], dtype=torch.long, device=device), ) response_tokens.append(prompt_token_ids[0][input_pos].item()) current_token = torch.argmax(result[:, -1, :], dim=-1).item() response_tokens.append(current_token) while len(response_tokens) < max_generation_length: result = exported_program.module().forward( input_ids=torch.tensor([[current_token]], dtype=torch.long, device=device), cache_position=torch.tensor([len(response_tokens)], dtype=torch.long, device=device), ) current_token = torch.argmax(result[:, -1, :], dim=-1).item() response_tokens.append(current_token) return torch.tensor([response_tokens], dtype=torch.long, device=device)
TorchExportableModuleWithStaticCache
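A hedged export sketch for the wrapper above; the checkpoint name and cache sizes are placeholders, and this is one plausible invocation rather than the canonical recipe:

import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("some-org/some-decoder-lm")  # placeholder name
model.generation_config.use_cache = True
model.generation_config.cache_implementation = "static"

wrapper = TorchExportableModuleWithStaticCache(model, batch_size=1, max_cache_len=128)
exported = torch.export.export(
    wrapper,
    args=(),
    kwargs={
        "input_ids": torch.tensor([[1]], dtype=torch.long),
        "cache_position": torch.tensor([0], dtype=torch.long),
    },
)

# The static generate() helper above can then drive the exported program.
tokens = TorchExportableModuleWithStaticCache.generate(
    exported, prompt_token_ids=torch.tensor([[1, 2, 3]]), max_new_tokens=8
)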
python
lazyprogrammer__machine_learning_examples
rl3/a2c/atari_wrappers.py
{ "start": 5160, "end": 6104 }
class ____(gym.ObservationWrapper): def __init__(self, env, width=84, height=84, grayscale=True): """Warp frames to 84x84 as done in the Nature paper and later work.""" gym.ObservationWrapper.__init__(self, env) self.width = width self.height = height self.grayscale = grayscale if self.grayscale: self.observation_space = spaces.Box(low=0, high=255, shape=(self.height, self.width, 1), dtype=np.uint8) else: self.observation_space = spaces.Box(low=0, high=255, shape=(self.height, self.width, 3), dtype=np.uint8) def observation(self, frame): if self.grayscale: frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA) if self.grayscale: frame = np.expand_dims(frame, -1) return frame
WarpFrame
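A quick usage sketch: wrapping an Atari environment so observations become 84x84 grayscale frames. The environment id is illustrative and requires gym's Atari extras to be installed.

import gym

env = WarpFrame(gym.make("PongNoFrameskip-v4"))
obs = env.reset()  # older gym API, as used in this repo: reset() returns only the observation
print(env.observation_space.shape)  # (84, 84, 1)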
python
run-llama__llama_index
llama-index-integrations/vector_stores/llama-index-vector-stores-milvus/llama_index/vector_stores/milvus/utils.py
{ "start": 2673, "end": 6370 }
class ____(BaseModel): # scalar metadata filters for advanced vector filtering # https://docs.zilliz.com/docs/use-array-fields#advanced-scalar-filtering filters: List[ScalarMetadataFilter] # and/or such conditions for combining different filters condition: Optional[FilterCondition] = FilterCondition.AND def to_dict(self): return [filter.to_dict() for filter in self.filters] @classmethod def from_dict(cls, data): filters = [ScalarMetadataFilter.from_dict(item) for item in data] return cls(filters=filters) def parse_filter_value(filter_value: any, is_text_match: bool = False): if filter_value is None: return filter_value if is_text_match: # Per Milvus, "only prefix pattern match like ab% and equal match like ab(no wildcards) are supported" return f"'{filter_value!s}%'" if isinstance(filter_value, str): # Escape single quotes in strings filter_value = filter_value.replace("'", "\\'") return f"'{filter_value!s}'" return str(filter_value) def parse_standard_filters(standard_filters: MetadataFilters = None): filters = [] if standard_filters is None or standard_filters.filters is None: return filters, "" for filter in standard_filters.filters: if isinstance(filter, MetadataFilters): filters.append(f"({parse_standard_filters(filter)[1]})") continue filter_value = parse_filter_value(filter.value) if filter_value is None and filter.operator != FilterOperator.IS_EMPTY: continue if filter.operator == FilterOperator.NIN: filters.append(f"{filter.key!s} not in {filter_value}") elif filter.operator == FilterOperator.CONTAINS: filters.append(f"array_contains({filter.key!s}, {filter_value})") elif filter.operator == FilterOperator.ANY: filters.append(f"array_contains_any({filter.key!s}, {filter_value})") elif filter.operator == FilterOperator.ALL: filters.append(f"array_contains_all({filter.key!s}, {filter_value})") elif filter.operator == FilterOperator.TEXT_MATCH: filters.append( f"{filter.key!s} like {parse_filter_value(filter.value, True)}" ) elif filter.operator == FilterOperator.IS_EMPTY: # in Milvus, array_length(field_name) returns 0 if the field does not exist or is not an array filters.append(f"array_length({filter.key!s}) == 0") elif filter.operator in [ FilterOperator.EQ, FilterOperator.GT, FilterOperator.LT, FilterOperator.NE, FilterOperator.GTE, FilterOperator.LTE, FilterOperator.IN, ]: filters.append(f"{filter.key!s} {filter.operator.value} {filter_value}") else: raise ValueError( f'Operator {filter.operator} ("{filter.operator.value}") is not supported by Milvus.' ) return filters, f" {standard_filters.condition.value} ".join(filters) def parse_scalar_filters(scalar_filters: ScalarMetadataFilters = None): filters = [] if scalar_filters is None: return filters, "" scalar_filters = ScalarMetadataFilters.from_dict(scalar_filters) for filter in scalar_filters.filters: filter_value = parse_filter_value(filter.value) if filter_value is None: continue operator = filter.operator.value.format(key=filter.key, value=filter_value) filters.append(operator) return filters, f" {scalar_filters.condition.value} ".join(filters)
ScalarMetadataFilters
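An illustrative call to ``parse_standard_filters`` above, assuming the standard llama-index filter classes (``MetadataFilters``, ``MetadataFilter``, ``FilterOperator``, ``FilterCondition``) are imported at the top of this module:

filters = MetadataFilters(
    filters=[
        MetadataFilter(key="author", value="alice", operator=FilterOperator.EQ),
        MetadataFilter(key="year", value=2020, operator=FilterOperator.GTE),
    ],
    condition=FilterCondition.AND,
)
_, expr = parse_standard_filters(filters)
print(expr)  # author == 'alice' and year >= 2020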
python
mlflow__mlflow
dev/tests/test_set_matrix.py
{ "start": 193, "end": 4883 }
class ____: def __init__(self, data): self.data = data def json(self): return self.data def raise_for_status(self): pass @classmethod def from_versions(cls, versions): return cls( { "releases": { v: [ { "filename": v + ".whl", "upload_time": "2023-10-04T16:38:57", } ] for v in versions } } ) def mock_pypi_api(mock_responses): def requests_get_patch(url, *args, **kwargs): package_name = re.search(r"https://pypi\.org/pypi/(.+)/json", url).group(1) return mock_responses[package_name] def decorator(test_func): @functools.wraps(test_func) def wrapper(*args, **kwargs): with mock.patch("requests.get", new=requests_get_patch): return test_func(*args, **kwargs) return wrapper return decorator @contextmanager def mock_ml_package_versions_yml(src_base, src_ref): with tempfile.TemporaryDirectory() as tmp_dir: yml_base = Path(tmp_dir).joinpath("base.yml") yml_ref = Path(tmp_dir).joinpath("ref.yml") yml_base.write_text(src_base) yml_ref.write_text(src_ref) yield ["--versions-yaml", str(yml_base), "--ref-versions-yaml", str(yml_ref)] MOCK_YAML_SOURCE = """ foo: package_info: pip_release: foo install_dev: "pip install git+https://github.com/foo/foo.git" autologging: minimum: "1.0.0" maximum: "1.2.0" run: pytest tests/foo bar: package_info: pip_release: bar install_dev: "pip install git+https://github.com/bar/bar.git" autologging: minimum: "1.3" maximum: "1.4" run: pytest/tests bar """ MOCK_PYPI_API_RESPONSES = { "foo": MockResponse.from_versions(["1.0.0", "1.1.0", "1.1.1", "1.2.0"]), "bar": MockResponse.from_versions(["1.3", "1.4"]), } @pytest.mark.parametrize( ("flavors", "expected"), [ ("foo", {"foo"}), ("foo,bar", {"foo", "bar"}), ("foo, bar", {"foo", "bar"}), # Contains a space after a comma ("", {"foo", "bar"}), (None, {"foo", "bar"}), ], ) @mock_pypi_api(MOCK_PYPI_API_RESPONSES) def test_flavors(flavors, expected): with mock_ml_package_versions_yml(MOCK_YAML_SOURCE, "{}") as path_args: flavors_args = [] if flavors is None else ["--flavors", flavors] matrix = generate_matrix([*path_args, *flavors_args]) flavors = {x.flavor for x in matrix} assert flavors == expected @pytest.mark.parametrize( ("versions", "expected"), [ ("1.0.0", {"1.0.0"}), ("1.0.0,1.1.1", {"1.0.0", "1.1.1"}), ("1.3, 1.4", {"1.3", "1.4"}), # Contains a space after a comma ("", {"1.0.0", "1.1.1", "1.2.0", "1.3", "1.4", "dev"}), (None, {"1.0.0", "1.1.1", "1.2.0", "1.3", "1.4", "dev"}), ], ) @mock_pypi_api(MOCK_PYPI_API_RESPONSES) def test_versions(versions, expected): with mock_ml_package_versions_yml(MOCK_YAML_SOURCE, "{}") as path_args: versions_args = [] if versions is None else ["--versions", versions] matrix = generate_matrix([*path_args, *versions_args]) versions = {str(x.version) for x in matrix} assert versions == expected @mock_pypi_api(MOCK_PYPI_API_RESPONSES) def test_flavors_and_versions(): with mock_ml_package_versions_yml(MOCK_YAML_SOURCE, "{}") as path_args: matrix = generate_matrix([*path_args, "--flavors", "foo,bar", "--versions", "dev"]) flavors = {x.flavor for x in matrix} versions = {str(x.version) for x in matrix} assert set(flavors) == {"foo", "bar"} assert set(versions) == {"dev"} @mock_pypi_api(MOCK_PYPI_API_RESPONSES) def test_no_dev(): with mock_ml_package_versions_yml(MOCK_YAML_SOURCE, "{}") as path_args: matrix = generate_matrix([*path_args, "--no-dev"]) flavors = {x.flavor for x in matrix} versions = {str(x.version) for x in matrix} assert set(flavors) == {"foo", "bar"} assert set(versions) == {"1.0.0", "1.1.1", "1.2.0", "1.3", "1.4"} @mock_pypi_api(MOCK_PYPI_API_RESPONSES) def test_changed_files(): with 
mock_ml_package_versions_yml(MOCK_YAML_SOURCE, MOCK_YAML_SOURCE) as path_args: matrix = generate_matrix([*path_args, "--changed-files", "mlflow/foo/__init__.py"]) flavors = {x.flavor for x in matrix} versions = {str(x.version) for x in matrix} assert set(flavors) == {"foo"} assert set(versions) == {"1.0.0", "1.1.1", "1.2.0", "dev"}
MockResponse
python
huggingface__transformers
src/transformers/models/arcee/modular_arcee.py
{ "start": 8410, "end": 8539 }
class ____(LlamaForSequenceClassification): pass @auto_docstring(checkpoint="arcee-ai/AFM-4.5B")
ArceeForSequenceClassification
python
weaviate__weaviate-python-client
weaviate/collections/classes/aggregate.py
{ "start": 2683, "end": 2823 }
class ____: """The aggregation results for a collection grouped by a property.""" groups: List[AggregateGroup]
AggregateGroupByReturn
python
django__django
django/forms/utils.py
{ "start": 1923, "end": 2587 }
class ____(RenderableMixin): def as_field_group(self): return self.render() def as_hidden(self): raise NotImplementedError( "Subclasses of RenderableFieldMixin must provide an as_hidden() method." ) def as_widget(self): raise NotImplementedError( "Subclasses of RenderableFieldMixin must provide an as_widget() method." ) def __str__(self): """Render this field as an HTML widget.""" if self.field.show_hidden_initial: return self.as_widget() + self.as_hidden(only_initial=True) return self.as_widget() __html__ = __str__
RenderableFieldMixin
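A toy subclass sketch showing the contract the mixin above enforces: concrete fields must supply ``as_widget()`` and ``as_hidden()``, otherwise the base methods raise ``NotImplementedError``. The class and markup here are illustrative, not Django API.

class PlainTextField(RenderableFieldMixin):
    """Hypothetical renderable field with a fixed widget rendering."""

    def __init__(self, field):
        self.field = field  # __str__ above consults field.show_hidden_initial

    def as_widget(self, only_initial=False):
        return "<input type='text'>"

    def as_hidden(self, only_initial=False):
        return "<input type='hidden'>"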
python
django__django
tests/model_fields/models.py
{ "start": 4340, "end": 4463 }
class ____(models.Model): d = models.DateField() dt = models.DateTimeField() t = models.TimeField()
DateTimeModel
python
spyder-ide__spyder
spyder/plugins/updatemanager/widgets/update.py
{ "start": 21248, "end": 21492 }
class ____(QMessageBox): def __init__(self, icon=None, text=None, parent=None): super().__init__(icon=icon, text=text, parent=parent) self.setWindowModality(Qt.NonModal) self.setTextFormat(Qt.RichText)
UpdateMessageBox
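A brief usage sketch of the subclass above, assuming ``qtpy`` (as used elsewhere in this module); it demonstrates the non-modal, rich-text behavior set in the constructor.

import sys
from qtpy.QtWidgets import QApplication, QMessageBox

app = QApplication(sys.argv)
box = UpdateMessageBox(icon=QMessageBox.Information, text="An update is <b>available</b>!")
box.show()  # non-modal, so the rest of the UI stays responsive
sys.exit(app.exec_())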
python
ray-project__ray
release/nightly_tests/decision_tree/cart_with_tree.py
{ "start": 272, "end": 3439 }
class ____: """A decision tree node.""" def __init__(self, gini, num_samples, num_samples_per_class, predicted_class): self.gini = gini self.num_samples = num_samples self.num_samples_per_class = num_samples_per_class self.predicted_class = predicted_class self.feature_index = 0 self.threshold = 0 self.left = None self.right = None def debug(self, feature_names, class_names, show_details): """Print an ASCII visualization of the tree.""" lines, _, _, _ = self._debug_aux( feature_names, class_names, show_details, root=True ) for line in lines: print(line) def _debug_aux(self, feature_names, class_names, show_details, root=False): # See https://stackoverflow.com/a/54074933/1143396 for similar code. is_leaf = not self.right if is_leaf: lines = [class_names[self.predicted_class]] else: lines = [ "{} < {:.2f}".format(feature_names[self.feature_index], self.threshold) ] if show_details: lines += [ "gini = {:.2f}".format(self.gini), "samples = {}".format(self.num_samples), str(self.num_samples_per_class), ] width = max(len(line) for line in lines) height = len(lines) if is_leaf: lines = ["║ {:^{width}} ║".format(line, width=width) for line in lines] lines.insert(0, "╔" + "═" * (width + 2) + "╗") lines.append("╚" + "═" * (width + 2) + "╝") else: lines = ["│ {:^{width}} │".format(line, width=width) for line in lines] lines.insert(0, "┌" + "─" * (width + 2) + "┐") lines.append("└" + "─" * (width + 2) + "┘") lines[-2] = "┤" + lines[-2][1:-1] + "├" width += 4 # for padding if is_leaf: middle = width // 2 lines[0] = lines[0][:middle] + "╧" + lines[0][middle + 1 :] return lines, width, height, middle # If not a leaf, must have two children. left, n, p, x = self.left._debug_aux(feature_names, class_names, show_details) right, m, q, y = self.right._debug_aux(feature_names, class_names, show_details) top_lines = [n * " " + line + m * " " for line in lines[:-2]] # fmt: off middle_line = x * " " + "┌" + ( n - x - 1) * "─" + lines[-2] + y * "─" + "┐" + (m - y - 1) * " " bottom_line = x * " " + "│" + ( n - x - 1) * " " + lines[-1] + y * " " + "│" + (m - y - 1) * " " # fmt: on if p < q: left += [n * " "] * (q - p) elif q < p: right += [m * " "] * (p - q) zipped_lines = zip(left, right) lines = ( top_lines + [middle_line, bottom_line] + [a + width * " " + b for a, b in zipped_lines] ) middle = n + width // 2 if not root: lines[0] = lines[0][:middle] + "┴" + lines[0][middle + 1 :] return lines, n + m + width, max(p, q) + 2 + len(top_lines), middle
Node
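A tiny hand-built tree to exercise ``Node.debug()`` above; the statistics are made up purely to show the ASCII rendering.

left = Node(gini=0.0, num_samples=30, num_samples_per_class=[30, 0], predicted_class=0)
right = Node(gini=0.0, num_samples=20, num_samples_per_class=[0, 20], predicted_class=1)
root = Node(gini=0.48, num_samples=50, num_samples_per_class=[30, 20], predicted_class=0)
root.feature_index, root.threshold = 0, 2.5
root.left, root.right = left, right

# Prints the split condition in a box with the two leaf classes beneath it.
root.debug(["petal width"], ["setosa", "versicolor"], show_details=False)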
python
getsentry__sentry
src/sentry/workflow_engine/endpoints/organization_workflow_index.py
{ "start": 3307, "end": 3851 }
class ____(OrganizationEndpoint): permission_classes = (OrganizationWorkflowPermission,) def convert_args(self, request: Request, workflow_id, *args, **kwargs): args, kwargs = super().convert_args(request, *args, **kwargs) try: kwargs["workflow"] = Workflow.objects.get( organization=kwargs["organization"], id=workflow_id ) except Workflow.DoesNotExist: raise ResourceDoesNotExist return args, kwargs @region_silo_endpoint
OrganizationWorkflowEndpoint
python
PrefectHQ__prefect
tests/utilities/test_pydantic.py
{ "start": 1323, "end": 2184 }
class ____: @pytest.fixture(autouse=True) def check_for_model_decoration_exception(self): if REDUCTION_MODELS_EXC: raise RuntimeError("Failed to create test model.") from REDUCTION_MODELS_EXC def test_add_cloudpickle_reduction(self): model = CythonFieldModel(x="./foo.txt") result = cloudpickle.loads(cloudpickle.dumps(model)) assert result == model def test_add_cloudpickle_reduction_with_kwargs(self): model = ReductionWithKwargs(x=1, y="test") # A mock is hard to use here because it is not serializable so we exclude a # field instead result = cloudpickle.loads(cloudpickle.dumps(model)) assert result.x == 0, ( "'x' should return to the default value since it was excluded" ) assert result.y == "test"
TestCloudpickleReduction
python
getsentry__sentry
tests/sentry/db/postgres/schema/safe_migrations/integration/test_migrations.py
{ "start": 4589, "end": 5029 }
class ____(BaseSafeMigrationTest): app = "bad_flow_rename_table_app" migrate_from = "0001_initial" migrate_to = "0002_rename_table" def test(self) -> None: with pytest.raises( UnsafeOperationException, match="Renaming table for model NewTable from bad_flow_rename_table_app_testtable to bad_flow_rename_table_app_newtable is unsafe", ): self.run_migration()
RenameTableTest
python
sqlalchemy__sqlalchemy
test/orm/test_hasparent.py
{ "start": 593, "end": 6494 }
class ____(fixtures.MappedTest): """Test that the 'hasparent' flag gets flipped to False only if we're sure this object is the real parent. In ambiguous cases a stale data exception is raised. """ run_inserts = None # trying to push GC to do a better job run_setup_classes = "each" run_setup_mappers = "each" @classmethod def define_tables(cls, metadata): if testing.against("oracle"): fk_args = dict(deferrable=True, initially="deferred") elif testing.against("mysql"): fk_args = {} else: fk_args = dict(onupdate="cascade") Table( "users", metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), ) Table( "addresses", metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), Column("user_id", Integer, ForeignKey("users.id", **fk_args)), ) @classmethod def setup_classes(cls): class User(cls.Comparable): pass class Address(cls.Comparable): pass @classmethod def setup_mappers(cls): cls.mapper_registry.map_imperatively( cls.classes.Address, cls.tables.addresses ) cls.mapper_registry.map_imperatively( cls.classes.User, cls.tables.users, properties={ "addresses": relationship( cls.classes.Address, cascade="all, delete-orphan" ) }, ) def _assert_hasparent(self, a1): assert attributes.has_parent(self.classes.User, a1, "addresses") def _assert_not_hasparent(self, a1): assert not attributes.has_parent(self.classes.User, a1, "addresses") def _fixture(self): User, Address = self.classes.User, self.classes.Address s = fixture_session() u1 = User() a1 = Address() u1.addresses.append(a1) s.add(u1) s.flush() return s, u1, a1 def test_stale_state_positive(self): User = self.classes.User s, u1, a1 = self._fixture() s.expunge(u1) u1 = s.query(User).first() u1.addresses.remove(a1) self._assert_not_hasparent(a1) def test_stale_state_positive_gc(self): User = self.classes.User s, u1, a1 = self._fixture() s.expunge(u1) u1_is = u1._sa_instance_state # act as though GC happened u1_is._force_dereference() u1 = s.query(User).first() u1.addresses.remove(a1) self._assert_not_hasparent(a1) @testing.requires.updateable_autoincrement_pks def test_stale_state_positive_pk_change(self): """Illustrate that we can't easily link a stale state to a fresh one if the fresh one has a PK change (unless we a. tracked all the previous PKs, wasteful, or b. recycled states - time consuming, breaks lots of edge cases, destabilizes the code) """ User = self.classes.User s, u1, a1 = self._fixture() s._expunge_states([attributes.instance_state(u1)]) u1is = u1._sa_instance_state del u1 gc_collect() u1is._force_dereference() u1 = s.query(User).first() # primary key change. now we # can't rely on state.key as the # identifier. new_id = u1.id + 10 u1.id = new_id a1.user_id = new_id s.flush() assert_raises_message( orm_exc.StaleDataError, "can't be sure this is the most recent parent.", u1.addresses.remove, a1, ) # u1.addresses wasn't actually impacted, because the event was # caught before collection mutation eq_(u1.addresses, [a1]) # expire all and we can continue s.expire_all() u1.addresses.remove(a1) self._assert_not_hasparent(a1) def test_stale_state_negative_child_expired(self): """illustrate the current behavior of expiration on the child. there's some uncertainty here in how this use case should work. """ User = self.classes.User s, u1, a1 = self._fixture() gc_collect() u2 = User(addresses=[a1]) # noqa s.expire(a1) u1.addresses.remove(a1) u2_is = u2._sa_instance_state # act as though GC happened u2_is._force_dereference() # controversy here. 
The action is # to expire one object, not the other, and remove; # this is pretty abusive in any case. for now # we are expiring away the 'parents' collection # so the remove will unset the hasparent flag. # this is what has occurred historically in any case. self._assert_not_hasparent(a1) # self._assert_hasparent(a1) def test_stale_state_negative(self): User = self.classes.User s, u1, a1 = self._fixture() gc_collect() u2 = User(addresses=[a1]) s.add(u2) s.flush() s._expunge_states([attributes.instance_state(u2)]) u2_is = u2._sa_instance_state u2_is._force_dereference() assert_raises_message( orm_exc.StaleDataError, "can't be sure this is the most recent parent.", u1.addresses.remove, a1, ) s.flush() self._assert_hasparent(a1) def test_fresh_state_positive(self): s, u1, a1 = self._fixture() self._assert_hasparent(a1) def test_fresh_state_negative(self): s, u1, a1 = self._fixture() u1.addresses.remove(a1) self._assert_not_hasparent(a1)
ParentRemovalTest
python
huggingface__transformers
src/transformers/models/got_ocr2/modeling_got_ocr2.py
{ "start": 2427, "end": 8312 }
class ____(nn.Module): """Multi-head Attention block with relative position embeddings.""" def __init__(self, config, window_size): super().__init__() input_size = ( (config.image_size // config.patch_size, config.image_size // config.patch_size) if window_size == 0 else (window_size, window_size) ) self.num_attention_heads = config.num_attention_heads head_dim = config.hidden_size // config.num_attention_heads self.scale = head_dim**-0.5 self.dropout = config.attention_dropout self.qkv = nn.Linear(config.hidden_size, config.hidden_size * 3, bias=config.qkv_bias) self.proj = nn.Linear(config.hidden_size, config.hidden_size) self.use_rel_pos = config.use_rel_pos if self.use_rel_pos: if input_size is None: raise ValueError("Input size must be provided if using relative positional encoding.") # initialize relative positional embeddings self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim)) self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim)) def get_rel_pos(self, q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor: """ Get relative positional embeddings according to the relative positions of query and key sizes. Args: q_size (int): size of the query. k_size (int): size of key k. rel_pos (`torch.Tensor`): relative position embeddings (L, channel). Returns: Extracted positional embeddings according to relative positions. """ max_rel_dist = int(2 * max(q_size, k_size) - 1) # Interpolate rel pos. rel_pos_resized = F.interpolate( rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), size=max_rel_dist, mode="linear", ) rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) # Scale the coords with short length if shapes for q and k are different. q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) return rel_pos_resized[relative_coords.long()] def get_decomposed_rel_pos( self, query: torch.Tensor, rel_pos_h: torch.Tensor, rel_pos_w: torch.Tensor, q_size: tuple[int, int], k_size: tuple[int, int], ) -> torch.Tensor: """ Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py Args: query (`torch.Tensor`): query q in the attention layer with shape (batch_size, query_height * query_width, channel). rel_pos_h (`torch.Tensor`): relative position embeddings (Lh, channel) for height axis. rel_pos_w (`torch.Tensor`): relative position embeddings (Lw, channel) for width axis. q_size (tuple): spatial sequence size of query q with (query_height, query_width). k_size (tuple): spatial sequence size of key k with (key_height, key_width). Returns: decomposed_rel_pos (`torch.Tensor`): decomposed relative position embeddings. 
""" query_height, query_width = q_size key_height, key_width = k_size relative_position_height = self.get_rel_pos(query_height, key_height, rel_pos_h) relative_position_width = self.get_rel_pos(query_width, key_width, rel_pos_w) batch_size, _, dim = query.shape reshaped_query = query.reshape(batch_size, query_height, query_width, dim) rel_h = torch.einsum("bhwc,hkc->bhwk", reshaped_query, relative_position_height) rel_w = torch.einsum("bhwc,wkc->bhwk", reshaped_query, relative_position_width) decomposed_rel_pos = rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :] return decomposed_rel_pos def forward(self, hidden_states: torch.Tensor, output_attentions=None) -> tuple[torch.Tensor, torch.Tensor]: batch_size, height, width, _ = hidden_states.shape # qkv with shape (3, batch_size, nHead, height * width, channel) qkv = ( self.qkv(hidden_states) .reshape(batch_size, height * width, 3, self.num_attention_heads, -1) .permute(2, 0, 3, 1, 4) ) # q, k, v with shape (batch_size * nHead, height * width, channel) query, key, value = qkv.reshape(3, batch_size * self.num_attention_heads, height * width, -1).unbind(0) attn_weights = (query * self.scale) @ key.transpose(-2, -1) if self.use_rel_pos: decomposed_rel_pos = self.get_decomposed_rel_pos( query, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width) ) decomposed_rel_pos = decomposed_rel_pos.reshape_as(attn_weights) attn_weights = attn_weights + decomposed_rel_pos attn_weights = torch.nn.functional.softmax(attn_weights, dtype=torch.float32, dim=-1).to(query.dtype) attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = (attn_probs @ value).reshape(batch_size, self.num_attention_heads, height, width, -1) attn_output = attn_output.permute(0, 2, 3, 1, 4).reshape(batch_size, height, width, -1) attn_output = self.proj(attn_output) return attn_output, attn_weights
GotOcr2VisionAttention
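A smoke-test sketch for the attention block above, driving it with a dummy config built from ``types.SimpleNamespace``; the attribute values are illustrative only.

import types
import torch

config = types.SimpleNamespace(
    image_size=32, patch_size=8, hidden_size=64, num_attention_heads=4,
    attention_dropout=0.0, qkv_bias=True, use_rel_pos=True,
)
attn = GotOcr2VisionAttention(config, window_size=0)
hidden_states = torch.randn(1, 4, 4, 64)  # (batch, height, width, hidden)
output, weights = attn(hidden_states)
print(output.shape)  # torch.Size([1, 4, 4, 64])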
python
PyCQA__pylint
pylint/checkers/refactoring/refactoring_checker.py
{ "start": 8145, "end": 101282 }
class ____(checkers.BaseTokenChecker): """Looks for code which can be refactored. This checker also mixes the astroid and the token approaches in order to create knowledge about whether an "else if" node is a true "else if" node, or an "elif" node. """ name = "refactoring" msgs = { "R1701": ( "Consider merging these isinstance calls to isinstance(%s, (%s))", "consider-merging-isinstance", "Used when multiple consecutive isinstance calls can be merged into one.", ), "R1706": ( "Consider using ternary (%s)", "consider-using-ternary", "Used when one of known pre-python 2.5 ternary syntax is used.", ), "R1709": ( "Boolean expression may be simplified to %s", "simplify-boolean-expression", "Emitted when redundant pre-python 2.5 ternary syntax is used.", ), "R1726": ( 'Boolean condition "%s" may be simplified to "%s"', "simplifiable-condition", "Emitted when a boolean condition is able to be simplified.", ), "R1727": ( "Boolean condition '%s' will always evaluate to '%s'", "condition-evals-to-constant", "Emitted when a boolean condition can be simplified to a constant value.", ), "R1702": ( "Too many nested blocks (%s/%s)", "too-many-nested-blocks", "Used when a function or a method has too many nested " "blocks. This makes the code less understandable and " "maintainable.", {"old_names": [("R0101", "old-too-many-nested-blocks")]}, ), "R1703": ( "The if statement can be replaced with %s", "simplifiable-if-statement", "Used when an if statement can be replaced with 'bool(test)'.", {"old_names": [("R0102", "old-simplifiable-if-statement")]}, ), "R1704": ( "Redefining argument with the local name %r", "redefined-argument-from-local", "Used when a local name is redefining an argument, which might " "suggest a potential error. This is taken in account only for " "a handful of name binding operations, such as for iteration, " "with statement assignment and exception handler assignment.", ), "R1705": ( 'Unnecessary "%s" after "return", %s', "no-else-return", "Used in order to highlight an unnecessary block of " "code following an if, or a try/except containing a return statement. " "As such, it will warn when it encounters an else " "following a chain of ifs, all of them containing a " "return statement.", ), "R1707": ( "Disallow trailing comma tuple", "trailing-comma-tuple", "In Python, a tuple is actually created by the comma symbol, " "not by the parentheses. Unfortunately, one can actually create a " "tuple by misplacing a trailing comma, which can lead to potential " "weird bugs in your code. You should always use parentheses " "explicitly for creating a tuple.", ), "R1708": ( "Do not raise StopIteration in generator, use return statement instead", "stop-iteration-return", "According to PEP479, the raise of StopIteration to end the loop of " "a generator may lead to hard to find bugs. This PEP specify that " "raise StopIteration has to be replaced by a simple return statement", ), "R1710": ( "Either all return statements in a function should return an expression, " "or none of them should.", "inconsistent-return-statements", "According to PEP8, if any return statement returns an expression, " "any return statements where no value is returned should explicitly " "state this as return None, and an explicit return statement " "should be present at the end of the function (if reachable)", ), "R1711": ( "Useless return at end of function or method", "useless-return", 'Emitted when a single "return" or "return None" statement is found ' "at the end of function or method definition. 
This statement can safely be " "removed because Python will implicitly return None", ), "R1712": ( "Consider using tuple unpacking for swapping variables", "consider-swap-variables", "You do not have to use a temporary variable in order to " 'swap variables. Using "tuple unpacking" to directly swap ' "variables makes the intention more clear.", ), "R1713": ( "Consider using str.join(sequence) for concatenating " "strings from an iterable", "consider-using-join", "Using str.join(sequence) is faster, uses less memory " "and increases readability compared to for-loop iteration.", ), "R1714": ( "Consider merging these comparisons with 'in' by using '%s %sin (%s)'." " Use a set instead if elements are hashable.", "consider-using-in", "To check if a variable is equal to one of many values, " 'combine the values into a set or tuple and check if the variable is contained "in" it ' "instead of checking for equality against each of the values. " "This is faster and less verbose.", ), "R1715": ( "Consider using dict.get for getting values from a dict " "if a key is present or a default if not", "consider-using-get", "Using the builtin dict.get for getting a value from a dictionary " "if a key is present or a default if not, is simpler and considered " "more idiomatic, although sometimes a bit slower", ), "R1716": ( "Simplify chained comparison between the operands", "chained-comparison", "This message is emitted when pylint encounters boolean operation like " '"a < b and b < c", suggesting instead to refactor it to "a < b < c"', ), "R1717": ( "Consider using a dictionary comprehension", "consider-using-dict-comprehension", "Emitted when we detect the creation of a dictionary " "using the dict() callable and a transient list. " "Although there is nothing syntactically wrong with this code, " "it is hard to read and can be simplified to a dict comprehension. " "Also it is faster since you don't need to create another " "transient list", ), "R1718": ( "Consider using a set comprehension", "consider-using-set-comprehension", "Although there is nothing syntactically wrong with this code, " "it is hard to read and can be simplified to a set comprehension. " "Also it is faster since you don't need to create another " "transient list", ), "R1719": ( "The if expression can be replaced with %s", "simplifiable-if-expression", "Used when an if expression can be replaced with 'bool(test)' " "or simply 'test' if the boolean cast is implicit.", ), "R1720": ( 'Unnecessary "%s" after "raise", %s', "no-else-raise", "Used in order to highlight an unnecessary block of " "code following an if, or a try/except containing a raise statement. " "As such, it will warn when it encounters an else " "following a chain of ifs, all of them containing a " "raise statement.", ), "R1721": ( "Unnecessary use of a comprehension, use %s instead.", "unnecessary-comprehension", "Instead of using an identity comprehension, " "consider using the list, dict or set constructor. " "It is faster and simpler.", ), "R1722": ( "Consider using 'sys.exit' instead", "consider-using-sys-exit", "Contrary to 'exit()' or 'quit()', 'sys.exit' does not rely on the " "site module being available (as the 'sys' module is always available).", ), "R1723": ( 'Unnecessary "%s" after "break", %s', "no-else-break", "Used in order to highlight an unnecessary block of " "code following an if containing a break statement. 
" "As such, it will warn when it encounters an else " "following a chain of ifs, all of them containing a " "break statement.", ), "R1724": ( 'Unnecessary "%s" after "continue", %s', "no-else-continue", "Used in order to highlight an unnecessary block of " "code following an if containing a continue statement. " "As such, it will warn when it encounters an else " "following a chain of ifs, all of them containing a " "continue statement.", ), "R1725": ( "Consider using Python 3 style super() without arguments", "super-with-arguments", "Emitted when calling the super() builtin with the current class " "and instance. On Python 3 these arguments are the default and they can be omitted.", ), "R1728": ( "Consider using a generator instead '%s(%s)'", "consider-using-generator", "If your container can be large using " "a generator will bring better performance.", ), "R1729": ( "Use a generator instead '%s(%s)'", "use-a-generator", "Comprehension inside of 'any', 'all', 'max', 'min' or 'sum' is unnecessary. " "A generator would be sufficient and faster.", ), "R1730": ( "Consider using '%s' instead of unnecessary if block", "consider-using-min-builtin", "Using the min builtin instead of a conditional improves readability and conciseness.", ), "R1731": ( "Consider using '%s' instead of unnecessary if block", "consider-using-max-builtin", "Using the max builtin instead of a conditional improves readability and conciseness.", ), "R1732": ( "Consider using 'with' for resource-allocating operations", "consider-using-with", "Emitted if a resource-allocating assignment or call may be replaced by a 'with' block. " "By using 'with' the release of the allocated resources is ensured even in the case " "of an exception.", ), "R1733": ( "Unnecessary dictionary index lookup, use '%s' instead", "unnecessary-dict-index-lookup", "Emitted when iterating over the dictionary items (key-item pairs) and accessing the " "value by index lookup. " "The value can be accessed directly instead.", ), "R1734": ( "Consider using [] instead of list()", "use-list-literal", "Emitted when using list() to create an empty list instead of the literal []. " "The literal is faster as it avoids an additional function call.", ), "R1735": ( "Consider using '%s' instead of a call to 'dict'.", "use-dict-literal", "Emitted when using dict() to create a dictionary instead of a literal '{ ... }'. " "The literal is faster as it avoids an additional function call.", ), "R1736": ( "Unnecessary list index lookup, use '%s' instead", "unnecessary-list-index-lookup", "Emitted when iterating over an enumeration and accessing the " "value by index lookup. " "The value can be accessed directly instead.", ), "R1737": ( "Use 'yield from' directly instead of yielding each element one by one", "use-yield-from", "Yielding directly from the iterator is faster and arguably cleaner code than yielding each element " "one by one in the loop.", ), } options = ( ( "max-nested-blocks", { "default": 5, "type": "int", "metavar": "<int>", "help": "Maximum number of nested blocks for function / method body", }, ), ( "never-returning-functions", { "default": ("sys.exit", "argparse.parse_error"), "type": "csv", "metavar": "<members names>", "help": "Complete name of functions that never returns. 
When checking " "for inconsistent-return-statements if a never returning function is " "called then it will be considered as an explicit return statement " "and no message will be printed.", }, ), ( "suggest-join-with-non-empty-separator", { "default": True, "type": "yn", "metavar": "<y or n>", "help": ( "Let 'consider-using-join' be raised when the separator to " "join on would be non-empty (resulting in expected fixes " 'of the type: ``"- " + "\n- ".join(items)``)' ), }, ), ) def __init__(self, linter: PyLinter) -> None: super().__init__(linter) self._return_nodes: dict[str, list[nodes.Return]] = {} self._consider_using_with_stack = ConsiderUsingWithStack() self._init() self._never_returning_functions: set[str] = set() self._suggest_join_with_non_empty_separator: bool = False def _init(self) -> None: self._nested_blocks: list[NodesWithNestedBlocks] = [] self._elifs: list[tuple[int, int]] = [] self._reported_swap_nodes: set[nodes.NodeNG] = set() self._can_simplify_bool_op: bool = False self._consider_using_with_stack.clear_all() def open(self) -> None: # do this in open since config not fully initialized in __init__ self._never_returning_functions = set( self.linter.config.never_returning_functions ) self._suggest_join_with_non_empty_separator = ( self.linter.config.suggest_join_with_non_empty_separator ) @cached_property def _dummy_rgx(self) -> Pattern[str]: return self.linter.config.dummy_variables_rgx # type: ignore[no-any-return] @staticmethod def _is_bool_const(node: nodes.Return | nodes.Assign) -> bool: return isinstance(node.value, nodes.Const) and isinstance( node.value.value, bool ) def _is_actual_elif(self, node: nodes.If | nodes.Try) -> bool: """Check if the given node is an actual elif. This is a problem we're having with the builtin ast module, which splits `elif` branches into a separate if statement. Unfortunately we need to know the exact type in certain cases. """ match node.parent: case nodes.If(orelse=[n]) if n == node: # current if node must directly follow an "else" if (node.lineno, node.col_offset) in self._elifs: return True return False def _check_simplifiable_if(self, node: nodes.If) -> None: """Check if the given if node can be simplified. The if statement can be reduced to a boolean expression in some cases. For instance, if there are two branches and both of them return a boolean value that depends on the result of the statement's test, then this can be reduced to `bool(test)` without losing any functionality. """ if self._is_actual_elif(node): # Not interested in if statements with multiple branches. return match node: case nodes.If(body=[first_branch], orelse=[else_branch]): pass case _: return # Check if both branches can be reduced. 
match first_branch: case nodes.Return(): if not isinstance(else_branch, nodes.Return): return first_branch_is_bool = self._is_bool_const(first_branch) else_branch_is_bool = self._is_bool_const(else_branch) reduced_to = "'return bool(test)'" case nodes.Assign(): if not isinstance(else_branch, nodes.Assign): return # Check if we assign to the same value first_branch_targets = [ target.name for target in first_branch.targets if isinstance(target, nodes.AssignName) ] else_branch_targets = [ target.name for target in else_branch.targets if isinstance(target, nodes.AssignName) ] if not (first_branch_targets and else_branch_targets): return if sorted(first_branch_targets) != sorted(else_branch_targets): return first_branch_is_bool = self._is_bool_const(first_branch) else_branch_is_bool = self._is_bool_const(else_branch) reduced_to = "'var = bool(test)'" case _: return if not (first_branch_is_bool and else_branch_is_bool): return if not first_branch.value.value: # This is a case that can't be easily simplified and # if it can be simplified, it will usually result in a # code that's harder to understand and comprehend. # Let's take for instance `arg and arg <= 3`. This could theoretically be # reduced to `not arg or arg > 3`, but the net result is that now the # condition is harder to understand, because it requires understanding of # an extra clause: # * first, there is the negation of truthness with `not arg` # * the second clause is `arg > 3`, which occurs when arg has a # a truth value, but it implies that `arg > 3` is equivalent # with `arg and arg > 3`, which means that the user must # think about this assumption when evaluating `arg > 3`. # The original form is easier to grasp. return self.add_message("simplifiable-if-statement", node=node, args=(reduced_to,)) def process_tokens(self, tokens: list[tokenize.TokenInfo]) -> None: # Optimization flag because '_is_trailing_comma' is costly trailing_comma_tuple_enabled_for_file = self.linter.is_message_enabled( "trailing-comma-tuple" ) trailing_comma_tuple_enabled_once: bool = trailing_comma_tuple_enabled_for_file # Process tokens and look for 'if' or 'elif' for index, token in enumerate(tokens): token_string = token[1] if ( not trailing_comma_tuple_enabled_once and token_string.startswith("#") # We have at least 1 '#' (one char) at the start of the token and "pylint:" in token_string[1:] # We have at least '#' 'pylint' ( + ':') (8 chars) at the start of the token and "enable" in token_string[8:] # We have at least '#', 'pylint', ( + ':'), 'enable' (+ '=') (15 chars) at # the start of the token and any( c in token_string[15:] for c in ("trailing-comma-tuple", "R1707") ) ): # Way to not have to check if "trailing-comma-tuple" is enabled or # disabled on each line: Any enable for it during tokenization and # we'll start using the costly '_is_trailing_comma' to check if we # need to raise the message. We still won't raise if it's disabled # again due to the usual generic message control handling later. trailing_comma_tuple_enabled_once = True if token_string == "elif": # AST exists by the time process_tokens is called, so # it's safe to assume tokens[index+1] exists. # tokens[index+1][2] is the elif's position as # reported by CPython and PyPy, # token[2] is the actual position and also is # reported by IronPython. 
self._elifs.extend([token[2], tokens[index + 1][2]]) elif ( trailing_comma_tuple_enabled_for_file or trailing_comma_tuple_enabled_once ) and _is_trailing_comma(tokens, index): # If "trailing-comma-tuple" is enabled globally we always check _is_trailing_comma # it might be for nothing if there's a local disable, or if the message control is # not enabling 'trailing-comma-tuple', but the alternative is having to check if # it's enabled for a line each line (just to avoid calling '_is_trailing_comma'). self.add_message( "trailing-comma-tuple", line=token.start[0], confidence=HIGH ) @utils.only_required_for_messages("consider-using-with") def leave_module(self, _: nodes.Module) -> None: # check for context managers that have been created but not used self._emit_consider_using_with_if_needed( self._consider_using_with_stack.module_scope ) self._init() @utils.only_required_for_messages("too-many-nested-blocks", "no-else-return") def visit_try(self, node: nodes.Try) -> None: self._check_nested_blocks(node) self._check_superfluous_else_return(node) self._check_superfluous_else_raise(node) visit_while = visit_try def _check_redefined_argument_from_local(self, name_node: nodes.AssignName) -> None: if self._dummy_rgx and self._dummy_rgx.match(name_node.name): return if not name_node.lineno: # Unknown position, maybe it is a manually built AST? return scope = name_node.scope() if not isinstance(scope, nodes.FunctionDef): return for defined_argument in scope.args.nodes_of_class( nodes.AssignName, skip_klass=(nodes.Lambda,) ): if defined_argument.name == name_node.name: self.add_message( "redefined-argument-from-local", node=name_node, args=(name_node.name,), ) @utils.only_required_for_messages( "redefined-argument-from-local", "too-many-nested-blocks", "unnecessary-dict-index-lookup", "unnecessary-list-index-lookup", ) def visit_for(self, node: nodes.For) -> None: self._check_nested_blocks(node) self._check_unnecessary_dict_index_lookup(node) self._check_unnecessary_list_index_lookup(node) for name in node.target.nodes_of_class(nodes.AssignName): self._check_redefined_argument_from_local(name) @utils.only_required_for_messages("redefined-argument-from-local") def visit_excepthandler(self, node: nodes.ExceptHandler) -> None: if node.name and isinstance(node.name, nodes.AssignName): self._check_redefined_argument_from_local(node.name) @utils.only_required_for_messages( "redefined-argument-from-local", "consider-using-with" ) def visit_with(self, node: nodes.With) -> None: for var, names in node.items: if isinstance(var, nodes.Name): for stack in self._consider_using_with_stack: # We don't need to restrict the stacks we search to the current scope and # outer scopes, as e.g. the function_scope stack will be empty when we # check a ``with`` on the class level. if var.name in stack: del stack[var.name] break if not names: continue for name in names.nodes_of_class(nodes.AssignName): self._check_redefined_argument_from_local(name) def _check_superfluous_else( self, node: nodes.If | nodes.Try, msg_id: str, returning_node_class: nodes.NodeNG, ) -> None: if isinstance(node, nodes.Try) and node.finalbody: # Not interested in try/except/else/finally statements. return if not node.orelse: # Not interested in if/try statements without else. 
return if self._is_actual_elif(node): # Not interested in elif nodes; only if return if ( isinstance(node, nodes.If) and _if_statement_is_always_returning(node, returning_node_class) ) or ( isinstance(node, nodes.Try) and not node.finalbody and _except_statement_is_always_returning(node, returning_node_class) ): orelse = node.orelse[0] if (orelse.lineno, orelse.col_offset) in self._elifs: args = ("elif", 'remove the leading "el" from "elif"') else: args = ("else", 'remove the "else" and de-indent the code inside it') self.add_message(msg_id, node=node, args=args, confidence=HIGH) def _check_superfluous_else_return(self, node: nodes.If) -> None: return self._check_superfluous_else( node, msg_id="no-else-return", returning_node_class=nodes.Return ) def _check_superfluous_else_raise(self, node: nodes.If) -> None: return self._check_superfluous_else( node, msg_id="no-else-raise", returning_node_class=nodes.Raise ) def _check_superfluous_else_break(self, node: nodes.If) -> None: return self._check_superfluous_else( node, msg_id="no-else-break", returning_node_class=nodes.Break ) def _check_superfluous_else_continue(self, node: nodes.If) -> None: return self._check_superfluous_else( node, msg_id="no-else-continue", returning_node_class=nodes.Continue ) @staticmethod def _type_and_name_are_equal(node_a: Any, node_b: Any) -> bool: match (node_a, node_b): case ( [nodes.Name(name=a), nodes.Name(name=b)] | [nodes.AssignName(name=a), nodes.AssignName(name=b)] | [nodes.Const(value=a), nodes.Const(value=b)] ): return a == b # type: ignore[no-any-return] return False def _is_dict_get_block(self, node: nodes.If) -> bool: match node: case nodes.If( test=nodes.Compare() as test, body=[ nodes.Assign( targets=[nodes.AssignName()], value=nodes.Subscript(slice=slice_value, value=value), ) ], ): # "if <compare node>" # Does not have a single statement in the guard's body # Look for a single variable assignment on the LHS and a subscript on RHS pass case _: return False # The subscript's slice needs to be the same as the test variable. 
if not ( self._type_and_name_are_equal(value, test.ops[0][1]) and self._type_and_name_are_equal(slice_value, test.left) ): return False # The object needs to be a dictionary instance return isinstance(utils.safe_infer(test.ops[0][1]), nodes.Dict) def _check_consider_get(self, node: nodes.If) -> None: if not self._is_dict_get_block(node): return match node: case nodes.If(orelse=[]): self.add_message("consider-using-get", node=node) case nodes.If( body=[nodes.Assign(targets=[t1])], orelse=[nodes.Assign(targets=[t2])] ) if self._type_and_name_are_equal(t1, t2): self.add_message("consider-using-get", node=node) @utils.only_required_for_messages( "too-many-nested-blocks", "simplifiable-if-statement", "no-else-return", "no-else-raise", "no-else-break", "no-else-continue", "consider-using-get", "consider-using-min-builtin", "consider-using-max-builtin", ) def visit_if(self, node: nodes.If) -> None: self._check_simplifiable_if(node) self._check_nested_blocks(node) self._check_superfluous_else_return(node) self._check_superfluous_else_raise(node) self._check_superfluous_else_break(node) self._check_superfluous_else_continue(node) self._check_consider_get(node) self._check_consider_using_min_max_builtin(node) def _check_consider_using_min_max_builtin(self, node: nodes.If) -> None: """Check if the given if node can be refactored as a min/max python builtin.""" # This function is written expecting a test condition of form: # if a < b: # [consider-using-max-builtin] # a = b # if a > b: # [consider-using-min-builtin] # a = b if self._is_actual_elif(node) or node.orelse: # Not interested in if statements with multiple branches. return if len(node.body) != 1: return def get_node_name(node: nodes.NodeNG) -> str: """Obtain simplest representation of a node as a string.""" if isinstance(node, nodes.Name): return node.name # type: ignore[no-any-return] if isinstance(node, nodes.Const): return str(node.value) # this is a catch-all for nodes that are not of type Name or Const # extremely helpful for Call or BinOp return node.as_string() # type: ignore[no-any-return] match node: case nodes.If( test=nodes.Compare(left=left, ops=[[operator, right_statement]]), body=[ nodes.Assign( targets=[nodes.AssignName() | nodes.AssignAttr() as target], value=value, ), *_, ], ) if not isinstance(left, nodes.Subscript): # Assign body line has one requirement and that is the assign target # is of type name or attribute. Attribute referring to NamedTuple.x perse. 
# So we have to check that target is of these types pass case _: return target_assignation = get_node_name(target) body_value = get_node_name(value) left_operand = get_node_name(left) right_statement_value = get_node_name(right_statement) if left_operand == target_assignation: # statement is in expected form pass elif right_statement_value == target_assignation: # statement is in reverse form operator = utils.get_inverse_comparator(operator) else: return if body_value not in (right_statement_value, left_operand): return match operator: case "<" | "<=": reduced_to = ( f"{target_assignation} = max({target_assignation}, {body_value})" ) self.add_message( "consider-using-max-builtin", node=node, args=(reduced_to,) ) case ">" | ">=": reduced_to = ( f"{target_assignation} = min({target_assignation}, {body_value})" ) self.add_message( "consider-using-min-builtin", node=node, args=(reduced_to,) ) @utils.only_required_for_messages("simplifiable-if-expression") def visit_ifexp(self, node: nodes.IfExp) -> None: self._check_simplifiable_ifexp(node) def _check_simplifiable_ifexp(self, node: nodes.IfExp) -> None: match node: case nodes.IfExp( body=nodes.Const(value=bool() as body_value), orelse=nodes.Const(value=bool() as orelse_value), ): pass case _: return if isinstance(node.test, nodes.Compare): test_reduced_to = "test" else: test_reduced_to = "bool(test)" match (body_value, orelse_value): case [True, False]: reduced_to = f"'{test_reduced_to}'" case [False, True]: reduced_to = "'not test'" case _: return self.add_message("simplifiable-if-expression", node=node, args=(reduced_to,)) @utils.only_required_for_messages( "too-many-nested-blocks", "inconsistent-return-statements", "useless-return", "consider-using-with", ) def leave_functiondef(self, node: nodes.FunctionDef) -> None: # check left-over nested blocks stack self._emit_nested_blocks_message_if_needed(self._nested_blocks) # new scope = reinitialize the stack of nested blocks self._nested_blocks = [] # check consistent return statements self._check_consistent_returns(node) # check for single return or return None at the end self._check_return_at_the_end(node) self._return_nodes[node.name] = [] # check for context managers that have been created but not used self._emit_consider_using_with_if_needed( self._consider_using_with_stack.function_scope ) self._consider_using_with_stack.function_scope.clear() @utils.only_required_for_messages("consider-using-with") def leave_classdef(self, _: nodes.ClassDef) -> None: # check for context managers that have been created but not used self._emit_consider_using_with_if_needed( self._consider_using_with_stack.class_scope ) self._consider_using_with_stack.class_scope.clear() @utils.only_required_for_messages("stop-iteration-return") def visit_raise(self, node: nodes.Raise) -> None: self._check_stop_iteration_inside_generator(node) def _check_stop_iteration_inside_generator(self, node: nodes.Raise) -> None: """Check if an exception of type StopIteration is raised inside a generator.""" frame = node.frame() if not (isinstance(frame, nodes.FunctionDef) and frame.is_generator()): return if utils.node_ignores_exception(node, StopIteration): return if not node.exc: return exc = utils.safe_infer(node.exc) if not isinstance(exc, (bases.Instance, nodes.ClassDef)): return if self._check_exception_inherit_from_stopiteration(exc): self.add_message("stop-iteration-return", node=node, confidence=INFERENCE) @staticmethod def _check_exception_inherit_from_stopiteration( exc: nodes.ClassDef | bases.Instance, ) -> bool: 
"""Return True if the exception node in argument inherit from StopIteration.""" stopiteration_qname = f"{utils.EXCEPTIONS_MODULE}.StopIteration" return any(_class.qname() == stopiteration_qname for _class in exc.mro()) def _check_consider_using_comprehension_constructor(self, node: nodes.Call) -> None: match node: case nodes.Call( func=nodes.Name(name=name), args=[nodes.ListComp(elt=element), *_] ): pass case _: return match name: case "dict": if isinstance(element, nodes.Call): return # If we have an `IfExp` here where both the key AND value # are different, then don't raise the issue. See #5588 if ( isinstance(element, nodes.IfExp) and isinstance(element.body, (nodes.Tuple, nodes.List)) and len(element.body.elts) == 2 and isinstance(element.orelse, (nodes.Tuple, nodes.List)) and len(element.orelse.elts) == 2 ): key1, value1 = element.body.elts key2, value2 = element.orelse.elts if ( key1.as_string() != key2.as_string() and value1.as_string() != value2.as_string() ): return message_name = "consider-using-dict-comprehension" self.add_message(message_name, node=node) case "set": message_name = "consider-using-set-comprehension" self.add_message(message_name, node=node) def _check_consider_using_generator(self, node: nodes.Call) -> None: match node: case nodes.Call( func=nodes.Name(name=call_name), args=[nodes.ListComp() as comp] ) if (call_name in CALLS_THAT_SHOULD_USE_GENERATOR): # functions in checked_calls take exactly one positional argument # check whether the argument is list comprehension pass case _: return inside_comp = comp.as_string()[1:-1] # remove square brackets '[]' if node.keywords: inside_comp = f"({inside_comp})" inside_comp += ", " inside_comp += ", ".join(kw.as_string() for kw in node.keywords) if call_name in {"any", "all"}: self.add_message( "use-a-generator", node=node, args=(call_name, inside_comp), ) else: self.add_message( "consider-using-generator", node=node, args=(call_name, inside_comp), ) @utils.only_required_for_messages( "stop-iteration-return", "consider-using-dict-comprehension", "consider-using-set-comprehension", "consider-using-sys-exit", "super-with-arguments", "consider-using-generator", "consider-using-with", "use-list-literal", "use-dict-literal", "use-a-generator", ) def visit_call(self, node: nodes.Call) -> None: self._check_raising_stopiteration_in_generator_next_call(node) self._check_consider_using_comprehension_constructor(node) self._check_quit_exit_call(node) self._check_super_with_arguments(node) self._check_consider_using_generator(node) self._check_consider_using_with(node) self._check_use_list_literal(node) self._check_use_dict_literal(node) @utils.only_required_for_messages("use-yield-from") def visit_yield(self, node: nodes.Yield) -> None: match node: case nodes.Yield( value=nodes.Name(name=name), parent=nodes.Expr(parent=nodes.For(body=[_]) as loop_node), ) if not isinstance(loop_node, nodes.AsyncFor): pass case _: # Avoid a false positive if the return value from `yield` is used, # (such as via Assign, AugAssign, etc). 
return if loop_node.target.name != name: return if isinstance(node.frame(), nodes.AsyncFunctionDef): return self.add_message("use-yield-from", node=loop_node, confidence=HIGH) @staticmethod def _has_exit_in_scope(scope: nodes.LocalsDictNodeNG) -> bool: exit_func = scope.locals.get("exit") return bool( exit_func and isinstance(exit_func[0], (nodes.ImportFrom, nodes.Import)) ) def _check_quit_exit_call(self, node: nodes.Call) -> None: match node.func: case nodes.Name(name=name) if name in BUILTIN_EXIT_FUNCS: # If we have `exit` imported from `sys` in the current or global scope, # exempt this instance. local_scope = node.scope() if self._has_exit_in_scope(local_scope) or self._has_exit_in_scope( node.root() ): return self.add_message("consider-using-sys-exit", node=node, confidence=HIGH) def _check_super_with_arguments(self, node: nodes.Call) -> None: match node: case nodes.Call( func=nodes.Name(name="super"), args=[nodes.Name(name=name), nodes.Name(name="self")], ) if ( frame_class := node_frame_class(node) ) is not None and name == frame_class.name: pass case _: return self.add_message("super-with-arguments", node=node) def _check_raising_stopiteration_in_generator_next_call( self, node: nodes.Call ) -> None: """Check if a StopIteration exception is raised by the call to next function. If the next value has a default value, then do not add message. :param node: Check to see if this Call node is a next function :type node: :class:`nodes.Call` """ def _looks_like_infinite_iterator(param: nodes.NodeNG) -> bool: inferred = utils.safe_infer(param) if isinstance(inferred, bases.Instance): return inferred.qname() in KNOWN_INFINITE_ITERATORS return False if isinstance(node.func, nodes.Attribute): # A next() method, which is now what we want. return if len(node.args) == 0: # handle case when builtin.next is called without args. # see https://github.com/pylint-dev/pylint/issues/7828 return inferred = utils.safe_infer(node.func) if ( isinstance(inferred, nodes.FunctionDef) and inferred.qname() == "builtins.next" ): frame = node.frame() # The next builtin can only have up to two # positional arguments and no keyword arguments has_sentinel_value = len(node.args) > 1 if ( isinstance(frame, nodes.FunctionDef) and frame.is_generator() and not has_sentinel_value and not utils.node_ignores_exception(node, StopIteration) and not _looks_like_infinite_iterator(node.args[0]) ): self.add_message( "stop-iteration-return", node=node, confidence=INFERENCE ) def _check_nested_blocks( self, node: NodesWithNestedBlocks, ) -> None: """Update and check the number of nested blocks.""" # only check block levels inside functions or methods if not isinstance(node.scope(), nodes.FunctionDef): return # messages are triggered on leaving the nested block. 
Here we save the # stack in case the current node isn't nested in the previous one nested_blocks = self._nested_blocks[:] if node.parent == node.scope(): self._nested_blocks = [node] else: # go through ancestors from the most nested to the less for ancestor_node in reversed(self._nested_blocks): if ancestor_node == node.parent: break self._nested_blocks.pop() # if the node is an elif, this should not be another nesting level if isinstance(node, nodes.If) and self._is_actual_elif(node): if self._nested_blocks: self._nested_blocks.pop() self._nested_blocks.append(node) # send message only once per group of nested blocks if len(nested_blocks) > len(self._nested_blocks): self._emit_nested_blocks_message_if_needed(nested_blocks) def _emit_nested_blocks_message_if_needed( self, nested_blocks: list[NodesWithNestedBlocks] ) -> None: if len(nested_blocks) > self.linter.config.max_nested_blocks: self.add_message( "too-many-nested-blocks", node=nested_blocks[0], args=(len(nested_blocks), self.linter.config.max_nested_blocks), ) def _emit_consider_using_with_if_needed( self, stack: dict[str, nodes.NodeNG] ) -> None: for node in stack.values(): self.add_message("consider-using-with", node=node) @staticmethod def _duplicated_isinstance_types(node: nodes.BoolOp) -> dict[str, set[str]]: """Get the duplicated types from the underlying isinstance calls. :param nodes.BoolOp node: Node which should contain a bunch of isinstance calls. :returns: Dictionary of the comparison objects from the isinstance calls, to duplicate values from consecutive calls. :rtype: dict """ duplicated_objects: set[str] = set() all_types: collections.defaultdict[str, set[str]] = collections.defaultdict(set) for call in node.values: if not (isinstance(call, nodes.Call) and len(call.args) == 2): continue inferred = utils.safe_infer(call.func) if not (inferred and utils.is_builtin_object(inferred)): continue if inferred.name != "isinstance": continue isinstance_object = call.args[0].as_string() isinstance_types = call.args[1] if isinstance_object in all_types: duplicated_objects.add(isinstance_object) if isinstance(isinstance_types, nodes.Tuple): elems = [ class_type.as_string() for class_type in isinstance_types.itered() ] else: elems = [isinstance_types.as_string()] all_types[isinstance_object].update(elems) # Remove all keys which not duplicated return { key: value for key, value in all_types.items() if key in duplicated_objects } def _check_consider_merging_isinstance(self, node: nodes.BoolOp) -> None: """Check isinstance calls which can be merged together.""" if node.op != "or": return first_args = self._duplicated_isinstance_types(node) for duplicated_name, class_names in first_args.items(): names = sorted(name for name in class_names) self.add_message( "consider-merging-isinstance", node=node, args=(duplicated_name, ", ".join(names)), ) def _check_consider_using_in(self, node: nodes.BoolOp) -> None: allowed_ops = {"or": "==", "and": "!="} if not (node.op in allowed_ops and len(node.values) >= 2): return for value in node.values: match value: case nodes.Compare(left=nodes.Call()) | nodes.Compare( ops=[tuple((_, nodes.Call()))] ): return case nodes.Compare(ops=[tuple((op, _))]) if op in allowed_ops[node.op]: pass case _: return # Gather variables and values from comparisons variables, values = [], [] for value in node.values: variable_set = set() for comparable in value.left, value.ops[0][1]: if isinstance(comparable, (nodes.Name, nodes.Attribute)): variable_set.add(comparable.as_string()) values.append(comparable.as_string()) 
variables.append(variable_set) # Look for (common-)variables that occur in all comparisons common_variables = reduce(lambda a, b: a.intersection(b), variables) if not common_variables: return # Gather information for the suggestion common_variable = sorted(list(common_variables))[0] values = list(collections.OrderedDict.fromkeys(values)) values.remove(common_variable) values_string = ", ".join(values) if len(values) != 1 else values[0] + "," maybe_not = "" if node.op == "or" else "not " self.add_message( "consider-using-in", node=node, args=(common_variable, maybe_not, values_string), confidence=HIGH, ) def _check_chained_comparison(self, node: nodes.BoolOp) -> None: """Check if there is any chained comparison in the expression. Add a refactoring message if a boolOp contains comparison like a < b and b < c, which can be chained as a < b < c. Care is taken to avoid simplifying a < b < c and b < d. """ if not (node.op == "and" and len(node.values) >= 2): return def _find_lower_upper_bounds( comparison_node: nodes.Compare, uses: collections.defaultdict[str, dict[str, set[nodes.Compare]]], ) -> None: left_operand = comparison_node.left for operator, right_operand in comparison_node.ops: for operand in (left_operand, right_operand): match operand: case nodes.Name(name=value) | nodes.Const(value=value) if ( value is not None ): pass case _: continue match operator: case "<" | "<=": if operand is left_operand: uses[value]["lower_bound"].add(comparison_node) elif operand is right_operand: uses[value]["upper_bound"].add(comparison_node) case ">" | ">=": if operand is left_operand: uses[value]["upper_bound"].add(comparison_node) elif operand is right_operand: uses[value]["lower_bound"].add(comparison_node) left_operand = right_operand uses: collections.defaultdict[str, dict[str, set[nodes.Compare]]] = ( collections.defaultdict( lambda: {"lower_bound": set(), "upper_bound": set()} ) ) for comparison_node in node.values: if isinstance(comparison_node, nodes.Compare): _find_lower_upper_bounds(comparison_node, uses) for bounds in uses.values(): num_shared = len(bounds["lower_bound"].intersection(bounds["upper_bound"])) num_lower_bounds = len(bounds["lower_bound"]) num_upper_bounds = len(bounds["upper_bound"]) if num_shared < num_lower_bounds and num_shared < num_upper_bounds: self.add_message("chained-comparison", node=node) break @staticmethod def _apply_boolean_simplification_rules( operator: str, values: list[nodes.NodeNG] ) -> list[nodes.NodeNG]: """Removes irrelevant values or returns short-circuiting values. This function applies the following two rules: 1) an OR expression with True in it will always be true, and the reverse for AND 2) False values in OR expressions are only relevant if all values are false, and the reverse for AND """ simplified_values: list[nodes.NodeNG] = [] for subnode in values: inferred_bool = None if not next(subnode.nodes_of_class(nodes.Name), False): inferred = utils.safe_infer(subnode) if inferred: inferred_bool = inferred.bool_value() if not isinstance(inferred_bool, bool): simplified_values.append(subnode) elif (operator == "or") == inferred_bool: return [subnode] return simplified_values or [nodes.Const(operator == "and")] def _simplify_boolean_operation(self, bool_op: nodes.BoolOp) -> nodes.BoolOp: """Attempts to simplify a boolean operation. Recursively applies simplification on the operator terms, and keeps track of whether reductions have been made. 
""" children = list(bool_op.get_children()) intermediate = [ ( self._simplify_boolean_operation(child) if isinstance(child, nodes.BoolOp) else child ) for child in children ] result = self._apply_boolean_simplification_rules(bool_op.op, intermediate) if len(result) < len(children): self._can_simplify_bool_op = True if len(result) == 1: return result[0] simplified_bool_op = copy.copy(bool_op) simplified_bool_op.postinit(result) return simplified_bool_op def _check_simplifiable_condition(self, node: nodes.BoolOp) -> None: """Check if a boolean condition can be simplified. Variables will not be simplified, even if the value can be inferred, and expressions like '3 + 4' will remain expanded. """ if not utils.is_test_condition(node): return self._can_simplify_bool_op = False simplified_expr = self._simplify_boolean_operation(node) if not self._can_simplify_bool_op: return if not next(simplified_expr.nodes_of_class(nodes.Name), False): self.add_message( "condition-evals-to-constant", node=node, args=(node.as_string(), simplified_expr.as_string()), ) else: self.add_message( "simplifiable-condition", node=node, args=(node.as_string(), simplified_expr.as_string()), ) @utils.only_required_for_messages( "consider-merging-isinstance", "consider-using-in", "chained-comparison", "simplifiable-condition", "condition-evals-to-constant", ) def visit_boolop(self, node: nodes.BoolOp) -> None: self._check_consider_merging_isinstance(node) self._check_consider_using_in(node) self._check_chained_comparison(node) self._check_simplifiable_condition(node) @staticmethod def _is_simple_assignment(node: nodes.NodeNG | None) -> bool: match node: case nodes.Assign(targets=[nodes.AssignName()], value=nodes.Name()): return True return False def _check_swap_variables(self, node: nodes.Return | nodes.Assign) -> None: if not (node.next_sibling() and node.next_sibling().next_sibling()): return assignments = [node, node.next_sibling(), node.next_sibling().next_sibling()] if not all(self._is_simple_assignment(node) for node in assignments): return if any(node in self._reported_swap_nodes for node in assignments): return left = [node.targets[0].name for node in assignments] right = [node.value.name for node in assignments] if left[0] == right[-1] and left[1:] == right[:-1]: self._reported_swap_nodes.update(assignments) message = "consider-swap-variables" self.add_message(message, node=node) @utils.only_required_for_messages( "simplify-boolean-expression", "consider-using-ternary", "consider-swap-variables", "consider-using-with", ) def visit_assign(self, node: nodes.Assign) -> None: self._append_context_managers_to_stack(node) self.visit_return(node) # remaining checks are identical as for return nodes @utils.only_required_for_messages( "simplify-boolean-expression", "consider-using-ternary", "consider-swap-variables", ) def visit_return(self, node: nodes.Return | nodes.Assign) -> None: self._check_swap_variables(node) if self._is_and_or_ternary(node.value): cond, truth_value, false_value = self._and_or_ternary_arguments(node.value) else: return if all( isinstance(value, nodes.Compare) for value in (truth_value, false_value) ): return inferred_truth_value = utils.safe_infer(truth_value, compare_constants=True) if inferred_truth_value is None or isinstance( inferred_truth_value, UninferableBase ): return truth_boolean_value = inferred_truth_value.bool_value() if truth_boolean_value is False: message = "simplify-boolean-expression" suggestion = false_value.as_string() else: message = "consider-using-ternary" suggestion = 
f"{truth_value.as_string()} if {cond.as_string()} else {false_value.as_string()}" self.add_message(message, node=node, args=(suggestion,), confidence=INFERENCE) def _append_context_managers_to_stack(self, node: nodes.Assign) -> None: if _is_inside_context_manager(node): # if we are inside a context manager itself, we assume that it will handle # the resource management itself. return if isinstance(node.targets[0], (nodes.Tuple, nodes.List, nodes.Set)): assignees = node.targets[0].elts value = utils.safe_infer(node.value) if value is None or not hasattr(value, "elts"): # We cannot deduce what values are assigned, so we have to skip this return values = value.elts else: assignees = [node.targets[0]] values = [node.value] if any(isinstance(n, UninferableBase) for n in (assignees, values)): return for assignee, value in zip(assignees, values): if not isinstance(value, nodes.Call): continue inferred = utils.safe_infer(value.func) if not ( inferred and inferred.qname() in CALLS_RETURNING_CONTEXT_MANAGERS and isinstance(assignee, (nodes.AssignName, nodes.AssignAttr)) ): continue stack = self._consider_using_with_stack.get_stack_for_frame(node.frame()) varname = ( assignee.name if isinstance(assignee, nodes.AssignName) else assignee.attrname ) if varname in stack: existing_node = stack[varname] if astroid.are_exclusive(node, existing_node): # only one of the two assignments can be executed at runtime, thus it is fine stack[varname] = value continue # variable was redefined before it was used in a ``with`` block self.add_message( "consider-using-with", node=existing_node, ) stack[varname] = value def _check_consider_using_with(self, node: nodes.Call) -> None: if _is_inside_context_manager(node) or _is_a_return_statement(node): # If we are inside a context manager itself, we assume that it will handle the # resource management itself. # If the node is a child of a return, we assume that the caller knows he is getting # a context manager he should use properly (i.e. in a ``with``). return if ( node in self._consider_using_with_stack.get_stack_for_frame( node.frame() ).values() ): # the result of this call was already assigned to a variable and will be # checked when leaving the scope. 
return inferred = utils.safe_infer(node.func) if not ( inferred and isinstance( inferred, (nodes.FunctionDef, nodes.ClassDef, bases.BoundMethod) ) ): return could_be_used_in_with = ( # things like ``lock.acquire()`` inferred.qname() in CALLS_THAT_COULD_BE_REPLACED_BY_WITH or ( # things like ``open("foo")`` which are not already inside a ``with`` statement inferred.qname() in CALLS_RETURNING_CONTEXT_MANAGERS and not _is_part_of_with_items(node) ) ) if could_be_used_in_with and not _will_be_released_automatically(node): self.add_message("consider-using-with", node=node) def _check_use_list_literal(self, node: nodes.Call) -> None: """Check if empty list is created by using the literal [].""" if node.as_string() == "list()": inferred = utils.safe_infer(node.func) if isinstance(inferred, nodes.ClassDef) and not node.args: if inferred.qname() == "builtins.list": self.add_message("use-list-literal", node=node) def _check_use_dict_literal(self, node: nodes.Call) -> None: """Check if dict is created by using the literal {}.""" if not (isinstance(node.func, nodes.Name) and node.func.name == "dict"): return inferred = utils.safe_infer(node.func) if ( isinstance(inferred, nodes.ClassDef) and inferred.qname() == "builtins.dict" and not node.args ): self.add_message( "use-dict-literal", args=(self._dict_literal_suggestion(node),), node=node, confidence=INFERENCE, ) @staticmethod def _dict_literal_suggestion(node: nodes.Call) -> str: """Return a suggestion of reasonable length.""" elements: list[str] = [] for keyword in node.keywords: if len(", ".join(elements)) >= 64: break if keyword not in node.kwargs: elements.append(f'"{keyword.arg}": {keyword.value.as_string()}') for keyword in node.kwargs: if len(", ".join(elements)) >= 64: break elements.append(f"**{keyword.value.as_string()}") suggestion = ", ".join(elements) return f"{{{suggestion}{', ... ' if len(suggestion) > 64 else ''}}}" def _name_to_concatenate(self, node: nodes.NodeNG) -> str | None: """Try to extract the name used in a concatenation loop.""" if isinstance(node, nodes.Name): return cast("str | None", node.name) if not isinstance(node, nodes.JoinedStr): return None values = [ value for value in node.values if isinstance(value, nodes.FormattedValue) ] if not (len(values) == 1 and isinstance(values[0].value, nodes.Name)): return None # If there are more values in joined string than formatted values, # they are probably separators. # Allow them only if the option `suggest-join-with-non-empty-separator` is set with_separators = len(node.values) > len(values) if with_separators and not self._suggest_join_with_non_empty_separator: return None return cast("str | None", values[0].value.name) def _check_consider_using_join(self, aug_assign: nodes.AugAssign) -> None: """We start with the augmented assignment and work our way upwards. 
Names of variables for nodes if match successful: result = '' # assign for number in ['1', '2', '3'] # for_loop result += number # aug_assign """ for_loop = aug_assign.parent if not (isinstance(for_loop, nodes.For) and len(for_loop.body) == 1): return assign = for_loop.previous_sibling() if not isinstance(assign, nodes.Assign): return result_assign_names = { target.name for target in assign.targets if isinstance(target, nodes.AssignName) } is_concat_loop = ( aug_assign.op == "+=" and isinstance(aug_assign.target, nodes.AssignName) and len(for_loop.body) == 1 and aug_assign.target.name in result_assign_names and isinstance(assign.value, nodes.Const) and isinstance(assign.value.value, str) and self._name_to_concatenate(aug_assign.value) == for_loop.target.name ) if is_concat_loop: self.add_message("consider-using-join", node=aug_assign) @utils.only_required_for_messages("consider-using-join") def visit_augassign(self, node: nodes.AugAssign) -> None: self._check_consider_using_join(node) @utils.only_required_for_messages( "unnecessary-comprehension", "unnecessary-dict-index-lookup", "unnecessary-list-index-lookup", ) def visit_comprehension(self, node: nodes.Comprehension) -> None: self._check_unnecessary_comprehension(node) self._check_unnecessary_dict_index_lookup(node) self._check_unnecessary_list_index_lookup(node) def _check_unnecessary_comprehension(self, node: nodes.Comprehension) -> None: if isinstance(node.parent, nodes.GeneratorExp) or not ( len(node.ifs) == 0 and len(node.parent.generators) == 1 and node.is_async is False ): return match node: case nodes.Comprehension( target=nodes.Tuple(elts=elts), parent=nodes.DictComp( key=nodes.Name(name=key_name), value=nodes.Name(name=value_name), ), ) if all(isinstance(elt, nodes.AssignName) for elt in elts): expr_list = [key_name, value_name] target_list = [elt.name for elt in elts] case nodes.Comprehension( parent=nodes.ListComp() | nodes.SetComp() as parent ): match expr := parent.elt: case nodes.Name(name=expr_list): pass case nodes.Tuple(): if not all(isinstance(elt, nodes.Name) for elt in expr.elts): return expr_list = [elt.name for elt in expr.elts] case _: expr_list = [] match target := parent.generators[0].target: case nodes.AssignName(name=target_list): pass case nodes.Tuple(): target_list = [ elt.name for elt in target.elts if isinstance(elt, nodes.AssignName) ] case _: target_list = [] case _: return if expr_list == target_list and expr_list: args: tuple[str] | None = None inferred = utils.safe_infer(node.iter) match (node.parent, inferred): case [nodes.DictComp(), objects.DictItems()]: args = (f"dict({node.iter.func.expr.as_string()})",) case [nodes.ListComp(), nodes.List()]: args = (f"list({node.iter.as_string()})",) case [nodes.SetComp(), nodes.Set()]: args = (f"set({node.iter.as_string()})",) if args: self.add_message( "unnecessary-comprehension", node=node.parent, args=args ) return match node.parent: case nodes.DictComp(): func = "dict" case nodes.ListComp(): func = "list" case nodes.SetComp(): func = "set" case _: # pragma: no cover raise AssertionError self.add_message( "unnecessary-comprehension", node=node.parent, args=(f"{func}({node.iter.as_string()})",), ) @staticmethod def _is_and_or_ternary(node: nodes.NodeNG | None) -> bool: """Returns true if node is 'condition and true_value or false_value' form. 
All of: condition, true_value and false_value should not be a complex boolean expression """ match node: case nodes.BoolOp( op="or", values=[nodes.BoolOp(op="and", values=[_, v1]), v2] ) if not (isinstance(v2, nodes.BoolOp) or isinstance(v1, nodes.BoolOp)): return True return False @staticmethod def _and_or_ternary_arguments( node: nodes.BoolOp, ) -> tuple[nodes.NodeNG, nodes.NodeNG, nodes.NodeNG]: false_value = node.values[1] condition, true_value = node.values[0].values return condition, true_value, false_value def visit_functiondef(self, node: nodes.FunctionDef) -> None: self._return_nodes[node.name] = list( node.nodes_of_class(nodes.Return, skip_klass=nodes.FunctionDef) ) def _check_consistent_returns(self, node: nodes.FunctionDef) -> None: """Check that all return statements inside a function are consistent. Return statements are consistent if: - all returns are explicit and if there is no implicit return; - all returns are empty and if there is, possibly, an implicit return. Args: node (nodes.FunctionDef): the function holding the return statements. """ # explicit return statements are those with a not None value explicit_returns = [ _node for _node in self._return_nodes[node.name] if _node.value is not None ] if not explicit_returns: return if len(explicit_returns) == len( self._return_nodes[node.name] ) and self._is_node_return_ended(node): return self.add_message("inconsistent-return-statements", node=node) def _is_if_node_return_ended(self, node: nodes.If) -> bool: """Check if the If node ends with an explicit return statement. Args: node (nodes.If): If node to be checked. Returns: bool: True if the node ends with an explicit statement, False otherwise. """ # Do not check if inner function definition are return ended. is_if_returning = any( self._is_node_return_ended(_ifn) for _ifn in node.body if not isinstance(_ifn, nodes.FunctionDef) ) if not node.orelse: # If there is not orelse part then the if statement is returning if : # - there is at least one return statement in its siblings; # - the if body is itself returning. if not self._has_return_in_siblings(node): return False return is_if_returning # If there is an orelse part then both if body and orelse part should return. is_orelse_returning = any( self._is_node_return_ended(_ore) for _ore in node.orelse if not isinstance(_ore, nodes.FunctionDef) ) return is_if_returning and is_orelse_returning def _is_raise_node_return_ended(self, node: nodes.Raise) -> bool: """Check if the Raise node ends with an explicit return statement. Args: node (nodes.Raise): Raise node to be checked. Returns: bool: True if the node ends with an explicit statement, False otherwise. """ # a Raise statement doesn't need to end with a return statement # but if the exception raised is handled, then the handler has to # ends with a return statement if not node.exc: # Ignore bare raises return True if not utils.is_node_inside_try_except(node): # If the raise statement is not inside a try/except statement # then the exception is raised and cannot be caught. No need # to infer it. 
return True exc = utils.safe_infer(node.exc) if ( exc is None or isinstance(exc, UninferableBase) or not hasattr(exc, "pytype") ): return False exc_name = exc.pytype().split(".")[-1] handlers = utils.get_exception_handlers(node, exc_name) handlers = list(handlers) if handlers is not None else [] if handlers: # among all the handlers handling the exception at least one # must end with a return statement return any(self._is_node_return_ended(_handler) for _handler in handlers) # if no handlers handle the exception then it's ok return True def _is_node_return_ended(self, node: nodes.NodeNG) -> bool: """Check if the node ends with an explicit return statement. Args: node (nodes.NodeNG): node to be checked. Returns: bool: True if the node ends with an explicit statement, False otherwise. """ match node: case nodes.Return(): # Recursion base case return True case nodes.Call(): if utils.is_terminating_func(node): return True return any( isinstance(maybe_func, (nodes.FunctionDef, bases.BoundMethod)) and self._is_function_def_never_returning(maybe_func) for maybe_func in utils.infer_all(node.func) ) case nodes.While(): # A while-loop is considered return-ended if it has a # truthy test and no break statements return (node.test.bool_value() and not _loop_exits_early(node)) or any( self._is_node_return_ended(child) for child in node.orelse ) case nodes.Raise(): return self._is_raise_node_return_ended(node) case nodes.If(): return self._is_if_node_return_ended(node) case nodes.Try(): handlers = { _child for _child in node.get_children() if isinstance(_child, nodes.ExceptHandler) } all_but_handler = set(node.get_children()) - handlers return any( self._is_node_return_ended(_child) for _child in all_but_handler ) and all(self._is_node_return_ended(_child) for _child in handlers) case nodes.Assert(test=nodes.Const(value=False | 0)): # consider assert False as a return node return True # recurses on the children of the node return any(self._is_node_return_ended(_child) for _child in node.get_children()) @staticmethod def _has_return_in_siblings(node: nodes.NodeNG) -> bool: """Returns True if there is at least one return in the node's siblings.""" next_sibling = node.next_sibling() while next_sibling: if isinstance(next_sibling, nodes.Return): return True next_sibling = next_sibling.next_sibling() return False def _is_function_def_never_returning( self, node: nodes.FunctionDef | astroid.BoundMethod ) -> bool: """Return True if the function never returns, False otherwise. Args: node (nodes.FunctionDef or astroid.BoundMethod): function definition node to be analyzed. Returns: bool: True if the function never returns, False otherwise. """ try: if node.qname() in self._never_returning_functions: return True except (TypeError, AttributeError): pass try: returns: nodes.NodeNG | None = node.returns except AttributeError: return False # the BoundMethod proxy may be a lambda without a returns match returns: case nodes.Attribute(attrname=name) | nodes.Name(name=name): return name in {"NoReturn", "Never"} return False def _check_return_at_the_end(self, node: nodes.FunctionDef) -> None: """Check for presence of a *single* return statement at the end of a function. "return" or "return None" are useless because None is the default return type if they are missing. NOTE: produces a message only if there is a single return statement in the function body. Otherwise _check_consistent_returns() is called! 
Per its implementation and PEP8 we can have a "return None" at the end of the function body if there are other return statements before that! """ if len(self._return_nodes[node.name]) != 1: return if not node.body: return last = node.body[-1] if isinstance(last, nodes.Return) and len(node.body) == 1: return while isinstance(last, (nodes.If, nodes.Try, nodes.ExceptHandler)): last = last.last_child() match last: case nodes.Return(value=None): # e.g. "return" self.add_message("useless-return", node=node) case nodes.Return(value=nodes.Const(value=None)): # return None" self.add_message("useless-return", node=node) def _check_unnecessary_dict_index_lookup( self, node: nodes.For | nodes.Comprehension ) -> None: """Add message when accessing dict values by index lookup.""" # Verify that we have an .items() call and # that the object which is iterated is used as a subscript in the # body of the for. # Is it a proper items call? match node.iter: case nodes.Call(func=nodes.Attribute(attrname="items", expr=expr)): pass case _: return inferred = utils.safe_infer(node.iter.func) if not isinstance(inferred, astroid.BoundMethod): return iterating_object_name = expr.as_string() # Store potential violations. These will only be reported if we don't # discover any writes to the collection during the loop. messages = [] # Verify that the body of the for loop uses a subscript # with the object that was iterated. This uses some heuristics # in order to make sure that the same object is used in the # for body. children = ( node.body if isinstance(node, nodes.For) else list(node.parent.get_children()) ) # Check if there are any for / while loops within the loop in question; # If so, we will be more conservative about reporting errors as we # can't yet do proper control flow analysis to be sure when # reassignment will affect us nested_loops = itertools.chain.from_iterable( child.nodes_of_class((nodes.For, nodes.While)) for child in children ) has_nested_loops = next(nested_loops, None) is not None for child in children: for subscript in child.nodes_of_class(nodes.Subscript): if not isinstance(subscript.value, (nodes.Name, nodes.Attribute)): continue value = subscript.slice if isinstance(node, nodes.For) and _is_part_of_assignment_target( subscript ): # Ignore this subscript if it is the target of an assignment # Early termination; after reassignment dict index lookup will be necessary return if isinstance(subscript.parent, nodes.Delete): # Ignore this subscript if it's used with the delete keyword return # Case where .items is assigned to k,v (i.e., for k, v in d.items()) if isinstance(value, nodes.Name): if not ( isinstance(node.target, nodes.Tuple) # Ignore 1-tuples: for k, in d.items() and len(node.target.elts) >= 2 and value.name == node.target.elts[0].name and iterating_object_name == subscript.value.as_string() ): continue if ( isinstance(node, nodes.For) and value.lookup(value.name)[1][-1].lineno > node.lineno ): # Ignore this subscript if it has been redefined after # the for loop. 
This checks for the line number using .lookup() # to get the line number where the iterating object was last # defined and compare that to the for loop's line number continue if has_nested_loops: messages.append( { "node": subscript, "variable": node.target.elts[1].as_string(), } ) else: self.add_message( "unnecessary-dict-index-lookup", node=subscript, args=(node.target.elts[1].as_string(),), ) # Case where .items is assigned to single var (i.e., for item in d.items()) elif isinstance(value, nodes.Subscript): if not ( isinstance(node.target, nodes.AssignName) and isinstance(value.value, nodes.Name) and node.target.name == value.value.name and iterating_object_name == subscript.value.as_string() ): continue if ( isinstance(node, nodes.For) and value.value.lookup(value.value.name)[1][-1].lineno > node.lineno ): # Ignore this subscript if it has been redefined after # the for loop. This checks for the line number using .lookup() # to get the line number where the iterating object was last # defined and compare that to the for loop's line number continue # check if subscripted by 0 (key) inferred = utils.safe_infer(value.slice) if not (isinstance(inferred, nodes.Const) and inferred.value == 0): continue if has_nested_loops: messages.append( { "node": subscript, "variable": "1".join( value.as_string().rsplit("0", maxsplit=1) ), } ) else: self.add_message( "unnecessary-dict-index-lookup", node=subscript, args=("1".join(value.as_string().rsplit("0", maxsplit=1)),), ) for message in messages: self.add_message( "unnecessary-dict-index-lookup", node=message["node"], args=(message["variable"],), ) def _check_unnecessary_list_index_lookup( self, node: nodes.For | nodes.Comprehension ) -> None: match node.iter: case nodes.Call(func=nodes.Name(name="enumerate")): pass case _: return preliminary_confidence = HIGH try: iterable_arg = utils.get_argument_from_call( node.iter, position=0, keyword="iterable" ) except utils.NoSuchArgumentError: iterable_arg = utils.infer_kwarg_from_call(node.iter, keyword="iterable") preliminary_confidence = INFERENCE if not isinstance(iterable_arg, nodes.Name): return match node.target: case nodes.Tuple( elts=[nodes.AssignName(name=name1), nodes.AssignName(name=name2), *_] ): pass case _: # enumerate() result is being assigned without destructuring # The value is not being assigned to a single variable, e.g. being # destructured, so we can't necessarily use it. return has_start_arg, confidence = self._enumerate_with_start(node) if has_start_arg: # enumerate is being called with start arg/kwarg so resulting index lookup # is not redundant, hence we should not report an error. return # Preserve preliminary_confidence if it was INFERENCE confidence = ( preliminary_confidence if preliminary_confidence == INFERENCE else confidence ) iterating_object_name = iterable_arg.name # Store potential violations. These will only be reported if we don't # discover any writes to the collection during the loop. 
bad_nodes = [] children = ( node.body if isinstance(node, nodes.For) else list(node.parent.get_children()) ) # Check if there are any for / while loops within the loop in question; # If so, we will be more conservative about reporting errors as we # can't yet do proper control flow analysis to be sure when # reassignment will affect us nested_loops = itertools.chain.from_iterable( child.nodes_of_class((nodes.For, nodes.While)) for child in children ) has_nested_loops = next(nested_loops, None) is not None # Check if there are any if statements within the loop in question; # If so, we will be more conservative about reporting errors as we # can't yet do proper control flow analysis to be sure when # reassignment will affect us if_statements = itertools.chain.from_iterable( child.nodes_of_class(nodes.If) for child in children ) has_if_statements = next(if_statements, None) is not None for child in children: for subscript in child.nodes_of_class(nodes.Subscript): if isinstance(node, nodes.For) and _is_part_of_assignment_target( subscript ): # Ignore this subscript if it is the target of an assignment # Early termination; after reassignment index lookup will be necessary return if isinstance(subscript.parent, nodes.Delete): # Ignore this subscript if it's used with the delete keyword return index = subscript.slice if isinstance(index, nodes.Name): if not ( index.name == name1 and iterating_object_name == subscript.value.as_string() ): continue if ( isinstance(node, nodes.For) and (lookup_results := index.lookup(index.name)[1]) and lookup_results[-1].lineno > node.lineno ): # Ignore this subscript if it has been redefined after # the for loop. continue if ( isinstance(node, nodes.For) and index.lookup(name2)[1][-1].lineno > node.lineno ): # The variable holding the value from iteration has been # reassigned on a later line, so it can't be used. continue if has_nested_loops: # Have found a likely issue, but since there are nested # loops we don't want to report this unless we get to the # end of the loop without updating the collection bad_nodes.append(subscript) elif has_if_statements: continue else: self.add_message( "unnecessary-list-index-lookup", node=subscript, args=(name2,), confidence=confidence, ) for subscript in bad_nodes: self.add_message( "unnecessary-list-index-lookup", node=subscript, args=(name2,), confidence=confidence, ) def _enumerate_with_start( self, node: nodes.For | nodes.Comprehension ) -> tuple[bool, Confidence]: """Check presence of `start` kwarg or second argument to enumerate. For example: `enumerate([1,2,3], start=1)` `enumerate([1,2,3], 1)` If `start` is assigned to `0`, the default value, this is equivalent to not calling `enumerate` with start. """ confidence = HIGH if len(node.iter.args) > 1: # We assume the second argument to `enumerate` is the `start` int arg. # It's a reasonable assumption for now as it's the only possible argument: # https://docs.python.org/3/library/functions.html#enumerate start_arg = node.iter.args[1] start_val, confidence = self._get_start_value(start_arg) if start_val is None: return False, confidence return not start_val == 0, confidence for keyword in node.iter.keywords: if keyword.arg == "start": start_val, confidence = self._get_start_value(keyword.value) if start_val is None: return False, confidence return not start_val == 0, confidence return False, confidence def _get_start_value(self, node: nodes.NodeNG) -> tuple[int | None, Confidence]: # Most common use cases are a constant integer or minus a constant integer. 
We # don't need inference for that. If that's not the case, we assume arbitrary # complexity and we use inference. match node: case nodes.Const(): return node.value, HIGH case nodes.UnaryOp(operand=nodes.Const()): return node.operand.value, HIGH inferred = utils.safe_infer(node) if isinstance(inferred, nodes.Const): return inferred.value, INFERENCE # inferred can be an 'astroid.base.Instance' in 'enumerate(x, int(y))', # for example. We're doing nothing in this case for now, as extracting # the value is costly. # At this point the most likely case is that the node is uninferable # But we don't have to check if it's actually uninferable. return None, INFERENCE
RefactoringChecker
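Note: the masked class in the row above is pylint's refactoring checker. As a quick illustration (hypothetical example code, not part of the dataset row), the snippet below would trigger two of the messages whose detection logic appears in the source: consider-using-max-builtin and no-else-return, with exactly the suggestions the checker builds.

def clamp_lower(a: int, b: int) -> int:
    # Flagged as consider-using-max-builtin: the checker suggests
    # rewriting the two lines below as "a = max(a, b)".
    if a < b:
        a = b
    return a

def sign(x: int) -> str:
    # Flagged as no-else-return: the if-branch always returns, so the
    # checker suggests removing the "else" and de-indenting its body.
    if x >= 0:
        return "+"
    else:
        return "-"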
python
getsentry__sentry
tests/sentry/integrations/github/test_client.py
{ "start": 34193, "end": 36702 }
class ____(TestCase): @mock.patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1") def setUp(self, get_jwt): integration = self.create_integration( organization=self.organization, provider="github", name="Github Test Org", external_id="1", metadata={"access_token": None, "expires_at": None}, ) self.repo_1 = Repository.objects.create( organization_id=self.organization.id, name="Test-Organization/foo", url="https://github.com/Test-Organization/foo", provider="integrations:github", external_id=123, integration_id=integration.id, ) self.repo_2 = Repository.objects.create( organization_id=self.organization.id, name="Test-Organization/bar", url="https://github.com/Test-Organization/bar", provider="integrations:github", external_id=456, integration_id=integration.id, ) self.repo_3 = Repository.objects.create( organization_id=self.organization.id, name="Test-Organization/other", url="https://github.com/Test-Organization/other", provider="integrations:github", external_id=789, integration_id=integration.id, ) install = integration.get_installation(organization_id=self.organization.id) assert isinstance(install, GitHubIntegration) self.install = install self.github_client = self.install.get_client() responses.add( method=responses.POST, url="https://api.github.com/app/installations/1/access_tokens", body='{"token": "12345token", "expires_at": "2030-01-01T00:00:00Z"}', status=200, content_type="application/json", ) responses.add( method=responses.GET, url="https://api.github.com/rate_limit", body=orjson.dumps( { "resources": { "graphql": { "limit": 5000, "used": 1, "remaining": 4999, "reset": 1613064000, } } } ).decode(), status=200, content_type="application/json", )
GitHubClientFileBlameBase
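For context, the setUp in the row above uses the responses library to mock the GitHub app-token exchange and the /rate_limit endpoint. A test method built on that fixture might look like the sketch below; the extra mocked endpoint, its payload, and the get() call are illustrative assumptions, not part of the dataset row.

@responses.activate
def test_fetches_repo(self):
    # Hypothetical endpoint mock; setUp already registers the access-token
    # exchange the client performs before its first authenticated request.
    responses.add(
        method=responses.GET,
        url="https://api.github.com/repos/Test-Organization/foo",
        json={"default_branch": "main"},
        status=200,
    )
    resp = self.github_client.get("/repos/Test-Organization/foo")
    assert resp["default_branch"] == "main"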
python
ansible__ansible
test/units/modules/test_service_facts.py
{ "start": 4790, "end": 5915 }
class ____(unittest.TestCase): def setUp(self): self.mock1 = patch.object(basic.AnsibleModule, 'get_bin_path', return_value='/usr/sbin/lssrc') self.mock1.start() self.addCleanup(self.mock1.stop) self.mock2 = patch.object(basic.AnsibleModule, 'run_command', return_value=(0, LSSRC_OUTPUT, '')) self.mock2.start() self.addCleanup(self.mock2.stop) self.mock3 = patch('platform.system', return_value='AIX') self.mock3.start() self.addCleanup(self.mock3.stop) def test_gather_services(self): svcmod = AIXScanService(basic.AnsibleModule) result = svcmod.gather_services() self.assertIsInstance(result, dict) self.assertIn('IBM.HostRM', result) self.assertEqual(result['IBM.HostRM'], { 'name': 'IBM.HostRM', 'source': 'src', 'state': 'running', }) self.assertIn('IBM.AuditRM', result) self.assertEqual(result['IBM.AuditRM'], { 'name': 'IBM.AuditRM', 'source': 'src', 'state': 'stopped', })
TestAIXScanService
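For reference, the LSSRC_OUTPUT constant patched into run_command above is defined elsewhere in the test module and is not shown here. A hypothetical value consistent with the two assertions (IBM.HostRM running, IBM.AuditRM stopped) would follow the standard `lssrc -a` column layout, where "active" maps to a running service and "inoperative" to a stopped one:

# Hypothetical stand-in for the real fixture; `lssrc -a` prints one row per
# subsystem, and inoperative subsystems have no value in the PID column.
LSSRC_OUTPUT = """\
Subsystem         Group            PID          Status
 IBM.HostRM       rsct_rm          4567890      active
 IBM.AuditRM      rsct_rm                       inoperative
"""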
python
dask__distributed
distributed/tests/test_client.py
{ "start": 108788, "end": 136666 }
class ____(Exception): pass @contextmanager def catch_unhandled_exceptions() -> Generator[None]: loop = asyncio.get_running_loop() ctxs: list[dict[str, Any]] = [] old_handler = loop.get_exception_handler() @loop.set_exception_handler def _(loop: object, context: dict[str, Any]) -> None: ctxs.append(context) try: yield finally: loop.set_exception_handler(old_handler) if ctxs: msgs = [] for i, ctx in enumerate(ctxs, 1): msgs.append(ctx["message"]) print( f"------ Unhandled exception {i}/{len(ctxs)}: {ctx['message']!r} ------" ) print(ctx) if exc := ctx.get("exception"): traceback.print_exception(type(exc), exc, exc.__traceback__) raise UnhandledExceptions(", ".join(msgs)) @gen_cluster(client=True, nthreads=[], client_kwargs={"timeout": 0.5}) async def test_reconnect_timeout(c, s): with ( catch_unhandled_exceptions(), captured_logger(logging.getLogger("distributed.client")) as logger, ): await s.close() while c.status != "closed": await asyncio.sleep(0.05) text = logger.getvalue() assert "Failed to reconnect" in text @pytest.mark.avoid_ci(reason="hangs on github actions ubuntu-latest CI") @pytest.mark.slow @pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows") @pytest.mark.parametrize("worker,count,repeat", [(Worker, 100, 5), (Nanny, 10, 20)]) def test_open_close_many_workers(loop, worker, count, repeat): proc = psutil.Process() with cluster(nworkers=0, active_rpc_timeout=2) as (s, _): gc.collect() before = proc.num_fds() done = Semaphore(0) running = weakref.WeakKeyDictionary() workers = set() status = True async def start_worker(sleep, duration, repeat=1): for _ in range(repeat): await asyncio.sleep(sleep) if not status: return w = worker(s["address"], loop=loop) running[w] = None await w workers.add(w) addr = w.worker_address running[w] = addr await asyncio.sleep(duration) await w.close() del w await asyncio.sleep(0) done.release() for _ in range(count): loop.add_callback( start_worker, random.random() / 5, random.random() / 5, repeat=repeat ) with Client(s["address"], loop=loop) as c: sleep(1) for _ in range(count): done.acquire(timeout=5) gc.collect() if not running: break start = time() while c.nthreads(): sleep(0.2) assert time() < start + 10 while len(workers) < count * repeat: sleep(0.2) status = False [c.sync(w.close) for w in list(workers)] for w in workers: assert w.status == Status.closed start = time() while proc.num_fds() > before: print("fds:", before, proc.num_fds()) sleep(0.1) if time() > start + 10: if worker == Worker: # this is an esoteric case print("File descriptors did not clean up") break else: raise ValueError("File descriptors did not clean up") @gen_cluster() async def test_idempotence(s, a, b): async with ( Client(s.address, asynchronous=True) as c, Client(s.address, asynchronous=True) as f, ): # Submit x = c.submit(inc, 1) await x log = list(s.transition_log) len_single_submit = len(log) # see last assert y = f.submit(inc, 1) assert x.key == y.key await y await asyncio.sleep(0.1) log2 = list(s.transition_log) assert log == log2 # Error a = c.submit(div, 1, 0) await wait(a) assert a.status == "error" log = list(s.transition_log) b = f.submit(div, 1, 0) assert a.key == b.key await wait(b) await asyncio.sleep(0.1) log2 = list(s.transition_log) assert log == log2 s.transition_log.clear() # Simultaneous Submit d = c.submit(inc, 2) e = c.submit(inc, 2) await wait([d, e]) assert len(s.transition_log) == len_single_submit def test_scheduler_info(c): info = c.scheduler_info() assert isinstance(info, dict) assert len(info["workers"]) == 2 assert 
isinstance(info["started"], float) info = c.scheduler_info(n_workers=1) assert len(info["workers"]) == 1 info = c.scheduler_info(n_workers=-1) assert len(info["workers"]) == 2 def test_write_scheduler_file(c, loop): info = c.scheduler_info() with tmpfile("json") as scheduler_file: c.write_scheduler_file(scheduler_file) with Client(scheduler_file=scheduler_file, loop=loop) as c2: info2 = c2.scheduler_info() assert c.scheduler.address == c2.scheduler.address # test that a ValueError is raised if the scheduler_file # attribute is already set with pytest.raises(ValueError): c.write_scheduler_file(scheduler_file) def test_get_versions_sync(c): requests = pytest.importorskip("requests") v = c.get_versions() assert v["scheduler"] is not None assert v["client"] is not None assert len(v["workers"]) == 2 for wv in v["workers"].values(): assert wv is not None c.get_versions(check=True) # smoke test for versions # that this does not raise v = c.get_versions(packages=["requests"]) assert v["client"]["packages"]["requests"] == requests.__version__ @gen_cluster(client=True) async def test_get_versions_async(c, s, a, b): v = await c.get_versions(check=True) assert v.keys() == {"scheduler", "client", "workers"} @gen_cluster(client=True, config={"distributed.comm.timeouts.connect": "200ms"}) async def test_get_versions_rpc_error(c, s, a, b): a.stop() v = await c.get_versions() assert v.keys() == {"scheduler", "client", "workers"} assert v["workers"].keys() == {b.address} def test_threaded_get_within_distributed(c): import dask.multiprocessing for get in [dask.local.get_sync, dask.multiprocessing.get, dask.threaded.get]: def f(get): return get({"x": (lambda: 1,)}, "x") future = c.submit(f, get) assert future.result() == 1 @gen_cluster(client=True) async def test_lose_scattered_data(c, s, a, b): [x] = await c.scatter([1], workers=a.address) await a.close() await asyncio.sleep(0.1) assert x.status == "cancelled" assert x.key not in s.tasks @gen_cluster( client=True, nthreads=[("127.0.0.1", 1)] * 3, config=NO_AMM, ) async def test_partially_lose_scattered_data(e, s, a, b, c): x = await e.scatter(1, workers=a.address) await e.replicate(x, n=2) await a.close() await asyncio.sleep(0.1) assert x.status == "finished" assert s.get_task_status(keys=[x.key]) == {x.key: "memory"} @gen_cluster(client=True, nthreads=[("", 1)]) async def test_scatter_compute_lose(c, s, a): x = (await c.scatter({"x": 1}, workers=[a.address]))["x"] async with BlockedGatherDep(s.address) as b: y = c.submit(inc, x, key="y", workers=[b.address]) await wait_for_state("x", "flight", b) await a.close() b.block_gather_dep.set() with pytest.raises(CancelledError): await wait(y) assert x.status == "cancelled" assert y.status == "cancelled" @gen_cluster(client=True) async def test_scatter_compute_store_lose(c, s, a, b): """ Create irreplaceable data on one machine, cause a dependent computation to occur on another and complete Kill the machine with the irreplaceable data. What happens to the complete result? How about after it GCs and tries to come back? 
""" x = await c.scatter(1, workers=a.address) xx = c.submit(inc, x, workers=a.address) y = c.submit(inc, 1) z = c.submit(slowadd, xx, y, delay=0.2, workers=b.address) await wait(z) await a.close() while x.status == "finished": await asyncio.sleep(0.01) # assert xx.status == 'finished' assert y.status == "finished" assert z.status == "finished" zz = c.submit(inc, z) await wait(zz) zkey = z.key del z while s.get_task_status(keys=[zkey]) != {zkey: "released"}: await asyncio.sleep(0.01) xxkey = xx.key del xx while x.key in s.tasks and zkey not in s.tasks and xxkey not in s.tasks: await asyncio.sleep(0.01) @gen_cluster(client=True) async def test_scatter_compute_store_lose_processing(c, s, a, b): """ Create irreplaceable data on one machine, cause a dependent computation to occur on another and complete Kill the machine with the irreplaceable data. What happens to the complete result? How about after it GCs and tries to come back? """ [x] = await c.scatter([1], workers=a.address) y = c.submit(slowinc, x, delay=0.2) z = c.submit(inc, y) await asyncio.sleep(0.1) await a.close() while x.status == "finished": await asyncio.sleep(0.01) assert y.status == "cancelled" assert z.status == "cancelled" @gen_cluster() async def test_temp_default_client(s, a, b): async with ( Client(s.address, asynchronous=True) as c1, Client(s.address, asynchronous=True) as c2, ): with temp_default_client(c1): assert default_client() is c1 assert default_client(c2) is c2 with temp_default_client(c2): assert default_client() is c2 assert default_client(c1) is c1 @gen_cluster(client=True) async def test_as_current(c, s, a, b): async with ( Client(s.address, asynchronous=True) as c1, Client(s.address, asynchronous=True) as c2, ): with temp_default_client(c): assert Client.current() is c assert Client.current(allow_global=False) is c with c1.as_current(): assert Client.current() is c1 assert Client.current(allow_global=True) is c1 with c2.as_current(): assert Client.current() is c2 assert Client.current(allow_global=True) is c2 def test_as_current_is_thread_local(s, loop): parties = 2 cm_after_enter = threading.Barrier(parties=parties, timeout=5) cm_before_exit = threading.Barrier(parties=parties, timeout=5) def run(): with Client(s["address"], loop=loop) as c: with c.as_current(): cm_after_enter.wait() try: # This line runs only when all parties are inside the # context manager assert Client.current(allow_global=False) is c assert default_client() is c finally: cm_before_exit.wait() with concurrent.futures.ThreadPoolExecutor(max_workers=parties) as tpe: for fut in concurrent.futures.as_completed( [tpe.submit(run) for _ in range(parties)] ): fut.result() @gen_cluster() async def test_as_current_is_task_local(s, a, b): l1 = asyncio.Lock() l2 = asyncio.Lock() l3 = asyncio.Lock() l4 = asyncio.Lock() await l1.acquire() await l2.acquire() await l3.acquire() await l4.acquire() async def run1(): async with Client(s.address, asynchronous=True) as c: with c.as_current(): await l1.acquire() l2.release() try: # This line runs only when both run1 and run2 are inside the # context manager assert Client.current(allow_global=False) is c finally: await l3.acquire() l4.release() async def run2(): async with Client(s.address, asynchronous=True) as c: with c.as_current(): l1.release() await l2.acquire() try: # This line runs only when both run1 and run2 are inside the # context manager assert Client.current(allow_global=False) is c finally: l3.release() await l4.acquire() await asyncio.gather(run1(), run2()) @nodebug # test timing is fragile 
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True, config=NO_AMM) async def test_persist_workers_annotate(e, s, a, b, c): with dask.annotate(workers=a.address, allow_other_workers=False): L1 = [delayed(inc)(i) for i in range(4)] with dask.annotate(workers=b.address, allow_other_workers=False): total = delayed(sum)(L1) with dask.annotate(workers=c.address, allow_other_workers=True): L2 = [delayed(add)(i, total) for i in L1] with dask.annotate(workers=b.address, allow_other_workers=True): total2 = delayed(sum)(L2) # TODO: once annotations are faithfully forwarded upon graph optimization, # we shouldn't need to disable that here. out = e.persist(L1 + L2 + [total, total2], optimize_graph=False) await wait(out) assert all(v.key in a.data for v in L1) assert total.key in b.data assert s.tasks[total2.key].loose_restrictions for v in L2: assert s.tasks[v.key].loose_restrictions @gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True) async def test_persist_workers_annotate2(e, s, a, b, c): addr = a.address def key_to_worker(key): return addr L1 = [delayed(inc)(i) for i in range(4)] for x in L1: assert all(layer.annotations is None for layer in x.dask.layers.values()) with dask.annotate(workers=key_to_worker): out = e.persist(L1, optimize_graph=False) await wait(out) for x in L1: assert all(layer.annotations is None for layer in x.dask.layers.values()) for v in L1: assert s.tasks[v.key].worker_restrictions == {a.address} @nodebug # test timing is fragile @gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True) async def test_persist_workers(e, s, a, b, c): L1 = [delayed(inc)(i) for i in range(4)] total = delayed(sum)(L1) L2 = [delayed(add)(i, total) for i in L1] total2 = delayed(sum)(L2) out = e.persist( L1 + L2 + [total, total2], workers=[a.address, b.address], allow_other_workers=True, ) await wait(out) for v in L1 + L2 + [total, total2]: assert s.tasks[v.key].worker_restrictions == {a.address, b.address} assert not any(c.address in ts.worker_restrictions for ts in s.tasks.values()) assert s.tasks[total.key].loose_restrictions for v in L1 + L2: assert s.tasks[v.key].loose_restrictions @gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True) async def test_compute_workers_annotate(e, s, a, b, c): with dask.annotate(workers=a.address, allow_other_workers=True): L1 = [delayed(inc)(i) for i in range(4)] with dask.annotate(workers=b.address, allow_other_workers=True): total = delayed(sum)(L1) with dask.annotate(workers=[c.address]): L2 = [delayed(add)(i, total) for i in L1] # TODO: once annotations are faithfully forwarded upon graph optimization, # we shouldn't need to disable that here. 
out = e.compute(L1 + L2 + [total], optimize_graph=False)
    await wait(out)

    for v in L1:
        assert s.tasks[v.key].worker_restrictions == {a.address}
    for v in L2:
        assert s.tasks[v.key].worker_restrictions == {c.address}
    assert s.tasks[total.key].worker_restrictions == {b.address}

    assert s.tasks[total.key].loose_restrictions
    for v in L1:
        assert s.tasks[v.key].loose_restrictions


@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_compute_workers(e, s, a, b, c):
    L1 = [delayed(inc)(i) for i in range(4)]
    total = delayed(sum)(L1)
    L2 = [delayed(add)(i, total) for i in L1]

    out = e.compute(
        L1 + L2 + [total],
        workers=[a.address, b.address],
        allow_other_workers=True,
    )
    await wait(out)

    for v in L1 + L2 + [total]:
        assert s.tasks[v.key].worker_restrictions == {a.address, b.address}
    assert not any(c.address in ts.worker_restrictions for ts in s.tasks.values())

    assert s.tasks[total.key].loose_restrictions
    for v in L1 + L2:
        assert s.tasks[v.key].loose_restrictions


@gen_cluster(client=True)
async def test_compute_nested_containers(c, s, a, b):
    np = pytest.importorskip("numpy")
    da = pytest.importorskip("dask.array")
    x = da.ones(10, chunks=(5,)) + 1
    future = c.compute({"x": [x], "y": 123})
    result = await future

    assert isinstance(result, dict)
    assert (result["x"][0] == np.ones(10) + 1).all()
    assert result["y"] == 123


@gen_cluster(client=True)
async def test_scatter_type(c, s, a, b):
    [future] = await c.scatter([1])
    assert future.type == int

    d = await c.scatter({"x": 1.0})
    assert d["x"].type == float


@gen_cluster(client=True)
async def test_retire_workers_2(c, s, a, b):
    [x] = await c.scatter([1], workers=a.address)

    info = await s.retire_workers(workers=[a.address])
    assert info
    assert info[a.address]
    assert "name" in info[a.address]

    assert a.address not in s.workers
    assert b.data == {x.key: 1}

    assert {ws.address for ws in s.tasks[x.key].who_has} == {b.address}
    assert {ts.key for ts in s.workers[b.address].has_what} == {x.key}

    assert a.address not in s.workers


@gen_cluster(client=True, nthreads=[("", 1)] * 10)
async def test_retire_many_workers(c, s, *workers):
    futures = await c.scatter(list(range(100)))
    info = await s.retire_workers(workers=[w.address for w in workers[:7]])
    assert len(info) == 7
    results = await c.gather(futures)
    assert results == list(range(100))

    while len(s.workers) != 3:
        await asyncio.sleep(0.01)

    assert len(s.workers) == 3

    assert all(future.done() for future in futures)
    assert all(s.tasks[future.key].state == "memory" for future in futures)
    assert await c.gather(futures) == list(range(100))

    # Don't count how many tasks landed on each worker.
    # Normally, tasks would be distributed evenly over the surviving workers. However,
    # here all workers share the same process memory, so you'll get an unintuitive
    # distribution of tasks if for any reason one transfer takes longer than 2 seconds
    # and as a consequence the Active Memory Manager ends up running for two iterations.
    # This is something that will happen more frequently on low-powered CI machines.
    # See test_active_memory_manager.py for tests that robustly verify the statistical
    # distribution of tasks after worker retirement.
@gen_cluster( client=True, nthreads=[("127.0.0.1", 3)] * 2, config={ "distributed.scheduler.work-stealing": False, "distributed.scheduler.default-task-durations": {"f": "10ms"}, }, ) async def test_weight_occupancy_against_data_movement(c, s, a, b): def f(x, y=0, z=0): sleep(0.01) return x y = await c.scatter([[1, 2, 3, 4]], workers=[a.address]) z = await c.scatter([1], workers=[b.address]) futures = c.map(f, [1, 2, 3, 4], y=y, z=z) await wait(futures) assert sum(f.key in a.data for f in futures) >= 2 assert sum(f.key in b.data for f in futures) >= 1 @gen_cluster( client=True, nthreads=[("127.0.0.1", 1), ("127.0.0.1", 10)], config=merge( NO_AMM, { "distributed.scheduler.work-stealing": False, "distributed.scheduler.default-task-durations": {"f": "10ms"}, }, ), ) async def test_distribute_tasks_by_nthreads(c, s, a, b): def f(x, y=0): sleep(0.01) return x y = await c.scatter([1], broadcast=True) futures = c.map(f, range(20), y=y) await wait(futures) assert len(b.data) > 2 * len(a.data) @gen_cluster(client=True, clean_kwargs={"threads": False}) async def test_add_done_callback(c, s, a, b): S = set() def f(future): future.add_done_callback(g) def g(future): S.add((future.key, future.status)) u = c.submit(inc, 1, key="u") v = c.submit(throws, "hello", key="v") w = c.submit(slowinc, 2, delay=0.3, key="w") x = c.submit(inc, 3, key="x") u.add_done_callback(f) v.add_done_callback(f) w.add_done_callback(f) await wait((u, v, w, x)) x.add_done_callback(f) while len(S) < 4: await asyncio.sleep(0.01) assert S == {(f.key, f.status) for f in (u, v, w, x)} @gen_cluster(client=True) async def test_normalize_collection(c, s, a, b): x = delayed(inc)(1) y = delayed(inc)(x) z = delayed(inc)(y) yy = c.persist(y) zz = c.normalize_collection(z) assert len(z.dask) == len(y.dask) + 1 assert isinstance(zz.dask[y.key], Future) assert len(zz.dask) < len(z.dask) @gen_cluster(client=True) async def test_normalize_collection_dask_array(c, s, a, b): pytest.importorskip("numpy") da = pytest.importorskip("dask.array") x = da.ones(10, chunks=(5,)) y = x + 1 yy = c.persist(y) z = y.sum() zdsk = dict(z.dask) zz = c.normalize_collection(z) assert z.dask == zdsk # do not mutate input assert len(z.dask) > len(zz.dask) assert any(isinstance(v, Future) for v in zz.dask.values()) for k, v in yy.dask.items(): assert zz.dask[k].key == v.key result1 = await c.compute(z) result2 = await c.compute(zz) assert result1 == result2 @pytest.mark.slow def test_normalize_collection_with_released_futures(c): pytest.importorskip("numpy") da = pytest.importorskip("dask.array") x = da.arange(2**20, chunks=2**10) y = x.persist() wait(y) sol = y.sum().compute() # Start releasing futures del y # Try to reuse futures. Previously this was a race condition, # and the call to `.compute()` would error out due to missing # futures on the scheduler at compute time. 
normalized = c.normalize_collection(x) res = normalized.sum().compute() assert res == sol def assert_no_data_loss(scheduler): for key, start, finish, recommendations, _, _ in scheduler.transition_log: if start == "memory" and finish == "released": for k, v in recommendations.items(): assert not (k == key and v == "waiting") @gen_cluster(client=True) async def test_interleave_computations(c, s, a, b): import distributed distributed.g = s xs = [delayed(slowinc)(i, delay=0.02) for i in range(30)] ys = [delayed(slowdec)(x, delay=0.02) for x in xs] zs = [delayed(slowadd)(x, y, delay=0.02) for x, y in zip(xs, ys)] total = delayed(sum)(zs) future = c.compute(total) done = ("memory", "released") await asyncio.sleep(0.1) x_keys = [x.key for x in xs] y_keys = [y.key for y in ys] z_keys = [z.key for z in zs] while not s.tasks or any(w.processing for w in s.workers.values()): await asyncio.sleep(0.05) x_done = sum(state in done for state in s.get_task_status(keys=x_keys).values()) y_done = sum(state in done for state in s.get_task_status(keys=y_keys).values()) z_done = sum(state in done for state in s.get_task_status(keys=z_keys).values()) assert x_done >= y_done >= z_done assert x_done < y_done + 10 assert y_done < z_done + 10 assert_no_data_loss(s) @pytest.mark.skip(reason="Now prefer first-in-first-out") @gen_cluster(client=True) async def test_interleave_computations_map(c, s, a, b): xs = c.map(slowinc, range(30), delay=0.02) ys = c.map(slowdec, xs, delay=0.02) zs = c.map(slowadd, xs, ys, delay=0.02) done = ("memory", "released") x_keys = [x.key for x in xs] y_keys = [y.key for y in ys] z_keys = [z.key for z in zs] while not s.tasks or any(w.processing for w in s.workers.values()): await asyncio.sleep(0.05) x_done = sum(state in done for state in s.get_task_status(keys=x_keys).values()) y_done = sum(state in done for state in s.get_task_status(keys=y_keys).values()) z_done = sum(state in done for state in s.get_task_status(keys=z_keys).values()) assert x_done >= y_done >= z_done assert x_done < y_done + 10 assert y_done < z_done + 10 @gen_cluster(client=True) async def test_scatter_dict_workers(c, s, a, b): await c.scatter({"a": 10}, workers=[a.address, b.address]) assert "a" in a.data or "a" in b.data @pytest.mark.slow @gen_test() async def test_client_timeout(): """`await Client(...)` keeps retrying for 10 seconds if it can't find the Scheduler straight away """ port = open_port() stop_event = asyncio.Event() async def run_client(): try: async with Client(f"127.0.0.1:{port}", asynchronous=True) as c: return await c.run_on_scheduler(lambda: 123) finally: stop_event.set() async def run_scheduler_after_2_seconds(): # TODO: start a scheduler that waits for the first connection and # closes it await asyncio.sleep(2) async with Scheduler(port=port, dashboard_address=":0"): await stop_event.wait() with dask.config.set({"distributed.comm.timeouts.connect": "10s"}): assert await asyncio.gather( run_client(), run_scheduler_after_2_seconds(), ) == [123, None] @gen_cluster(client=True) async def test_submit_list_kwargs(c, s, a, b): futures = await c.scatter([1, 2, 3]) def f(L=None): return sum(L) future = c.submit(f, L=futures) result = await future assert result == 1 + 2 + 3 @gen_cluster(client=True, nthreads=[("", 1)]) async def test_map_list_kwargs(c, s, a): futures = await c.scatter([1, 2, 3]) def f(i, L=None): return i + sum(L) futures = c.map(f, range(10), L=futures) results = await c.gather(futures) assert results == [i + 6 for i in range(10)] def test_recreate_error_sync(c): x0 = c.submit(dec, 2) y0 
= c.submit(dec, 1)
    x = c.submit(div, 1, x0)
    y = c.submit(div, 1, y0)
    f = c.submit(sum, x, y)
    wait(f)

    assert f.status == "error"
    with pytest.raises(ZeroDivisionError):
        c.recreate_error_locally(f)
    assert f.status == "error"


def test_recreate_error_not_error(c):
    f = c.submit(dec, 2)
    with pytest.raises(ValueError, match="No errored futures passed"):
        c.recreate_error_locally(f)


def test_recreate_task_sync(c):
    x0 = c.submit(dec, 2)
    y0 = c.submit(dec, 2)
    x = c.submit(div, 1, x0)
    y = c.submit(div, 1, y0)
    f = c.submit(sum, [x, y])
    wait(f)

    assert c.recreate_task_locally(f) == 2


@gen_cluster(client=True)
async def test_retire_workers(c, s, a, b):
    assert set(s.workers) == {a.address, b.address}
    info = await c.retire_workers(workers=[a.address], close_workers=True)
    # Deployment tooling sometimes relies on this information being returned.
    # This represents WorkerState.identity() right now but may be slimmed down
    # in the future.
    assert info
    assert info[a.address]
    assert "name" in info[a.address]
    assert set(s.workers) == {b.address}

    while a.status != Status.closed:
        await asyncio.sleep(0.01)
UnhandledExceptions
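The catch_unhandled_exceptions helper in the source above temporarily replaces the running loop's exception handler, records every unhandled-exception context the loop reports, restores the previous handler on exit, and re-raises the collected messages as one UnhandledExceptions. A minimal standalone sketch of the same pattern; the names here are illustrative, not part of the test suite above:

import asyncio

class CollectedErrors(Exception):  # hypothetical stand-in for UnhandledExceptions
    pass

async def main():
    loop = asyncio.get_running_loop()
    contexts = []
    old_handler = loop.get_exception_handler()
    # Collect instead of logging, mirroring the context manager above.
    loop.set_exception_handler(lambda lp, ctx: contexts.append(ctx["message"]))
    try:
        # Anything the loop would log as "unhandled" flows through
        # call_exception_handler; trigger it directly for the demo.
        loop.call_exception_handler({"message": "boom"})
        await asyncio.sleep(0)
    finally:
        loop.set_exception_handler(old_handler)
    if contexts:
        raise CollectedErrors(", ".join(contexts))

asyncio.run(main())  # raises CollectedErrors: boom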
python
getsentry__sentry
src/sentry/shared_integrations/exceptions/__init__.py
{ "start": 4721, "end": 5057 }
class ____(IntegrationError): """ Error when external API access is blocked due to configuration issues like permissions, visibility changes, or invalid project settings. This is not a product error, but rather an integration setup issue that requires user intervention. """ pass
IntegrationConfigurationError
python
neetcode-gh__leetcode
python/0918-maximum-sum-circular-subarray.py
{ "start": 0, "end": 479 }
class ____: def maxSubarraySumCircular(self, nums: List[int]) -> int: globMax, globMin = nums[0], nums[0] curMax, curMin = 0, 0 total = 0 for i, n in enumerate(nums): curMax = max(curMax + n, n) curMin = min(curMin + n, n) total += n globMax = max(curMax, globMax) globMin = min(curMin, globMin) return max(globMax, total - globMin) if globMax > 0 else globMax
Solution
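The circular case in maxSubarraySumCircular rests on one observation: a wrapping subarray is the complement of a contiguous one, so its sum is total minus the minimum contiguous sum, and a single Kadane pass tracking both the running max and running min covers both cases. The globMax > 0 guard handles all-negative input, where total - globMin would correspond to the empty subarray. A quick sanity check, assuming the Solution class above with List imported from typing:

s = Solution()
assert s.maxSubarraySumCircular([5, -3, 5]) == 10     # wraps around: 5 + 5
assert s.maxSubarraySumCircular([1, -2, 3, -2]) == 3  # best subarray does not wrap
assert s.maxSubarraySumCircular([-3, -2, -3]) == -2   # all negative: guard branch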
python
sympy__sympy
sympy/functions/elementary/exponential.py
{ "start": 6519, "end": 19814 }
class ____(ExpBase, metaclass=ExpMeta): """ The exponential function, :math:`e^x`. Examples ======== >>> from sympy import exp, I, pi >>> from sympy.abc import x >>> exp(x) exp(x) >>> exp(x).diff(x) exp(x) >>> exp(I*pi) -1 Parameters ========== arg : Expr See Also ======== sympy.functions.elementary.exponential.log """ def fdiff(self, argindex=1): """ Returns the first derivative of this function. """ if argindex == 1: return self else: raise ArgumentIndexError(self, argindex) def _eval_refine(self, assumptions): from sympy.assumptions import ask, Q arg = self.args[0] if arg.is_Mul: Ioo = I*S.Infinity if arg in [Ioo, -Ioo]: return S.NaN coeff = arg.as_coefficient(pi*I) if coeff: if ask(Q.integer(2*coeff)): if ask(Q.even(coeff)): return S.One elif ask(Q.odd(coeff)): return S.NegativeOne elif ask(Q.even(coeff + S.Half)): return -I elif ask(Q.odd(coeff + S.Half)): return I @classmethod def eval(cls, arg): from sympy.calculus import AccumBounds from sympy.matrices.matrixbase import MatrixBase from sympy.sets.setexpr import SetExpr from sympy.simplify.simplify import logcombine if isinstance(arg, MatrixBase): return arg.exp() elif global_parameters.exp_is_pow: return Pow(S.Exp1, arg) elif arg.is_Number: if arg is S.NaN: return S.NaN elif arg.is_zero: return S.One elif arg is S.One: return S.Exp1 elif arg is S.Infinity: return S.Infinity elif arg is S.NegativeInfinity: return S.Zero elif arg is S.ComplexInfinity: return S.NaN elif isinstance(arg, log): return arg.args[0] elif isinstance(arg, AccumBounds): return AccumBounds(exp(arg.min), exp(arg.max)) elif isinstance(arg, SetExpr): return arg._eval_func(cls) elif arg.is_Mul: coeff = arg.as_coefficient(pi*I) if coeff: if (2*coeff).is_integer: if coeff.is_even: return S.One elif coeff.is_odd: return S.NegativeOne elif (coeff + S.Half).is_even: return -I elif (coeff + S.Half).is_odd: return I elif coeff.is_Rational: ncoeff = coeff % 2 # restrict to [0, 2pi) if ncoeff > 1: # restrict to (-pi, pi] ncoeff -= 2 if ncoeff != coeff: return cls(ncoeff*pi*I) # Warning: code in risch.py will be very sensitive to changes # in this (see DifferentialExtension). # look for a single log factor coeff, terms = arg.as_coeff_Mul() # but it can't be multiplied by oo if coeff in [S.NegativeInfinity, S.Infinity]: if terms.is_number: if coeff is S.NegativeInfinity: terms = -terms if re(terms).is_zero and terms is not S.Zero: return S.NaN if re(terms).is_positive and im(terms) is not S.Zero: return S.ComplexInfinity if re(terms).is_negative: return S.Zero return None coeffs, log_term = [coeff], None for term in Mul.make_args(terms): term_ = logcombine(term) if isinstance(term_, log): if log_term is None: log_term = term_.args[0] else: return None elif term.is_comparable: coeffs.append(term) else: return None return log_term**Mul(*coeffs) if log_term else None elif arg.is_Add: out = [] add = [] argchanged = False for a in arg.args: if a is S.One: add.append(a) continue newa = cls(a) if isinstance(newa, cls): if newa.args[0] != a: add.append(newa.args[0]) argchanged = True else: add.append(a) else: out.append(newa) if out or argchanged: return Mul(*out)*cls(Add(*add), evaluate=False) if arg.is_zero: return S.One @property def base(self): """ Returns the base of the exponential function. """ return S.Exp1 @staticmethod @cacheit def taylor_term(n, x, *previous_terms): """ Calculates the next term in the Taylor series expansion. 
""" if n < 0: return S.Zero if n == 0: return S.One x = sympify(x) if previous_terms: p = previous_terms[-1] if p is not None: return p * x / n return x**n/factorial(n) def as_real_imag(self, deep=True, **hints): """ Returns this function as a 2-tuple representing a complex number. Examples ======== >>> from sympy import exp, I >>> from sympy.abc import x >>> exp(x).as_real_imag() (exp(re(x))*cos(im(x)), exp(re(x))*sin(im(x))) >>> exp(1).as_real_imag() (E, 0) >>> exp(I).as_real_imag() (cos(1), sin(1)) >>> exp(1+I).as_real_imag() (E*cos(1), E*sin(1)) See Also ======== sympy.functions.elementary.complexes.re sympy.functions.elementary.complexes.im """ from sympy.functions.elementary.trigonometric import cos, sin re, im = self.args[0].as_real_imag() if deep: re = re.expand(deep, **hints) im = im.expand(deep, **hints) cos, sin = cos(im), sin(im) return (exp(re)*cos, exp(re)*sin) def _eval_subs(self, old, new): # keep processing of power-like args centralized in Pow if old.is_Pow: # handle (exp(3*log(x))).subs(x**2, z) -> z**(3/2) old = exp(old.exp*log(old.base)) elif old is S.Exp1 and new.is_Function: old = exp if isinstance(old, exp) or old is S.Exp1: f = lambda a: Pow(*a.as_base_exp(), evaluate=False) if ( a.is_Pow or isinstance(a, exp)) else a return Pow._eval_subs(f(self), f(old), new) if old is exp and not new.is_Function: return new**self.exp._subs(old, new) return super()._eval_subs(old, new) def _eval_is_extended_real(self): if self.args[0].is_extended_real: return True elif self.args[0].is_imaginary: arg2 = -S(2) * I * self.args[0] / pi return arg2.is_even def _eval_is_complex(self): def complex_extended_negative(arg): yield arg.is_complex yield arg.is_extended_negative return fuzzy_or(complex_extended_negative(self.args[0])) def _eval_is_algebraic(self): if (self.exp / pi / I).is_rational: return True if fuzzy_not(self.exp.is_zero): if self.exp.is_algebraic: return False elif (self.exp / pi).is_rational: return False def _eval_is_extended_positive(self): if self.exp.is_extended_real: return self.args[0] is not S.NegativeInfinity elif self.exp.is_imaginary: arg2 = -I * self.args[0] / pi return arg2.is_even def _eval_nseries(self, x, n, logx, cdir=0): # NOTE Please see the comment at the beginning of this file, labelled # IMPORTANT. 
from sympy.functions.elementary.complexes import sign from sympy.functions.elementary.integers import ceiling from sympy.series.limits import limit from sympy.series.order import Order from sympy.simplify.powsimp import powsimp arg = self.exp arg_series = arg._eval_nseries(x, n=n, logx=logx) if arg_series.is_Order: return 1 + arg_series arg0 = limit(arg_series.removeO(), x, 0) if arg0 is S.NegativeInfinity: return Order(x**n, x) if arg0 is S.Infinity: return self if arg0.is_infinite: raise PoleError("Cannot expand %s around 0" % (self)) # checking for indecisiveness/ sign terms in arg0 if any(isinstance(arg, sign) for arg in arg0.args): return self t = Dummy("t") nterms = n try: cf = Order(arg.as_leading_term(x, logx=logx), x).getn() except (NotImplementedError, PoleError): cf = 0 if cf and cf > 0: nterms = ceiling(n/cf) exp_series = exp(t)._taylor(t, nterms) r = exp(arg0)*exp_series.subs(t, arg_series - arg0) rep = {logx: log(x)} if logx is not None else {} if r.subs(rep) == self: return r if cf and cf > 1: r += Order((arg_series - arg0)**n, x)/x**((cf-1)*n) else: r += Order((arg_series - arg0)**n, x) r = r.expand() r = powsimp(r, deep=True, combine='exp') # powsimp may introduce unexpanded (-1)**Rational; see PR #17201 simplerat = lambda x: x.is_Rational and x.q in [3, 4, 6] w = Wild('w', properties=[simplerat]) r = r.replace(S.NegativeOne**w, expand_complex(S.NegativeOne**w)) return r def _taylor(self, x, n): l = [] g = None for i in range(n): g = self.taylor_term(i, self.args[0], g) g = g.nseries(x, n=n) l.append(g.removeO()) return Add(*l) def _eval_as_leading_term(self, x, logx, cdir): from sympy.calculus.util import AccumBounds arg = self.args[0].cancel().as_leading_term(x, logx=logx) arg0 = arg.subs(x, 0) if arg is S.NaN: return S.NaN if isinstance(arg0, AccumBounds): # This check addresses a corner case involving AccumBounds. # if isinstance(arg, AccumBounds) is True, then arg0 can either be 0, # AccumBounds(-oo, 0) or AccumBounds(-oo, oo). # Check out function: test_issue_18473() in test_exponential.py and # test_limits.py for more information. if re(cdir) < S.Zero: return exp(-arg0) return exp(arg0) if arg0 is S.NaN: arg0 = arg.limit(x, 0) if arg0.is_infinite is False: return exp(arg0) raise PoleError("Cannot expand %s around 0" % (self)) def _eval_rewrite_as_sin(self, arg, **kwargs): from sympy.functions.elementary.trigonometric import sin return sin(I*arg + pi/2) - I*sin(I*arg) def _eval_rewrite_as_cos(self, arg, **kwargs): from sympy.functions.elementary.trigonometric import cos return cos(I*arg) + I*cos(I*arg + pi/2) def _eval_rewrite_as_tanh(self, arg, **kwargs): from sympy.functions.elementary.hyperbolic import tanh return (1 + tanh(arg/2))/(1 - tanh(arg/2)) def _eval_rewrite_as_sqrt(self, arg, **kwargs): from sympy.functions.elementary.trigonometric import sin, cos if arg.is_Mul: coeff = arg.coeff(pi*I) if coeff and coeff.is_number: cosine, sine = cos(pi*coeff), sin(pi*coeff) if not isinstance(cosine, cos) and not isinstance (sine, sin): return cosine + I*sine def _eval_rewrite_as_Pow(self, arg, **kwargs): if arg.is_Mul: logs = [a for a in arg.args if isinstance(a, log) and len(a.args) == 1] if logs: return Pow(logs[0].args[0], arg.coeff(logs[0])) def match_real_imag(expr): r""" Try to match expr with $a + Ib$ for real $a$ and $b$. ``match_real_imag`` returns a tuple containing the real and imaginary parts of expr or ``(None, None)`` if direct matching is not possible. 
Contrary to :func:`~.re`, :func:`~.im`, and ``as_real_imag()``, this
    helper will not force things by returning expressions themselves
    containing ``re()`` or ``im()`` and it does not expand its argument
    either.
    """
    r_, i_ = expr.as_independent(I, as_Add=True)
    if i_ == 0 and r_.is_real:
        return (r_, i_)
    i_ = i_.as_coefficient(I)
    if i_ and i_.is_real and r_.is_real:
        return (r_, i_)
    else:
        return (None, None)  # simpler to check for than None
exp
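The eval classmethod in the exp class above drives the automatic simplifications seen interactively: numeric special cases, collapsing exp(log(x)) to x, mapping rational multiples of pi*I onto {1, -1, I, -I}, and otherwise canonicalizing the angle into (-pi, pi]. A few concrete instances of standard sympy behavior:

from sympy import exp, log, I, pi, Rational, Symbol

x = Symbol("x", positive=True)
assert exp(0) == 1                     # arg.is_zero branch
assert exp(I * pi) == -1               # odd coefficient of pi*I
assert exp(3 * I * pi / 2) == -I       # (coeff + 1/2) even branch
assert exp(log(x)) == x                # isinstance(arg, log) branch
assert exp(I * pi * Rational(7, 3)) == exp(I * pi / 3)  # angle reduced mod 2*pi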
python
davidhalter__jedi
jedi/inference/value/dynamic_arrays.py
{ "start": 7312, "end": 7527 }
class ____(_Modification): def py__iter__(self, contextualized_node=None): yield from self._wrapped_value.py__iter__(contextualized_node) yield LazyKnownValues(self._assigned_values)
ListModification
python
airbytehq__airbyte
airbyte-integrations/connectors/source-tiktok-marketing/unit_tests/integration/test_creative_assets_portfolios.py
{ "start": 534, "end": 1479 }
class ____(TestCase): stream_name = "creative_assets_portfolios" advertiser_id = "872746382648" def catalog(self, sync_mode: SyncMode = SyncMode.full_refresh): return CatalogBuilder().with_stream(name=self.stream_name, sync_mode=sync_mode).build() def config(self): return ConfigBuilder().build() @HttpMocker() def test_basic_read(self, http_mocker: HttpMocker): mock_advertisers_slices(http_mocker, self.config()) http_mocker.get( HttpRequest( url=f"https://business-api.tiktok.com/open_api/v1.3/creative/portfolio/list/?page_size=100&advertiser_id=872746382648", ), HttpResponse(body=json.dumps(find_template(self.stream_name, __file__)), status_code=200), ) output = read(get_source(config=self.config(), state=None), self.config(), self.catalog()) assert len(output.records) == 2
TestCreativeAssetsPortfolios
python
catalyst-team__catalyst
catalyst/data/loader.py
{ "start": 193, "end": 1380 }
class ____(DataLoader):
    """Loader wrapper interface.

    Args:
        loader: torch dataloader.
    """

    def __init__(self, loader: DataLoader):
        """Init"""
        self.origin = loader

    def __getattr__(self, key):
        """
        Gets attribute by ``key``.
        Firstly, looks at the ``origin`` loader for the appropriate ``key``.
        If it is not found there, looks at the wrapper's own attributes.
        If nothing is found in either place, returns ``None``.

        Args:
            key: attribute key

        Returns:
            attribute value
        """
        some_default_value = "_no_attr_found_"
        value = self.origin.__dict__.get(key, some_default_value)
        # value = getattr(self.origin, key, None)
        if value != some_default_value:
            return value
        value = self.__dict__.get(key, some_default_value)
        # value = getattr(self, key, None)
        if value != some_default_value:
            return value
        return None

    def __len__(self) -> int:
        """Returns the length of the wrapped loader.

        Returns:
            int: length of the wrapped loader
        """
        return len(self.origin)
ILoaderWrapper
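Because __getattr__ above consults the wrapped loader's __dict__ first, code that reaches for attributes such as batch_size or dataset keeps working after wrapping. A small usage sketch; the toy dataset is illustrative:

import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.arange(10).float().unsqueeze(1))
loader = DataLoader(dataset, batch_size=2)

wrapped = ILoaderWrapper(loader)
assert len(wrapped) == len(loader) == 5
assert wrapped.batch_size == 2     # delegated to the wrapped loader
assert wrapped.dataset is dataset  # same delegation path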
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_line01.py
{ "start": 315, "end": 1343 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chart_line01.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({"type": "line"}) chart.axis_ids = [43408000, 43434368] data = [ [1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15], ] worksheet.write_column("A1", data[0]) worksheet.write_column("B1", data[1]) worksheet.write_column("C1", data[2]) chart.add_series({"values": "=Sheet1!$A$1:$A$5"}) chart.add_series({"values": "=Sheet1!$B$1:$B$5"}) chart.add_series({"values": "=Sheet1!$C$1:$C$5"}) worksheet.insert_chart("E9", chart) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
pypa__pip
src/pip/_internal/cli/index_command.py
{ "start": 1419, "end": 4592 }
class ____(CommandContextMixIn): """ A class mixin for command classes needing _build_session(). """ def __init__(self) -> None: super().__init__() self._session: PipSession | None = None @classmethod def _get_index_urls(cls, options: Values) -> list[str] | None: """Return a list of index urls from user-provided options.""" index_urls = [] if not getattr(options, "no_index", False): url = getattr(options, "index_url", None) if url: index_urls.append(url) urls = getattr(options, "extra_index_urls", None) if urls: index_urls.extend(urls) # Return None rather than an empty list return index_urls or None def get_default_session(self, options: Values) -> PipSession: """Get a default-managed session.""" if self._session is None: self._session = self.enter_context(self._build_session(options)) # there's no type annotation on requests.Session, so it's # automatically ContextManager[Any] and self._session becomes Any, # then https://github.com/python/mypy/issues/7696 kicks in assert self._session is not None return self._session def _build_session( self, options: Values, retries: int | None = None, timeout: int | None = None, ) -> PipSession: from pip._internal.network.session import PipSession cache_dir = options.cache_dir assert not cache_dir or os.path.isabs(cache_dir) if "legacy-certs" not in options.deprecated_features_enabled: ssl_context = _create_truststore_ssl_context() else: ssl_context = None session = PipSession( cache=os.path.join(cache_dir, "http-v2") if cache_dir else None, retries=retries if retries is not None else options.retries, trusted_hosts=options.trusted_hosts, index_urls=self._get_index_urls(options), ssl_context=ssl_context, ) # Handle custom ca-bundles from the user if options.cert: session.verify = options.cert # Handle SSL client certificate if options.client_cert: session.cert = options.client_cert # Handle timeouts if options.timeout or timeout: session.timeout = timeout if timeout is not None else options.timeout # Handle configured proxies if options.proxy: session.proxies = { "http": options.proxy, "https": options.proxy, } session.trust_env = False session.pip_proxy = options.proxy # Determine if we can prompt the user for authentication or not session.auth.prompting = not options.no_input session.auth.keyring_provider = options.keyring_provider return session def _pip_self_version_check(session: PipSession, options: Values) -> None: from pip._internal.self_outdated_check import pip_self_version_check as check check(session, options)
SessionCommandMixin
python
joke2k__faker
faker/providers/company/en_PH/__init__.py
{ "start": 82, "end": 3694 }
class ____(CompanyProvider): """ Provider for company names for en_PH locale Company naming scheme and probabilities are inspired by and/or based on existing companies in the Philippines. Sources: - https://en.wikipedia.org/wiki/List_of_companies_of_the_Philippines - https://www.pse.com.ph/stockMarket/listedCompanyDirectory.html """ formats = OrderedDict( [ ( "{{random_company_adjective}} {{random_company_noun_chain}} {{company_type}} {{company_suffix}}", 0.24, ), ( "{{random_company_acronym}} {{random_company_noun_chain}} {{company_type}} {{company_suffix}}", 0.24, ), ( "{{last_name}} {{random_company_noun_chain}} {{company_type}} {{company_suffix}}", 0.16, ), ("{{random_company_adjective}} {{company_type}} {{company_suffix}}", 0.12), ("{{random_company_acronym}} {{company_type}} {{company_suffix}}", 0.12), ("{{last_name}} {{company_type}} {{company_suffix}}", 0.09), ( "National {{random_company_product}} Corporation of the Philippines", 0.03, ), ] ) company_suffixes = OrderedDict( [ ("Inc.", 0.45), ("Corporation", 0.45), ("Limited", 0.1), ] ) company_types = ( "Bank", "Banking", "Capital", "Company", "Construction", "Development", "Enterprise", "Equities", "Finance", "Foods", "Group", "Holdings", "Hotel", "Manufacturing", "Mining", "Properties", "Resorts", "Resources", "Services", "Shipping", "Solutions", "Technologies", "Trust", "Ventures", ) company_products = ( "Bottle", "Coconut", "Computer", "Electricity", "Flour", "Furniture", "Glass", "Newspaper", "Pillow", "Water", ) company_nouns = ( "Century", "City", "Crown", "Dragon", "Empire", "Genesis", "Gold", "King", "Liberty", "Millennium", "Morning", "Silver", "Star", "State", "Summit", "Sun", "Union", "World", ) company_adjectives = ( "Advanced", "Rising", "Double", "Triple", "Quad", "Allied", "Cyber", "Sovereign", "Great", "Far", "Northern", "Southern", "Eastern", "Western", "First", "Filipino", "Grand", "Manila", "Mega", "Metro", "Global", "Pacific", "Oriental", "Philippine", "Prime", ) def company_type(self) -> str: return self.random_element(self.company_types) def random_company_adjective(self) -> str: return self.random_element(self.company_adjectives) def random_company_noun_chain(self) -> str: return " ".join(self.random_elements(self.company_nouns, length=self.random_int(1, 2), unique=True)) def random_company_product(self) -> str: return self.random_element(self.company_products) def random_company_acronym(self) -> str: letters = self.random_letters(self.random_int(2, 4)) return "".join(letters).upper()
Provider
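Because the format weights in the OrderedDict sum to 1.0, company() draws exactly one pattern per call, then fills the placeholders from the adjective, noun, acronym, and type pools. Assuming a standard Faker installation where the en_PH locale resolves to this provider:

from faker import Faker

fake = Faker("en_PH")
fake.seed_instance(0)
for _ in range(3):
    # Output shape follows the weighted formats above, e.g.
    # "Mega Star City Holdings Inc." (exact strings depend on the seed).
    print(fake.company())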
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_top_left_cell02.py
{ "start": 315, "end": 813 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("top_left_cell02.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() worksheet.set_top_left_cell(15, 6) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
airbytehq__airbyte
airbyte-integrations/connectors/destination-convex/destination_convex/client.py
{ "start": 171, "end": 2501 }
class ____: def __init__(self, config: ConvexConfig, table_metadata: Mapping[str, Any]): self.deployment_url = config["deployment_url"] self.access_key = config["access_key"] self.table_metadata = table_metadata def batch_write(self, records: List[Mapping[str, Any]]) -> requests.Response: """ See Convex docs: https://docs.convex.dev/http-api/#post-apistreaming_importimport_airbyte_records """ request_body = {"tables": self.table_metadata, "messages": records} return self._request("POST", endpoint="import_airbyte_records", json=request_body) def delete(self, keys: List[str]) -> requests.Response: """ See Convex docs: https://docs.convex.dev/http-api/#put-apistreaming_importclear_tables """ request_body = {"tableNames": keys} return self._request("PUT", endpoint="clear_tables", json=request_body) def add_primary_key_indexes(self, indexes: Mapping[str, List[List[str]]]) -> requests.Response: """ See Convex docs: https://docs.convex.dev/http-api/#put-apistreaming_importadd_primary_key_indexes """ return self._request("PUT", "add_primary_key_indexes", json={"indexes": indexes}) def primary_key_indexes_ready(self, tables: List[str]) -> requests.Response: """ See Convex docs: https://docs.convex.dev/http-api/#get-apistreaming_importprimary_key_indexes_ready """ return self._request("GET", "primary_key_indexes_ready", json={"tables": tables}) def _get_auth_headers(self) -> Mapping[str, str]: return {"Authorization": f"Convex {self.access_key}"} def _request( self, http_method: str, endpoint: str, json: Mapping[str, Any], ) -> requests.Response: url = f"{self.deployment_url}/api/streaming_import/{endpoint}" headers = { "Accept": "application/json", "Convex-Client": "streaming-import-0.1.0", **self._get_auth_headers(), } response = requests.request(method=http_method, url=url, headers=headers, json=json) if response.status_code != 200: raise Exception(f"Request to `{url}` failed with status code {response.status_code}: {response.text}") return response
ConvexClient
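Every public method of the client above funnels through _request, which fixes the /api/streaming_import/ base path, the Convex auth header, and the non-200 failure check. A hypothetical end-to-end sketch; the deployment URL, access key, table metadata, and record shape below are placeholders for illustration, not values taken from the Convex API docs:

config = {
    "deployment_url": "https://example-deployment.convex.cloud",  # placeholder
    "access_key": "convex-secret-key",                            # placeholder
}
table_metadata = {"users": {}}  # illustrative shape

client = ConvexClient(config, table_metadata)
client.delete(["users"])                             # PUT .../clear_tables
client.add_primary_key_indexes({"users": [["id"]]})  # PUT .../add_primary_key_indexes
client.batch_write([{"table": "users", "data": {"id": 1}}])  # illustrative record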
python
dask__dask
dask/array/_array_expr/_creation.py
{ "start": 2375, "end": 3617 }
class ____(Arange): _parameters = ["start", "stop", "num", "endpoint", "chunks", "dtype"] _defaults = {"num": 50, "endpoint": True, "chunks": "auto", "dtype": None} like = None @functools.cached_property def num_rows(self): return self.operand("num") @functools.cached_property def dtype(self): return self.operand("dtype") or np.linspace(0, 1, 1).dtype @functools.cached_property def step(self): range_ = self.stop - self.start div = (self.num_rows - 1) if self.endpoint else self.num_rows if div == 0: div = 1 return float(range_) / div def _layer(self) -> dict: dsk = {} blockstart = self.start func = partial(_linspace, endpoint=self.endpoint, dtype=self.dtype) for i, bs in enumerate(self.chunks[0]): bs_space = bs - 1 if self.endpoint else bs blockstop = blockstart + (bs_space * self.step) task = Task( (self._name, i), func, blockstart, blockstop, bs, ) blockstart = blockstart + (self.step * bs) dsk[task.key] = task return dsk
Linspace
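The step property of Linspace reproduces NumPy's linspace spacing: the divisor is num - 1 when endpoint=True, else num, and _layer then advances blockstart by step * chunk_size per chunk (using one fewer step inside a chunk when the endpoint is included). For example, start=0, stop=10, num=5, endpoint=True gives step = 10 / 4 = 2.5, which is easy to sanity-check against NumPy:

import numpy as np

start, stop, num, endpoint = 0.0, 10.0, 5, True
div = (num - 1) if endpoint else num
step = float(stop - start) / div
assert step == 2.5
assert np.allclose(
    np.linspace(start, stop, num, endpoint=endpoint),
    start + step * np.arange(num),
)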
python
weaviate__weaviate-python-client
weaviate/collections/classes/config.py
{ "start": 13261, "end": 13372 }
class ____(_GenerativeOpenAIConfigBase): resourceName: str deploymentId: str
_GenerativeAzureOpenAIConfig
python
astropy__astropy
astropy/modeling/spline.py
{ "start": 19820, "end": 21928 }
class ____(_SplineFitter):
    """
    Fit a smoothing spline.
    """

    def __call__(self, model, x, y, **kwargs):
        """
        Fit a smoothing spline to data.

        Parameters
        ----------
        model : `Spline1D`
            The spline model to fit.
        x : array-like
            The x data values.
        y : array-like
            The y data values.
        s : float, optional
            Positive smoothing factor used to choose the number of knots.
            The parameter can be used to control the tradeoff between
            closeness and smoothness of fit. Larger ``s`` means more
            smoothing while smaller values of ``s`` indicate less smoothing.
            If ``s`` is not given, it is chosen automatically. A value of 0
            results in an interpolating spline.
            See `scipy.interpolate.UnivariateSpline` for details.
        **kwargs : dict, optional
            Additional keyword arguments:

            - ``weights`` : array-like, optional
                Weights for the data points.
            - ``bbox`` : array-like, optional
                The bounding box limits as ``[xmin, xmax]``. Default is
                ``[None, None]``.

        Returns
        -------
        fitted_model : `Spline1D`
            A copy of the input model with fitted parameters.
        """
        return super().__call__(model, x, y, **kwargs)

    def _fit_method(self, model, x, y, **kwargs):
        s = kwargs.pop("s", None)
        weights = kwargs.pop("weights", None)
        bbox = kwargs.pop("bbox", [None, None])

        if model.user_knots:
            warnings.warn(
                "The user-specified knots from the input model "
                "will be ignored for smoothing data.",
                AstropyUserWarning,
            )
            model.user_knots = False

        if bbox != [None, None]:
            model.bounding_box = bbox

        from scipy.interpolate import UnivariateSpline

        spline = UnivariateSpline(x, y, w=weights, bbox=bbox, k=model.degree, s=s)
        model.tck = spline._eval_args
        return spline
SplineSmoothingFitter
python
dagster-io__dagster
python_modules/dagster/dagster/_config/stack.py
{ "start": 1813, "end": 3056 }
class ____(EvaluationStackEntry): parent: Optional["EvaluationStackEntry"] = None def iter_entries(self): yield from [] def get_friendly_path_msg(stack: EvaluationStackEntry) -> str: return get_friendly_path_info(stack)[0] def get_friendly_path_info(stack: EvaluationStackEntry) -> tuple[str, str]: if isinstance(stack, EvaluationStackRoot): path = "" path_msg = "at the root" else: comps = ["root"] for entry in stack.entries: if isinstance(entry, EvaluationStackPathEntry): comp = ":" + entry.field_name comps.append(comp) elif isinstance(entry, EvaluationStackListItemEntry): comps.append(f"[{entry.list_index}]") elif isinstance(entry, EvaluationStackMapKeyEntry): comp = ":" + repr(entry.map_key) + ":key" comps.append(comp) elif isinstance(entry, EvaluationStackMapValueEntry): comp = ":" + repr(entry.map_key) + ":value" comps.append(comp) else: check.failed("unsupported") path = "".join(comps) path_msg = "at path " + path return path_msg, path
EvaluationStackRoot
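get_friendly_path_info above renders a stack as a root-anchored path: field entries become :name, list entries [i], and map entries :key/:value suffixes, while EvaluationStackRoot renders as "at the root". The rendering logic in isolation, with entry kinds modeled as plain tuples for the sketch:

def friendly_path(entries):
    comps = ["root"]
    for kind, value in entries:
        if kind == "field":
            comps.append(":" + value)
        elif kind == "list":
            comps.append(f"[{value}]")
        elif kind == "map_key":
            comps.append(":" + repr(value) + ":key")
        elif kind == "map_value":
            comps.append(":" + repr(value) + ":value")
    return "".join(comps)

assert friendly_path([("field", "solids"), ("list", 0)]) == "root:solids[0]"
assert friendly_path([("field", "env"), ("map_value", "a")]) == "root:env:'a':value"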
python
openai__openai-python
src/openai/types/fine_tuning/alpha/grader_run_response.py
{ "start": 1094, "end": 1323 }
class ____(BaseModel): errors: MetadataErrors execution_time: float name: str sampled_model_name: Optional[str] = None scores: Dict[str, object] token_usage: Optional[int] = None type: str
Metadata
python
getsentry__sentry
src/sentry/api/serializers/rest_framework/savedsearch.py
{ "start": 614, "end": 1154 }
class ____(BaseOrganizationSearchSerializer):
    """
    Organization admins/owners may create organization-wide saved searches
    """

    # TODO(epurkhiser): Once the frontend is deployed we should change this to
    # default to OWNER since that is a more sane default than
    # organization-visible.
    visibility = serializers.ChoiceField(
        choices=select_visibility_choices([Visibility.OWNER, Visibility.ORGANIZATION]),
        default=Visibility.ORGANIZATION,
        required=False,
    )
OrganizationSearchAdminSerializer
python
huggingface__transformers
src/transformers/models/vilt/modeling_vilt.py
{ "start": 18791, "end": 20360 }
class ____(GradientCheckpointingLayer): """This corresponds to the Block class in the timm implementation.""" def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = ViltAttention(config) self.intermediate = ViltIntermediate(config) self.output = ViltOutput(config) self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, attention_mask=None, output_attentions=False): self_attention_outputs = self.attention( self.layernorm_before(hidden_states), # in ViLT, layernorm is applied before self-attention attention_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # first residual connection hidden_states = attention_output + hidden_states.to(attention_output.device) # in ViLT, layernorm is also applied after self-attention layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) # second residual connection is done here layer_output = self.output(layer_output, hidden_states) outputs = (layer_output,) + outputs return outputs
ViltLayer
python
prabhupant__python-ds
data_structures/graphs/cycle_in_undirected_graph.py
{ "start": 38, "end": 773 }
class ____:
    def __init__(self, vertices):
        self.V = vertices
        self.graph = defaultdict(list)

    def add_edge(self, u, v):
        # The graph is undirected, so record the edge in both directions;
        # the parent check in is_cyclic_util relies on this.
        self.graph[u].append(v)
        self.graph[v].append(u)

    def is_cyclic_util(self, v, visited, parent):
        visited[v] = True

        for i in self.graph[v]:
            if visited[i] == False:
                if self.is_cyclic_util(i, visited, v):
                    return True
            elif parent != i:
                return True

        return False

    def is_cyclic(self):
        visited = [False] * (self.V)

        for i in range(self.V):
            if visited[i] == False:
                if self.is_cyclic_util(i, visited, -1):
                    return True

        return False
Graph
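The parent argument is what separates a genuine cycle from walking straight back along the edge just traversed: a visited neighbor only signals a cycle when it is not the vertex the DFS arrived from. With the edge stored in both directions (see add_edge above), a triangle is detected and a simple path is not:

g = Graph(3)
g.add_edge(0, 1)
g.add_edge(1, 2)
g.add_edge(2, 0)
assert g.is_cyclic() is True   # 0-1-2-0 closes a cycle

h = Graph(3)
h.add_edge(0, 1)
h.add_edge(1, 2)
assert h.is_cyclic() is False  # a path never revisits a non-parent vertex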
python
getsentry__sentry
tests/sentry/workflow_engine/endpoints/validators/test_base_data_condition.py
{ "start": 4484, "end": 4966 }
class ____(AbstractDataConditionValidator[int, bool]): def validate_comparison(self, value: Any) -> int: if isinstance(value, int): return value else: raise ValidationError("Comparison must be an integer") def validate_condition_result(self, value: Any) -> bool: if isinstance(value, bool): return value else: raise ValidationError("Condition result must be a boolean")
ExampleConditionValidator
python
spack__spack
lib/spack/spack/llnl/util/lock.py
{ "start": 25326, "end": 27982 }
class ____:
    """Simple nested transaction context manager that uses a file lock.

    Arguments:
        lock (Lock): underlying lock for this transaction to be acquired on
            enter and released on exit
        acquire (typing.Callable or contextlib.contextmanager): function to be
            called after lock is acquired, or contextmanager to enter after
            acquire and leave before release.
        release (typing.Callable): function to be called before release. If
            ``acquire`` is a contextmanager, this will be called *after*
            exiting the nested context and before the lock is released.
        timeout (float): number of seconds to set for the timeout when
            acquiring the lock (default no timeout)

    If ``acquire`` returns a value, it is used as the return value for
    ``__enter__``, allowing it to be passed as the ``as`` argument of a
    ``with`` statement.

    If ``acquire`` returns a context manager, *its* ``__enter__`` function
    will be called after the lock is acquired, and its ``__exit__`` function
    will be called before ``release`` in ``__exit__``, allowing you to
    nest a context manager inside this one.

    Timeout for the lock is customizable.
    """

    def __init__(
        self,
        lock: Lock,
        acquire: Union[ReleaseFnType, ContextManager] = None,
        release: Union[ReleaseFnType, ContextManager] = None,
        timeout: Optional[float] = None,
    ) -> None:
        self._lock = lock
        self._timeout = timeout
        self._acquire_fn = acquire
        self._release_fn = release
        self._as = None

    def __enter__(self):
        if self._enter() and self._acquire_fn:
            self._as = self._acquire_fn()
            if hasattr(self._as, "__enter__"):
                return self._as.__enter__()
            else:
                return self._as

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> bool:
        suppress = False

        def release_fn():
            if self._release_fn is not None:
                return self._release_fn(exc_type, exc_value, traceback)

        if self._as and hasattr(self._as, "__exit__"):
            if self._as.__exit__(exc_type, exc_value, traceback):
                suppress = True

        if self._exit(release_fn):
            suppress = True

        return suppress

    def _enter(self) -> bool:
        return NotImplemented

    def _exit(self, release_fn: ReleaseFnType) -> bool:
        return NotImplemented
LockTransaction
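_enter and _exit return NotImplemented, so the class above is only usable through subclasses that bind a concrete lock mode. A minimal sketch of what a read-mode subclass could look like; acquire_read and release_read are assumed from the surrounding Lock class and not verified here:

class ReadTransaction(LockTransaction):
    """Acquires the lock for reading on enter; assumed Lock API, for illustration."""

    def _enter(self) -> bool:
        return self._lock.acquire_read(self._timeout)

    def _exit(self, release_fn) -> bool:
        return self._lock.release_read(release_fn)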
python
langchain-ai__langchain
libs/langchain/langchain_classic/chains/qa_with_sources/vector_db.py
{ "start": 559, "end": 3013 }
class ____(BaseQAWithSourcesChain):
    """Question-answering with sources over a vector database."""

    vectorstore: VectorStore = Field(exclude=True)
    """Vector Database to connect to."""
    k: int = 4
    """Number of results to return from store"""
    reduce_k_below_max_tokens: bool = False
    """Reduce the number of results to return from store based on tokens limit"""
    max_tokens_limit: int = 3375
    """Restrict the docs to return from the store based on tokens; enforced
    only for StuffDocumentsChain and only if reduce_k_below_max_tokens is set
    to true"""
    search_kwargs: dict[str, Any] = Field(default_factory=dict)
    """Extra search args."""

    def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
        num_docs = len(docs)

        if self.reduce_k_below_max_tokens and isinstance(
            self.combine_documents_chain,
            StuffDocumentsChain,
        ):
            tokens = [
                self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)  # noqa: SLF001
                for doc in docs
            ]
            token_count = sum(tokens[:num_docs])
            while token_count > self.max_tokens_limit:
                num_docs -= 1
                token_count -= tokens[num_docs]

        return docs[:num_docs]

    @override
    def _get_docs(
        self,
        inputs: dict[str, Any],
        *,
        run_manager: CallbackManagerForChainRun,
    ) -> list[Document]:
        question = inputs[self.question_key]
        docs = self.vectorstore.similarity_search(
            question,
            k=self.k,
            **self.search_kwargs,
        )
        return self._reduce_tokens_below_limit(docs)

    async def _aget_docs(
        self,
        inputs: dict[str, Any],
        *,
        run_manager: AsyncCallbackManagerForChainRun,
    ) -> list[Document]:
        msg = "VectorDBQAWithSourcesChain does not support async"
        raise NotImplementedError(msg)

    @model_validator(mode="before")
    @classmethod
    def _raise_deprecation(cls, values: dict) -> Any:
        warnings.warn(
            "`VectorDBQAWithSourcesChain` is deprecated - "
            "please use `from langchain_classic.chains import "
            "RetrievalQAWithSourcesChain`",
            stacklevel=5,
        )
        return values

    @property
    def _chain_type(self) -> str:
        return "vector_db_qa_with_sources_chain"
VectorDBQAWithSourcesChain
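_reduce_tokens_below_limit trims from the tail rather than by score: it sums token counts over the retrieved documents and decrements num_docs until the running total fits under max_tokens_limit, preserving the similarity ordering. The arithmetic in isolation:

def reduce_below_limit(token_counts, max_tokens):
    num_docs = len(token_counts)
    total = sum(token_counts[:num_docs])
    while total > max_tokens:
        num_docs -= 1
        total -= token_counts[num_docs]
    return num_docs

# Four docs of 1500, 1200, 900 and 600 tokens against the 3375-token default:
assert reduce_below_limit([1500, 1200, 900, 600], 3375) == 2  # keeps the top two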
python
allegroai__clearml
clearml/backend_api/services/v2_20/events.py
{ "start": 35286, "end": 38695 }
class ____(NonStrictDataModel): """ :param scroll_id: Scroll ID to pass to the next calls to get_debug_image_sample or next_debug_image_sample :type scroll_id: str :param event: Debug image event :type event: dict :param min_iteration: minimal valid iteration for the variant :type min_iteration: int :param max_iteration: maximal valid iteration for the variant :type max_iteration: int """ _schema = { "properties": { "event": {"description": "Debug image event", "type": ["object", "null"]}, "max_iteration": { "description": "maximal valid iteration for the variant", "type": ["integer", "null"], }, "min_iteration": { "description": "minimal valid iteration for the variant", "type": ["integer", "null"], }, "scroll_id": { "description": "Scroll ID to pass to the next calls to get_debug_image_sample or next_debug_image_sample", "type": ["string", "null"], }, }, "type": "object", } def __init__( self, scroll_id: Optional[str] = None, event: Optional[dict] = None, min_iteration: Optional[int] = None, max_iteration: Optional[int] = None, **kwargs: Any ) -> None: super(DebugImageSampleResponse, self).__init__(**kwargs) self.scroll_id = scroll_id self.event = event self.min_iteration = min_iteration self.max_iteration = max_iteration @schema_property("scroll_id") def scroll_id(self) -> Optional[str]: return self._property_scroll_id @scroll_id.setter def scroll_id(self, value: Optional[str]) -> None: if value is None: self._property_scroll_id = None return self.assert_isinstance(value, "scroll_id", six.string_types) self._property_scroll_id = value @schema_property("event") def event(self) -> Optional[dict]: return self._property_event @event.setter def event(self, value: Optional[dict]) -> None: if value is None: self._property_event = None return self.assert_isinstance(value, "event", (dict,)) self._property_event = value @schema_property("min_iteration") def min_iteration(self) -> Optional[int]: return self._property_min_iteration @min_iteration.setter def min_iteration(self, value: Optional[int]) -> None: if value is None: self._property_min_iteration = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "min_iteration", six.integer_types) self._property_min_iteration = value @schema_property("max_iteration") def max_iteration(self) -> Optional[int]: return self._property_max_iteration @max_iteration.setter def max_iteration(self, value: Optional[int]) -> None: if value is None: self._property_max_iteration = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "max_iteration", six.integer_types) self._property_max_iteration = value
DebugImageSampleResponse
python
doocs__leetcode
solution/1300-1399/1365.How Many Numbers Are Smaller Than the Current Number/Solution.py
{ "start": 0, "end": 165 }
class ____: def smallerNumbersThanCurrent(self, nums: List[int]) -> List[int]: arr = sorted(nums) return [bisect_left(arr, x) for x in nums]
Solution
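Sorting once and querying with bisect_left gives, for each value, the count of strictly smaller elements in O(n log n) total, and duplicates come out right because bisect_left returns the first position of the value. Worked example:

from bisect import bisect_left

nums = [8, 1, 2, 2, 3]
arr = sorted(nums)  # [1, 2, 2, 3, 8]
assert [bisect_left(arr, x) for x in nums] == [4, 0, 1, 1, 3]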
python
getsentry__sentry
tests/sentry/api/test_base.py
{ "start": 3112, "end": 3827 }
class ____(Endpoint): permission_classes = () def get(self, request): values = [x for x in range(0, 100)] def data_fn(offset, limit): page_offset = offset * limit return values[page_offset : page_offset + limit] return self.paginate( request=request, paginator=GenericOffsetPaginator(data_fn), on_results=lambda results: iter(results), response_cls=StreamingHttpResponse, response_kwargs={"content_type": "application/json"}, ) _dummy_endpoint = DummyEndpoint.as_view() _dummy_streaming_endpoint = DummyPaginationStreamingEndpoint.as_view() @all_silo_test
DummyPaginationStreamingEndpoint
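Worth noting in the endpoint above: data_fn receives a page-style offset and converts it to an item offset itself (page_offset = offset * limit) before slicing, so each call returns exactly one page. The slicing in isolation:

values = list(range(100))

def data_fn(offset, limit):
    page_offset = offset * limit
    return values[page_offset : page_offset + limit]

assert data_fn(0, 25) == list(range(0, 25))   # first page
assert data_fn(2, 25) == list(range(50, 75))  # third page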
python
kamyu104__LeetCode-Solutions
Python/count-collisions-of-monkeys-on-a-polygon.py
{ "start": 69, "end": 247 }
class ____(object): def monkeyMove(self, n): """ :type n: int :rtype: int """ MOD = 10**9+7 return (pow(2, n, MOD)-2)%MOD
Solution
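Each of the n monkeys independently picks clockwise or counterclockwise, giving 2**n movement assignments; exactly two of them (all clockwise, all counterclockwise) are collision-free, so the answer is 2**n - 2. pow(2, n, MOD) keeps the exponentiation modular, and the trailing % MOD guards against a negative result when pow returns 0 or 1. A worked small case:

MOD = 10**9 + 7
n = 3
assert (pow(2, n, MOD) - 2) % MOD == 6  # 8 assignments minus the 2 collision-free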
python
tensorflow__tensorflow
tensorflow/python/client/events_writer_test.py
{ "start": 1303, "end": 2780 }
class ____(test_util.TensorFlowTestCase): def testWriteEvents(self): file_prefix = os.path.join(self.get_temp_dir(), "events") writer = _pywrap_events_writer.EventsWriter(compat.as_bytes(file_prefix)) filename = compat.as_text(writer.FileName()) event_written = event_pb2.Event( wall_time=123.45, step=67, summary=summary_pb2.Summary( value=[summary_pb2.Summary.Value( tag="foo", simple_value=89.0)])) writer.WriteEvent(event_written) writer.Flush() writer.Close() with self.assertRaises(errors.NotFoundError): for r in tf_record.tf_record_iterator(filename + "DOES_NOT_EXIST"): self.assertTrue(False) reader = tf_record.tf_record_iterator(filename) event_read = event_pb2.Event() event_read.ParseFromString(next(reader)) self.assertTrue(event_read.HasField("file_version")) event_read.ParseFromString(next(reader)) # Second event self.assertProtoEquals(""" wall_time: 123.45 step: 67 summary { value { tag: 'foo' simple_value: 89.0 } } """, event_read) with self.assertRaises(StopIteration): next(reader) def testWriteEventInvalidType(self): class _Invalid(object): def __str__(self): return "Invalid" with self.assertRaisesRegex(TypeError, "Invalid"): _pywrap_events_writer.EventsWriter(b"foo").WriteEvent(_Invalid()) if __name__ == "__main__": googletest.main()
PywrapeventsWriterTest
python
apache__airflow
providers/microsoft/azure/src/airflow/providers/microsoft/azure/operators/adls.py
{ "start": 1216, "end": 3328 }
class ____(BaseOperator):
    """
    Creates a new object in Azure Data Lake from the passed data, written to the specified file.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:ADLSCreateObjectOperator`

    :param file_system_name: Name of the file system or instance of FileSystemProperties.
    :param file_name: Name of the file which needs to be created in the file system.
    :param data: The data that will be uploaded.
    :param length: Size of the data in bytes (optional).
    :param replace: Whether to forcibly overwrite existing files/directories.
        If False and the remote path is a directory, the operator will quit
        regardless of whether any files would be overwritten. If True, only
        matching filenames are actually overwritten.
    :param azure_data_lake_conn_id: Reference to the
        :ref:`Azure Data Lake connection<howto/connection:adl>`.
    """

    template_fields: Sequence[str] = ("file_system_name", "file_name", "data")
    ui_color = "#e4f0e8"

    def __init__(
        self,
        *,
        file_system_name: str,
        file_name: str,
        data: bytes | str | Iterable[AnyStr] | IO[AnyStr],
        length: int | None = None,
        replace: bool = False,
        azure_data_lake_conn_id: str = DEFAULT_AZURE_DATA_LAKE_CONN_ID,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.file_system_name = file_system_name
        self.file_name = file_name
        self.replace = replace
        self.data = data  # type: ignore[var-annotated]
        self.length = length
        self.azure_data_lake_conn_id = azure_data_lake_conn_id

    def execute(self, context: Context) -> dict[str, Any]:
        self.log.debug("Uploading %s to %s", self.data, self.file_name)
        hook = AzureDataLakeStorageV2Hook(adls_conn_id=self.azure_data_lake_conn_id)
        return hook.create_file(file_system_name=self.file_system_name, file_name=self.file_name).upload_data(
            data=self.data, length=self.length, overwrite=self.replace
        )
ADLSCreateObjectOperator
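A hedged usage sketch; the task id, file system, remote path, and connection id below are hypothetical:

from airflow.providers.microsoft.azure.operators.adls import ADLSCreateObjectOperator

upload_report = ADLSCreateObjectOperator(
    task_id="upload_report",            # hypothetical task id
    file_system_name="reports",         # hypothetical file system / container
    file_name="daily/2024-01-01.json",  # hypothetical remote path
    data=b'{"status": "ok"}',
    replace=True,                       # overwrite if the file already exists
    azure_data_lake_conn_id="adls_default",  # hypothetical connection id
)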
python
django-haystack__django-haystack
test_haystack/test_fields.py
{ "start": 16609, "end": 18586 }
class ____(TestCase): def test_init(self): try: foo = CharField(use_template=True) except: self.fail() try: foo = CharField(use_template=True, template_name="foo.txt") except: self.fail() foo = CharField(use_template=True, template_name="foo.txt") self.assertEqual(foo.template_name, "foo.txt") # Test the select_template usage. foo = CharField(use_template=True, template_name=["bar.txt", "foo.txt"]) self.assertEqual(foo.template_name, ["bar.txt", "foo.txt"]) def test_prepare(self): mock = MockModel() mock.pk = 1 mock.user = "daniel" template1 = CharField(use_template=True) self.assertRaises(SearchFieldError, template1.prepare, mock) template2 = CharField(use_template=True) template2.instance_name = "template_x" self.assertRaises(TemplateDoesNotExist, template2.prepare, mock) template3 = CharField(use_template=True) template3.instance_name = "template" self.assertEqual(template3.prepare(mock), "Indexed!\n1\n") template4 = CharField(use_template=True, template_name="search/indexes/foo.txt") template4.instance_name = "template" self.assertEqual(template4.prepare(mock), "FOO!\n") template5 = CharField( use_template=True, template_name=["foo.txt", "search/indexes/bar.txt"] ) template5.instance_name = "template" self.assertEqual(template5.prepare(mock), "BAR!\n") ############################################################################## # The following tests look like they don't do much, but it's important because # we need to verify that the faceted variants behave like the field they # emulate. The old, broken behavior was to convert everything to a string. ##############################################################################
CharFieldWithTemplateTestCase
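A minimal sketch of the pattern under test, with a hypothetical app and model: with use_template=True, prepare() renders the named template (or the conventional search/indexes/<app>/<model>_<field>.txt default) against the instance:

from haystack import indexes
from myapp.models import Note  # hypothetical model

class NoteIndex(indexes.SearchIndex, indexes.Indexable):
    # Omitting template_name falls back to the conventional template path.
    text = indexes.CharField(document=True, use_template=True,
                             template_name="search/indexes/foo.txt")

    def get_model(self):
        return Note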
python
ray-project__ray
python/ray/dag/collective_node.py
{ "start": 566, "end": 10115 }
class ____: """ Represent metadata for a collective communicator collective operation. Args: inputs: A list of lists of DAGNode. Each nested list inside of inputs should contain exactly one object per actor. If multiple nested lists are provided, then the order of actors should be the same for each nested list. op: The collective operation to perform. transport: The transport to use for the collective operation. Requirements: 1. Input nodes are unique. 2. Actor handles are unique. 3. Actor handles match the custom communicator group if specified. """ def __init__( self, inputs: List[List[DAGNode]], op: _CollectiveOp, transport: Optional[Union[str, Communicator]] = None, ): self._actor_handles: List["ray.actor.ActorHandle"] = [] for i, input_nodes in enumerate(inputs): # Check non-empty input list if len(input_nodes) == 0: nested_list_error_msg = f" at index {i}" if len(inputs) > 1 else "" raise ValueError( f"Expected non-empty input list{nested_list_error_msg}." ) # Check input nodes are DAGNode if not all(isinstance(node, DAGNode) for node in input_nodes): nested_list_error_msg = ( f" at list at index {i}" if len(inputs) > 1 else "" ) raise ValueError( f"Expected all input nodes to be DAGNode{nested_list_error_msg}, " f"but got {input_nodes}." ) # Check unique input nodes if len(set(input_nodes)) != len(input_nodes): duplicates = [ input_node for input_node in input_nodes if input_nodes.count(input_node) > 1 ] nested_list_error_msg = ( f" at list at index {i}" if len(inputs) > 1 else "" ) raise ValueError( f"Expected unique input nodes{nested_list_error_msg}, but found duplicates: " f"{duplicates}" ) current_actor_handles = [] for input_node in input_nodes: actor_handle = input_node._get_actor_handle() if actor_handle is None: nested_list_error_msg = ( f" at list at index {i}" if len(inputs) > 1 else "" ) raise ValueError( f"Expected an actor handle from the input node{nested_list_error_msg}" ) current_actor_handles.append(actor_handle) # Check unique actor handles if len(set(current_actor_handles)) != len(current_actor_handles): invalid_input_nodes = [ input_node for input_node in input_nodes if current_actor_handles.count(input_node._get_actor_handle()) > 1 ] nested_list_error_msg = ( f" at list at index {i}" if len(inputs) > 1 else "" ) raise ValueError( f"Expected unique actor handles{nested_list_error_msg}, " "but found duplicate actor handles from input nodes: " f"{invalid_input_nodes}" ) if i == 0: first_actor_handles = current_actor_handles # Check all lists of DAGNode have the same number of nodes if len(inputs[0]) != len(inputs[i]): raise ValueError( f"Expected all input lists to have the same number of nodes. " f"List at index 0 has length {len(inputs[0])}, but list at " f"index {i} has length {len(inputs[i])}." ) # Check all lists of DAGNode have same set of actor handles if set(first_actor_handles) != set(current_actor_handles): raise ValueError( f"Expected all input lists to have the same set of actor handles. " f"List at index 0 has actors {set(first_actor_handles)}, but list at " f"index {i} has actors {set(current_actor_handles)}." ) # Check all lists of DAGNode have same order of actor handles for j, (first, current) in enumerate( zip(first_actor_handles, current_actor_handles) ): if first != current: raise ValueError( f"Expected all input lists to have the same order of actor handles. " f"List at index 0 has actor {first} at position {j}, but list at " f"index {i} has actor {current} at position {j}." 
) self._actor_handles = current_actor_handles self._op = op if transport is None: transport = TorchTensorType.ACCELERATOR self._type_hint = TorchTensorType(transport=transport, _direct_return=True) if isinstance(transport, Communicator): if set(transport.get_actor_handles()) != set(self._actor_handles): raise ValueError( "Expected actor handles to match the custom communicator group" ) def __str__(self) -> str: return ( f"CollectiveOperation(" f"_actor_handles={self._actor_handles}, " f"_op={self._op}, " f"_type_hint={self._type_hint})" ) @property def actor_handles(self) -> List["ray.actor.ActorHandle"]: return self._actor_handles @property def type_hint(self) -> TorchTensorType: return self._type_hint def get_communicator(self) -> Communicator: if self._type_hint.communicator_id is not None: ctx = ChannelContext.get_current() communicator = ctx.communicators[self._type_hint.communicator_id] elif self._type_hint.get_custom_communicator() is not None: communicator = self._type_hint.get_custom_communicator() else: raise ValueError("Expected a communicator group") return communicator def execute( self, *send_buf: "torch.Tensor" ) -> Union["torch.Tensor", Tuple["torch.Tensor", ...]]: """ Call the collective operation on the input tensor(s). Output tensor(s) are allocated and returned. Args: *send_buf: A variable number of torch tensors to send to the collective operation. The tensors have the same order as the input nodes. Returns: A torch tensor or a tuple of torch tensors containing the results of the collective operation. The output tensors have the same length and order as the input node list of the actor of this operation. """ import torch if not all(isinstance(t, torch.Tensor) for t in send_buf): raise ValueError("Expected a torch tensor for each input node") communicator = self.get_communicator() if isinstance(self._op, AllGatherOp): assert len(send_buf) == 1 t = send_buf[0] world_size = len(self._actor_handles) recv_buf = torch.empty( (t.shape[0] * world_size, *t.shape[1:]), dtype=t.dtype, device=t.device, ) communicator.allgather(t, recv_buf) elif isinstance(self._op, AllReduceOp): if len(send_buf) == 1: t = send_buf[0] recv_buf = torch.empty_like(t) communicator.allreduce(t, recv_buf, self._op.reduceOp) else: if not all(t.dtype == send_buf[0].dtype for t in send_buf): raise ValueError( "Expected all input tensors to have the same dtype, " f"but got {[t.dtype for t in send_buf]}" ) def unflatten_from(flat_buf, bufs): views = [] offset = 0 for t in bufs: numel = t.numel() t = flat_buf[offset : offset + numel].view(t.shape) views.append(t) offset += numel return tuple(views) flat_buf = torch.nn.utils.parameters_to_vector(send_buf) communicator.allreduce(flat_buf, flat_buf, self._op.reduceOp) recv_buf = unflatten_from(flat_buf, send_buf) elif isinstance(self._op, ReduceScatterOp): assert len(send_buf) == 1 t = send_buf[0] world_size = len(self._actor_handles) if t.shape[0] % world_size != 0: raise ValueError( "Expected the first dimension of the input tensor to be divisible " f"by the world size {world_size}" ) recv_buf = torch.empty( (t.shape[0] // world_size, *t.shape[1:]), dtype=t.dtype, device=t.device, ) communicator.reducescatter(t, recv_buf, self._op.reduceOp) return recv_buf @DeveloperAPI
_CollectiveOperation
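The multi-tensor allreduce path packs every input into one contiguous vector, reduces once, then views the flat result back into the original shapes. A standalone sketch of that unflatten step, assuming only PyTorch:

import torch

def unflatten_from(flat_buf, bufs):
    # Slice the flat buffer in input order and reshape each slice
    # back to the corresponding tensor's shape.
    views, offset = [], 0
    for t in bufs:
        n = t.numel()
        views.append(flat_buf[offset:offset + n].view(t.shape))
        offset += n
    return tuple(views)

a, b = torch.ones(2, 3), torch.arange(4.0)
flat = torch.nn.utils.parameters_to_vector([a, b])
ra, rb = unflatten_from(flat, [a, b])
assert ra.shape == a.shape and rb.shape == b.shape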
python
scikit-learn__scikit-learn
sklearn/externals/_arff.py
{ "start": 13624, "end": 13809 }
class ____(ArffException): '''Error raised when an invalid numerical value is used in some data instance.''' message = 'Invalid numerical value, at line %d.'
BadNumericalValue
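A standalone sketch of the exception pattern (demo classes, not the vendored ones): the class-level message template is %-formatted with the line where parsing failed:

class ArffExceptionDemo(Exception):
    message = 'Generic error, at line %d.'

    def __init__(self, line=-1):
        self.line = line

    def __str__(self):
        return self.message % self.line

class BadNumericalValueDemo(ArffExceptionDemo):
    message = 'Invalid numerical value, at line %d.'

print(BadNumericalValueDemo(7))  # Invalid numerical value, at line 7.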
python
huggingface__transformers
src/transformers/models/pop2piano/modeling_pop2piano.py
{ "start": 27674, "end": 42423 }
class ____(Pop2PianoPreTrainedModel): # Copied from transformers.models.t5.modeling_t5.T5Stack.__init__ with T5->Pop2Piano,t5->pop2piano def __init__(self, config): super().__init__(config) self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model) self.is_decoder = config.is_decoder self.block = nn.ModuleList( [ Pop2PianoBlock(config, has_relative_attention_bias=bool(i == 0), layer_idx=i) for i in range(config.num_layers) ] ) self.final_layer_norm = Pop2PianoLayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) # Initialize weights and apply final processing self.post_init() self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None, ): use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds") if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False if inputs_embeds is None: if self.embed_tokens is None: raise ValueError("You have to initialize the model with valid token embeddings") inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape if use_cache is True: if not self.is_decoder: raise ValueError(f"`use_cache` can only be set to `True` if {self} is used as a decoder") if self.is_decoder: if use_cache and past_key_values is None: if self.config.is_encoder_decoder: past_key_values = EncoderDecoderCache( DynamicCache(config=self.config), DynamicCache(config=self.config) ) else: past_key_values = DynamicCache(config=self.config) elif not self.is_decoder: # do not pass cache object down the line for encoder stack # it messes indexing later in decoder-stack because cache object is modified in-place past_key_values = None past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange( past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device ) if attention_mask is None and not is_torchdynamo_compiling(): # required mask seq length can be calculated via length of past cache mask_seq_length = past_key_values_length + seq_length attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) if self.config.is_decoder: causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values.self_attention_cache if isinstance(past_key_values, EncoderDecoderCache) else past_key_values, output_attentions, ) else: causal_mask = attention_mask[:, None, None, :] causal_mask = causal_mask.to(dtype=inputs_embeds.dtype) causal_mask = (1.0 - causal_mask) * torch.finfo(inputs_embeds.dtype).min # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, layer_module in enumerate(self.block): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, causal_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, # as a positional argument for gradient checkpointing past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = layer_outputs[0] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[1] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = 
layer_outputs[3 if output_attentions else 2] if output_attentions: all_attentions = all_attentions + (layer_outputs[2],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[4],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, past_key_values, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._update_causal_mask def _update_causal_mask( self, attention_mask: Union[torch.Tensor, "BlockMask"], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool = False, ): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None if self.config._attn_implementation == "flex_attention": if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) return attention_mask # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions: if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training, ): return None dtype = input_tensor.dtype sequence_length = input_tensor.shape[1] if using_compilable_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0], ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type in ["cuda", "xpu", "npu"] and not output_attentions ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. 
# Details: https://github.com/pytorch/pytorch/issues/110213 min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._prepare_4d_causal_attention_mask_with_cache_position def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to( causal_mask.device ) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask
Pop2PianoStack
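A standalone, simplified sketch of the 4D causal-mask construction the stack relies on (CPU only, no padding mask): disallowed key positions are filled with the dtype minimum so softmax assigns them effectively zero weight:

import torch

def build_causal_mask(sequence_length, target_length, dtype, cache_position, batch_size):
    min_dtype = torch.finfo(dtype).min
    mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
    mask = torch.triu(mask, diagonal=1)  # each query may attend to itself and earlier keys
    # Keep the min fill only for keys strictly after each query's cache position.
    mask *= torch.arange(target_length) > cache_position.reshape(-1, 1)
    return mask[None, None, :, :].expand(batch_size, 1, -1, -1)

m = build_causal_mask(4, 4, torch.float32, torch.arange(4), batch_size=2)
assert m.shape == (2, 1, 4, 4)
assert m[0, 0, 0, 1] == torch.finfo(torch.float32).min  # future key is masked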
python
getsentry__sentry
src/sentry/api/paginator.py
{ "start": 19959, "end": 20736 }
class ____: is_empty = False def __init__(self, queryset, order_by): assert isinstance(order_by, list), "order_by must be a list of keys/field names" self.queryset = queryset self.order_by = order_by try: instance = queryset[:1].get() self.instance_type = type(instance) for key in self.order_by: self._assert_has_field(instance, key) self.order_by_type = type(getattr(instance, self.order_by[0])) except ObjectDoesNotExist: self.is_empty = True def _assert_has_field(self, instance, field): assert hasattr( instance, field ), f"Model of type {self.instance_type} does not have field {field}"
CombinedQuerysetIntermediary
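A hedged usage sketch with a hypothetical import: each intermediary wraps one queryset plus the keys a combined paginator will order by, asserting up front that every key exists on the model:

from sentry.models.group import Group  # hypothetical import path

intermediary = CombinedQuerysetIntermediary(
    Group.objects.all(),
    order_by=["last_seen"],  # must be a list; each entry must be a field on the model
)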
python
dagster-io__dagster
python_modules/libraries/dagster-dg-core/dagster_dg_core/config.py
{ "start": 11208, "end": 12298 }
class ____(TypedDict): registry: Optional[str] directory: Optional[str] def merge_build_configs( workspace_build_config: Optional[DgRawBuildConfig], project_build_config: Optional[DgRawBuildConfig], ) -> DgRawBuildConfig: project_dict = remove_none_recursively(project_build_config or {}) workspace_dict = remove_none_recursively(workspace_build_config or {}) return cast( "DgRawBuildConfig", deep_merge_dicts(workspace_dict, project_dict), ) def merge_container_context_configs( workspace_container_context_config: Optional[Mapping[str, Any]], project_container_context_config: Optional[Mapping[str, Any]], ) -> Mapping[str, Any]: # defer for import performance from dagster_cloud_cli.config import DagsterCloudConfigDefaultsMerger merger = DagsterCloudConfigDefaultsMerger() return merger.merge( {**workspace_container_context_config} if workspace_container_context_config else {}, {**project_container_context_config} if project_container_context_config else {}, ) @dataclass
DgRawBuildConfig
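A minimal sketch of the merge precedence, assuming (as the argument order suggests) that project values win over workspace values once None entries are stripped:

workspace = {"registry": "ghcr.io/acme", "directory": None}  # hypothetical values
project = {"registry": None, "directory": "./build"}

merged = merge_build_configs(workspace, project)
assert merged == {"registry": "ghcr.io/acme", "directory": "./build"}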