| language (string, 1 class) | repo (string, 346 classes) | path (string, length 6–201) | class_span (dict) | source (string, length 21–2.38M) | target (string, length 1–96) |
|---|---|---|---|---|---|
python | pytorch__pytorch | torch/distributions/constraints.py | {
"start": 11552,
"end": 12043
} | class ____(Constraint):
"""
Constrain to an integer interval `(-inf, upper_bound]`.
"""
is_discrete = True
def __init__(self, upper_bound):
self.upper_bound = upper_bound
super().__init__()
def check(self, value):
return (value % 1 == 0) & (value <= self.upper_bound)
def __repr__(self):
fmt_string = self.__class__.__name__[1:]
fmt_string += f"(upper_bound={self.upper_bound})"
return fmt_string
| _IntegerLessThan |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1567405,
"end": 1568890
} | class ____(VegaLiteSchema):
"""
ValueDefWithConditionMarkPropFieldOrDatumDefstringnull schema wrapper.
Parameters
----------
condition : dict, :class:`ConditionalMarkPropFieldOrDatumDef`, :class:`ConditionalValueDefstringnullExprRef`, :class:`ConditionalParameterMarkPropFieldOrDatumDef`, :class:`ConditionalPredicateMarkPropFieldOrDatumDef`, :class:`ConditionalParameterValueDefstringnullExprRef`, :class:`ConditionalPredicateValueDefstringnullExprRef`, Sequence[dict, :class:`ConditionalValueDefstringnullExprRef`, :class:`ConditionalParameterValueDefstringnullExprRef`, :class:`ConditionalPredicateValueDefstringnullExprRef`]
A field definition or one or more value definition(s) with a parameter predicate.
value : str, dict, :class:`ExprRef`, None
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_schema = {
"$ref": "#/definitions/ValueDefWithCondition<MarkPropFieldOrDatumDef,(string|null)>"
}
def __init__(
self,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
value: Optional[str | Parameter | SchemaBase | Map | None] = Undefined,
**kwds,
):
super().__init__(condition=condition, value=value, **kwds)
| ValueDefWithConditionMarkPropFieldOrDatumDefstringnull |
python | getsentry__sentry | src/sentry/shared_integrations/exceptions/__init__.py | {
"start": 4273,
"end": 4327
} | class ____(ApiError):
code = 429
| ApiRateLimitedError |
python | walkccc__LeetCode | solutions/187. Repeated DNA Sequences/187.py | {
"start": 0,
"end": 250
} | class ____:
def findRepeatedDnaSequences(self, s: str) -> list[str]:
ans = set()
seen = set()
for i in range(len(s) - 9):
seq = s[i:i + 10]
if seq in seen:
ans.add(seq)
seen.add(seq)
return list(ans)
| Solution |
python | getsentry__sentry | tests/sentry/grouping/test_enhancer.py | {
"start": 17709,
"end": 29073
} | class ____(TestCase):
def setUp(self) -> None:
self.rules_text = """
function:sit +app # should end up in classifiers
function:roll_over category=trick # should end up in classifiers
function:shake +group # should end up in contributes
function:lie_down max-frames=11 # should end up in contributes
function:stay min-frames=12 # should end up in contributes
function:kangaroo -app -group # should end up in both
"""
def test_differentiates_between_classifier_and_contributes_rules(self) -> None:
rules = parse_enhancements(self.rules_text)
expected_results = [
# (has_classifier_actions, has_contributes_actions, classifier_actions, contributes_actions)
(True, False, ["+app"], None),
(True, False, ["category=trick"], None),
(False, True, None, ["+group"]),
(False, True, None, ["max-frames=11"]),
(False, True, None, ["min-frames=12"]),
(True, True, ["-app"], ["-group"]),
]
for i, expected in enumerate(expected_results):
(
expected_has_classifier_actions_value,
expected_has_contributes_actions_value,
expected_as_classifier_rule_actions,
expected_as_contributes_rule_actions,
) = expected
rule = rules[i]
classifier_rule = rule.as_classifier_rule()
classifier_rule_actions = (
[str(action) for action in classifier_rule.actions] if classifier_rule else None
)
contributes_rule = rule.as_contributes_rule()
contributes_rule_actions = (
[str(action) for action in contributes_rule.actions] if contributes_rule else None
)
assert rule.has_classifier_actions == expected_has_classifier_actions_value
assert rule.has_contributes_actions == expected_has_contributes_actions_value
assert classifier_rule_actions == expected_as_classifier_rule_actions
assert contributes_rule_actions == expected_as_contributes_rule_actions
def test_splits_rules_correctly(self) -> None:
enhancements = EnhancementsConfig.from_rules_text(self.rules_text, version=3)
assert [rule.text for rule in enhancements.classifier_rules] == [
"function:sit +app",
"function:roll_over category=trick",
"function:kangaroo -app", # Split of `function:kangaroo -app -group`
]
assert [rule.text for rule in enhancements.contributes_rules] == [
"function:shake +group",
"function:lie_down max-frames=11",
"function:stay min-frames=12",
"function:kangaroo -group", # Split of `function:kangaroo -app -group`
]
def test_adds_split_rules_to_base_enhancements(self) -> None:
for base in ENHANCEMENT_BASES.values():
# Make these sets so checking in them is faster
classifier_rules = set(base.classifier_rules)
contributes_rules = set(base.contributes_rules)
for rule in base.rules:
if rule.has_classifier_actions:
assert rule.as_classifier_rule() in classifier_rules
if rule.has_contributes_actions:
assert rule.as_contributes_rule() in contributes_rules
@patch("sentry.grouping.enhancer.parse_enhancements", wraps=parse_enhancements)
def test_caches_enhancements(self, parse_enhancements_spy: MagicMock) -> None:
self.project.update_option(
"sentry:grouping_enhancements", "stack.function:recordMetrics +app -group"
)
get_grouping_config_dict_for_project(self.project)
assert parse_enhancements_spy.call_count == 1
get_grouping_config_dict_for_project(self.project)
# We didn't parse again because the result was cached
assert parse_enhancements_spy.call_count == 1
@patch("sentry.grouping.enhancer.parse_enhancements", wraps=parse_enhancements)
def test_caches_split_enhancements(self, parse_enhancements_spy: MagicMock) -> None:
self.project.update_option("sentry:grouping_enhancements", "function:playFetch +app +group")
# Using version 3 forces the enhancements to be split, and we know a split will happen
# because the custom rule added above has both an in-app and a contributes action
with patch("sentry.grouping.api.get_enhancements_version", return_value=3):
get_grouping_config_dict_for_project(self.project)
assert parse_enhancements_spy.call_count == 1
get_grouping_config_dict_for_project(self.project)
# We didn't parse again because the result was cached
assert parse_enhancements_spy.call_count == 1
def test_loads_enhancements_from_base64_string(self) -> None:
enhancements = EnhancementsConfig.from_rules_text("function:playFetch +app")
assert len(enhancements.rules) == 1
assert str(enhancements.rules[0]) == "<EnhancementRule function:playFetch +app>"
assert enhancements.id is None
strategy_config = load_grouping_config(
{"id": DEFAULT_GROUPING_CONFIG, "enhancements": enhancements.base64_string}
)
assert len(strategy_config.enhancements.rules) == 1
assert str(enhancements.rules[0]) == "<EnhancementRule function:playFetch +app>"
assert strategy_config.enhancements.id is None
@patch("sentry.grouping.enhancer._split_rules", wraps=_split_rules)
def test_loads_split_enhancements_from_base64_string(self, split_rules_spy: MagicMock) -> None:
# Using version 3 forces the enhancements to be split, and we know a split will happen
# because the rule below has both an in-app and a contributes action
enhancements = EnhancementsConfig.from_rules_text(
"function:playFetch +app +group", version=3
)
assert len(enhancements.rules) == 1
assert len(enhancements.classifier_rules) == 1
assert len(enhancements.contributes_rules) == 1
assert str(enhancements.rules[0]) == "<EnhancementRule function:playFetch +app +group>"
assert str(enhancements.classifier_rules[0]) == "<EnhancementRule function:playFetch +app>"
assert (
str(enhancements.contributes_rules[0]) == "<EnhancementRule function:playFetch +group>"
)
assert enhancements.id is None
assert split_rules_spy.call_count == 1
strategy_config = load_grouping_config(
{"id": DEFAULT_GROUPING_CONFIG, "enhancements": enhancements.base64_string}
)
assert len(strategy_config.enhancements.rules) == 1
assert len(strategy_config.enhancements.classifier_rules) == 1
assert len(strategy_config.enhancements.contributes_rules) == 1
assert (
str(strategy_config.enhancements.rules[0])
== "<EnhancementRule function:playFetch +app +group>"
)
assert (
str(strategy_config.enhancements.classifier_rules[0])
== "<EnhancementRule function:playFetch +app>"
)
assert (
str(strategy_config.enhancements.contributes_rules[0])
== "<EnhancementRule function:playFetch +group>"
)
assert strategy_config.enhancements.id is None
# Rules didn't have to be split again because they were cached in split form
assert split_rules_spy.call_count == 1
def test_uses_default_enhancements_when_loading_string_with_invalid_version(self) -> None:
enhancements = EnhancementsConfig.from_rules_text("function:playFetch +app")
assert len(enhancements.rules) == 1
assert str(enhancements.rules[0]) == "<EnhancementRule function:playFetch +app>"
assert enhancements.id is None
# Version 1 no longer exists
enhancements.version = 1
strategy_config = load_grouping_config(
{"id": DEFAULT_GROUPING_CONFIG, "enhancements": enhancements.base64_string}
)
assert len(strategy_config.enhancements.rules) > 1
assert "<EnhancementRule function:playFetch +app>" not in {
str(rule) for rule in strategy_config.enhancements.rules
}
assert strategy_config.enhancements.id == DEFAULT_ENHANCEMENTS_BASE
# TODO: This and `test_base64_string_with_old_enhancements_name_runs_default_rules` are here in
# order to test the temporary shim in the enhancements module which makes the default
# enhancements able to be looked up by their old name. Once that's removed (once the relevant
# events have aged out, after Nov 2025), these tests can be removed as well.
def test_successfully_loads_base64_string_with_old_enhancements_name(self) -> None:
enhancements = EnhancementsConfig.from_rules_text(
"function:playFetch +app", bases=["newstyle:2023-01-11"]
)
assert len(enhancements.rules) == 1
assert str(enhancements.rules[0]) == "<EnhancementRule function:playFetch +app>"
assert enhancements.id is None
assert enhancements.bases == ["newstyle:2023-01-11"]
strategy_config = load_grouping_config(
{"id": DEFAULT_GROUPING_CONFIG, "enhancements": enhancements.base64_string}
)
assert len(strategy_config.enhancements.rules) == 1
assert str(enhancements.rules[0]) == "<EnhancementRule function:playFetch +app>"
assert strategy_config.enhancements.id is None
assert strategy_config.enhancements.bases == ["newstyle:2023-01-11"]
def test_base64_string_with_old_enhancements_name_runs_default_rules(self) -> None:
old_name_enhancements = EnhancementsConfig.from_rules_text(
"", bases=["newstyle:2023-01-11"]
)
default_enhancements = EnhancementsConfig.from_rules_text(
"", bases=["all-platforms:2023-01-11"]
)
old_name_strategy_config = load_grouping_config(
{"id": DEFAULT_GROUPING_CONFIG, "enhancements": old_name_enhancements.base64_string}
)
default_strategy_config = load_grouping_config(
{"id": DEFAULT_GROUPING_CONFIG, "enhancements": default_enhancements.base64_string}
)
# Internal Node function, should get marked out of app by our default rules
frame1: dict[str, Any] = {"function": "nextTick", "filename": "dogs/are/great.js"}
frame2: dict[str, Any] = {"function": "nextTick", "filename": "dogs/are/great.js"}
old_name_strategy_config.enhancements.apply_category_and_updated_in_app_to_frames(
[frame1], "node", {}
)
default_strategy_config.enhancements.apply_category_and_updated_in_app_to_frames(
[frame2], "node", {}
)
# Enhancements with the old name behave the same as our default enhancements
assert frame1["in_app"] is False
assert frame2["in_app"] is False
# Note: This primarily tests `assemble_stacktrace_component`'s handling of `contributes` values, as
# hints are tested separately in `test_hints.py`.
| EnhancementsTest |
python | mlflow__mlflow | dev/check_function_signatures.py | {
"start": 8369,
"end": 12530
} | class ____(ast.NodeVisitor):
def __init__(self):
self.functions: dict[str, ast.FunctionDef | ast.AsyncFunctionDef] = {}
self.stack: list[ast.ClassDef] = []
def visit_ClassDef(self, node: ast.ClassDef) -> None:
self.stack.append(node)
self.generic_visit(node)
self.stack.pop()
def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
# Is this a private function or a function in a private class?
# If so, skip it.
if _is_private(node.name) or (self.stack and _is_private(self.stack[-1].name)):
return
names = [*(c.name for c in self.stack), node.name]
self.functions[".".join(names)] = node
def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
if _is_private(node.name) or (self.stack and _is_private(self.stack[-1].name)):
return
names = [*(c.name for c in self.stack), node.name]
self.functions[".".join(names)] = node
def get_changed_python_files(base_branch: str = "master") -> list[Path]:
# In GitHub Actions PR context, we need to fetch the base branch first
if is_github_actions():
# Fetch the base branch to ensure we have it locally
subprocess.check_call(
["git", "fetch", "origin", f"{base_branch}:{base_branch}"],
)
result = subprocess.check_output(
["git", "diff", "--name-only", f"{base_branch}...HEAD"], text=True
)
files = [s.strip() for s in result.splitlines()]
return [Path(f) for f in files if f]
def parse_functions(content: str) -> dict[str, ast.FunctionDef | ast.AsyncFunctionDef]:
tree = ast.parse(content)
extractor = FunctionSignatureExtractor()
extractor.visit(tree)
return extractor.functions
def get_file_content_at_revision(file_path: Path, revision: str) -> str | None:
try:
return subprocess.check_output(["git", "show", f"{revision}:{file_path}"], text=True)
except subprocess.CalledProcessError as e:
print(f"Warning: Failed to get file content at revision: {e}", file=sys.stderr)
return None
def compare_signatures(base_branch: str = "master") -> list[Error]:
errors: list[Error] = []
for file_path in get_changed_python_files(base_branch):
# Ignore non-Python files
if not file_path.suffix == ".py":
continue
# Ignore files not in the mlflow directory
if file_path.parts[0] != "mlflow":
continue
# Ignore private modules
if any(part.startswith("_") for part in file_path.parts):
continue
base_content = get_file_content_at_revision(file_path, base_branch)
if base_content is None:
# File not found in the base branch, likely added in the current branch
continue
if not file_path.exists():
# File not found, likely deleted in the current branch
continue
current_content = file_path.read_text()
base_functions = parse_functions(base_content)
current_functions = parse_functions(current_content)
for func_name in set(base_functions.keys()) & set(current_functions.keys()):
base_func = base_functions[func_name]
current_func = current_functions[func_name]
if param_errors := check_signature_compatibility(base_func, current_func):
# Create individual errors for each problematic parameter
errors.extend(
Error(
file_path=file_path,
line=param_error.lineno,
column=param_error.col_offset + 1,
lines=[
"[Non-blocking | Ignore if not public API]",
param_error.message,
f"This change will break existing `{func_name}` calls.",
"If this is not intended, please fix it.",
],
)
for param_error in param_errors
)
return errors
@dataclass
| FunctionSignatureExtractor |
python | doocs__leetcode | solution/1100-1199/1128.Number of Equivalent Domino Pairs/Solution.py | {
"start": 0,
"end": 276
} | class ____:
def numEquivDominoPairs(self, dominoes: List[List[int]]) -> int:
cnt = Counter()
ans = 0
for a, b in dominoes:
x = a * 10 + b if a < b else b * 10 + a
ans += cnt[x]
cnt[x] += 1
return ans
| Solution |
python | sqlalchemy__sqlalchemy | examples/asyncio/async_orm.py | {
"start": 679,
"end": 731
} | class ____(AsyncAttrs, DeclarativeBase):
pass
| Base |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 23955,
"end": 24922
} | class ____(sgqlc.types.Enum):
"""The possible funding platforms for repository funding links.
Enumeration Choices:
* `COMMUNITY_BRIDGE`: Community Bridge funding platform.
* `CUSTOM`: Custom funding platform.
* `GITHUB`: GitHub funding platform.
* `ISSUEHUNT`: IssueHunt funding platform.
* `KO_FI`: Ko-fi funding platform.
* `LFX_CROWDFUNDING`: LFX Crowdfunding funding platform.
* `LIBERAPAY`: Liberapay funding platform.
* `OPEN_COLLECTIVE`: Open Collective funding platform.
* `OTECHIE`: Otechie funding platform.
* `PATREON`: Patreon funding platform.
* `TIDELIFT`: Tidelift funding platform.
"""
__schema__ = github_schema
__choices__ = (
"COMMUNITY_BRIDGE",
"CUSTOM",
"GITHUB",
"ISSUEHUNT",
"KO_FI",
"LFX_CROWDFUNDING",
"LIBERAPAY",
"OPEN_COLLECTIVE",
"OTECHIE",
"PATREON",
"TIDELIFT",
)
| FundingPlatform |
python | ansible__ansible | lib/ansible/modules/dnf5.py | {
"start": 16805,
"end": 32677
} | class ____(YumDnf):
def __init__(self, module):
super(Dnf5Module, self).__init__(module)
self.auto_install_module_deps = self.module.params["auto_install_module_deps"]
self._ensure_dnf()
self.pkg_mgr_name = "dnf5"
def fail_on_non_existing_plugins(self, base):
# https://github.com/rpm-software-management/dnf5/issues/1460
try:
plugin_names = [p.get_name() for p in base.get_plugins_info()]
except AttributeError:
# plugins functionality requires python3-libdnf5 5.2.0.0+
# silently ignore here, the module will fail later when
# base.enable_disable_plugins is attempted to be used if
# user specifies enable_plugin/disable_plugin
return
msg = []
if enable_unmatched := set(self.enable_plugin).difference(plugin_names):
msg.append(
f"No matches were found for the following plugin name patterns while enabling libdnf5 plugins: {', '.join(enable_unmatched)}."
)
if disable_unmatched := set(self.disable_plugin).difference(plugin_names):
msg.append(
f"No matches were found for the following plugin name patterns while disabling libdnf5 plugins: {', '.join(disable_unmatched)}."
)
if msg:
self.module.fail_json(msg=" ".join(msg))
def _ensure_dnf(self):
locale = get_best_parsable_locale(self.module)
os.environ["LC_ALL"] = os.environ["LC_MESSAGES"] = locale
os.environ["LANGUAGE"] = os.environ["LANG"] = locale
global libdnf5
global LIBDNF5_ERRORS
has_dnf = True
try:
import libdnf5 # type: ignore[import]
except ImportError:
has_dnf = False
try:
import libdnf5.exception # type: ignore[import-not-found]
LIBDNF5_ERRORS = (libdnf5.exception.Error, libdnf5.exception.NonLibdnf5Exception)
except (ImportError, AttributeError):
pass
if has_dnf:
return
system_interpreters = [
"/usr/libexec/platform-python",
"/usr/bin/python3",
"/usr/bin/python",
]
if not has_respawned():
for attempt in (1, 2):
# probe well-known system Python locations for accessible bindings
interpreter = probe_interpreters_for_module(system_interpreters, "libdnf5")
if interpreter:
# respawn under the interpreter where the bindings should be found
respawn_module(interpreter)
# end of the line for this module, the process will exit here once the respawned module completes
if attempt == 1:
if self.module.check_mode:
self.module.fail_json(
msg="python3-libdnf5 must be installed to use check mode. "
"If run normally this module can auto-install it, "
"see the auto_install_module_deps option.",
)
elif self.auto_install_module_deps:
self.module.run_command(["dnf", "install", "-y", "python3-libdnf5"], check_rc=True)
else:
break
py_version = sys.version.replace("\n", "")
self.module.fail_json(
msg=f"Could not import the libdnf5 python module using {sys.executable} ({py_version}). "
"Ensure python3-libdnf5 package is installed (either manually or via the auto_install_module_deps option) "
f"or that you have specified the correct ansible_python_interpreter. (attempted {system_interpreters}).",
failures=[],
)
def run(self):
if not self.list and not self.download_only and os.geteuid() != 0:
self.module.fail_json(
msg="This command has to be run under the root user.",
failures=[],
rc=1,
)
base = libdnf5.base.Base()
conf = base.get_config()
if self.conf_file:
conf.config_file_path = self.conf_file
base.load_config()
if self.releasever is not None:
variables = base.get_vars()
variables.set("releasever", self.releasever)
if self.exclude:
conf.excludepkgs = self.exclude
if self.disable_excludes:
if self.disable_excludes == "all":
self.disable_excludes = "*"
conf.disable_excludes = self.disable_excludes
conf.skip_broken = self.skip_broken
# best and nobest are mutually exclusive
if self.nobest is not None:
conf.best = not self.nobest
elif self.best is not None:
conf.best = self.best
conf.install_weak_deps = self.install_weak_deps
try:
# raises AttributeError only on getter if not available
conf.pkg_gpgcheck # pylint: disable=pointless-statement
except AttributeError:
# dnf5 < 5.2.7.0
conf.gpgcheck = not self.disable_gpg_check
else:
conf.pkg_gpgcheck = not self.disable_gpg_check
conf.localpkg_gpgcheck = not self.disable_gpg_check
conf.sslverify = self.sslverify
conf.clean_requirements_on_remove = self.autoremove
if not os.path.isdir(self.installroot):
self.module.fail_json(msg=f"Installroot {self.installroot} must be a directory")
conf.installroot = self.installroot
conf.use_host_config = True # needed for installroot
conf.cacheonly = "all" if self.cacheonly else "none"
if self.download_dir:
conf.destdir = self.download_dir
if self.enable_plugin:
try:
base.enable_disable_plugins(self.enable_plugin, True)
except AttributeError:
self.module.fail_json(msg="'enable_plugin' requires python3-libdnf5 5.2.0.0+")
if self.disable_plugin:
try:
base.enable_disable_plugins(self.disable_plugin, False)
except AttributeError:
self.module.fail_json(msg="'disable_plugin' requires python3-libdnf5 5.2.0.0+")
base.setup()
# https://github.com/rpm-software-management/dnf5/issues/1460
self.fail_on_non_existing_plugins(base)
log_router = base.get_logger()
global_logger = libdnf5.logger.GlobalLogger()
global_logger.set(log_router.get(), libdnf5.logger.Logger.Level_DEBUG)
# FIXME hardcoding the filename does not seem right, should libdnf5 expose the default file name?
logger = libdnf5.logger.create_file_logger(base, "dnf5.log")
log_router.add_logger(logger)
if self.update_cache:
repo_query = libdnf5.repo.RepoQuery(base)
repo_query.filter_type(libdnf5.repo.Repo.Type_AVAILABLE)
for repo in repo_query:
repo_dir = repo.get_cachedir()
if os.path.exists(repo_dir):
repo_cache = libdnf5.repo.RepoCache(base, repo_dir)
repo_cache.write_attribute(libdnf5.repo.RepoCache.ATTRIBUTE_EXPIRED)
sack = base.get_repo_sack()
sack.create_repos_from_system_configuration()
repo_query = libdnf5.repo.RepoQuery(base)
if self.disablerepo:
repo_query.filter_id(self.disablerepo, libdnf5.common.QueryCmp_IGLOB)
for repo in repo_query:
repo.disable()
if self.enablerepo:
repo_query.filter_id(self.enablerepo, libdnf5.common.QueryCmp_IGLOB)
for repo in repo_query:
repo.enable()
try:
sack.load_repos()
except AttributeError:
# dnf5 < 5.2.0.0
sack.update_and_load_enabled_repos(True)
if self.update_cache and not self.names and not self.list:
self.module.exit_json(
msg="Cache updated",
changed=False,
results=[],
rc=0
)
if self.list:
command = self.list
if command == "updates":
command = "upgrades"
if command in {"installed", "upgrades", "available"}:
query = libdnf5.rpm.PackageQuery(base)
getattr(query, "filter_{}".format(command))()
results = [package_to_dict(package) for package in query]
elif command in {"repos", "repositories"}:
query = libdnf5.repo.RepoQuery(base)
query.filter_enabled(True)
results = [{"repoid": repo.get_id(), "state": "enabled"} for repo in query]
else:
resolve_spec_settings = libdnf5.base.ResolveSpecSettings()
query = libdnf5.rpm.PackageQuery(base)
query.resolve_pkg_spec(command, resolve_spec_settings, True)
results = [package_to_dict(package) for package in query]
self.module.exit_json(msg="", results=results, rc=0)
settings = libdnf5.base.GoalJobSettings()
try:
settings.set_group_with_name(True)
settings.set_with_binaries(False)
except AttributeError:
# dnf5 < 5.2.0.0
settings.group_with_name = True
settings.with_binaries = False
if self.bugfix or self.security:
advisory_query = libdnf5.advisory.AdvisoryQuery(base)
types = []
if self.bugfix:
types.append("bugfix")
if self.security:
types.append("security")
advisory_query.filter_type(types)
conf.skip_unavailable = True # ignore packages that are of a different type, for backwards compat
settings.set_advisory_filter(advisory_query)
goal = libdnf5.base.Goal(base)
results = []
if self.names == ["*"] and self.state == "latest":
goal.add_rpm_upgrade(settings)
elif self.state in {"installed", "present", "latest"}:
upgrade = self.state == "latest"
# FIXME use `is_glob_pattern` function when available:
# https://github.com/rpm-software-management/dnf5/issues/1563
glob_patterns = set("*[?")
for spec in self.names:
if any(set(char) & glob_patterns for char in spec):
# Special case for package specs that contain glob characters.
# For these we skip `is_installed` and `is_newer_version_installed` tests that allow for the
# allow_downgrade feature and pass the package specs to dnf.
# Since allow_downgrade is not available in dnf and while it is relatively easy to implement it for
# package specs that evaluate to a single package, trying to mimic what the dnf machinery would do
# for glob package specs and then filtering those for allow_downgrade appears to always
# result in a naive/inferior solution.
# TODO research how feasible it is to implement the above
if upgrade:
# for upgrade we pass the spec to both upgrade and install, to satisfy both available and installed
# packages evaluated from the glob spec
goal.add_upgrade(spec, settings)
if not self.update_only:
goal.add_install(spec, settings)
elif is_newer_version_installed(base, spec):
if self.allow_downgrade:
goal.add_install(spec, settings)
elif is_installed(base, spec):
if upgrade:
goal.add_upgrade(spec, settings)
else:
if self.update_only:
results.append("Packages providing {} not installed due to update_only specified".format(spec))
else:
goal.add_install(spec, settings)
elif self.state in {"absent", "removed"}:
for spec in self.names:
goal.add_remove(spec, settings)
if self.autoremove:
for pkg in get_unneeded_pkgs(base):
goal.add_rpm_remove(pkg, settings)
goal.set_allow_erasing(self.allowerasing)
transaction = goal.resolve()
if transaction.get_problems():
failures = []
for log_event in transaction.get_resolve_logs():
if log_event.get_problem() == libdnf5.base.GoalProblem_NOT_FOUND and self.state in {"installed", "present", "latest"}:
# NOTE dnf module compat
failures.append("No package {} available.".format(log_event.get_spec()))
else:
failures.append(log_event.to_string())
if transaction.get_problems() & libdnf5.base.GoalProblem_SOLVER_ERROR != 0:
msg = "Depsolve Error occurred"
else:
msg = "Failed to install some of the specified packages"
self.module.fail_json(
msg=msg,
failures=failures,
rc=1,
)
# NOTE dnf module compat
actions_compat_map = {
"Install": "Installed",
"Remove": "Removed",
"Replace": "Installed",
"Upgrade": "Installed",
"Replaced": "Removed",
}
changed = bool(transaction.get_transaction_packages())
for pkg in transaction.get_transaction_packages():
if self.download_only:
action = "Downloaded"
else:
action = libdnf5.base.transaction.transaction_item_action_to_string(pkg.get_action())
results.append("{}: {}".format(actions_compat_map.get(action, action), pkg.get_package().get_nevra()))
msg = ""
if self.module.check_mode:
if results:
msg = "Check mode: No changes made, but would have if not in check mode"
elif changed:
transaction.download()
if not self.download_only:
transaction.set_description("ansible dnf5 module")
result = transaction.run()
if result == libdnf5.base.Transaction.TransactionRunResult_ERROR_GPG_CHECK:
self.module.fail_json(
msg="Failed to validate GPG signatures: {}".format(",".join(transaction.get_gpg_signature_problems())),
failures=[],
rc=1,
)
elif result != libdnf5.base.Transaction.TransactionRunResult_SUCCESS:
self.module.fail_json(
msg="Failed to install some of the specified packages",
failures=["{}: {}".format(transaction.transaction_result_to_string(result), log) for log in transaction.get_transaction_problems()],
rc=1,
)
if not msg and not results:
msg = "Nothing to do"
self.module.exit_json(
results=results,
changed=changed,
msg=msg,
rc=0,
)
def main():
yumdnf_argument_spec["argument_spec"].update(
dict(
auto_install_module_deps=dict(type="bool", default=True),
)
)
module = AnsibleModule(**yumdnf_argument_spec)
try:
Dnf5Module(module).run()
except LIBDNF5_ERRORS as e:
module.fail_json(msg=str(e), failures=[], rc=1)
if __name__ == "__main__":
main()
| Dnf5Module |
python | scipy__scipy | scipy/interpolate/_cubic.py | {
"start": 13908,
"end": 22329
} | class ____(CubicHermiteSpline):
r"""Akima "visually pleasing" interpolator (C1 smooth).
Fit piecewise cubic polynomials, given vectors x and y. The interpolation
method by Akima uses a continuously differentiable sub-spline built from
piecewise cubic polynomials. The resultant curve passes through the given
data points and will appear smooth and natural.
Parameters
----------
x : ndarray, shape (npoints, )
1-D array of monotonically increasing real values.
y : ndarray, shape (..., npoints, ...)
N-D array of real values. The length of ``y`` along the interpolation axis
must be equal to the length of ``x``. Use the ``axis`` parameter to
select the interpolation axis.
axis : int, optional
Axis in the ``y`` array corresponding to the x-coordinate values. Defaults
to ``axis=0``.
method : {'akima', 'makima'}, optional
If ``"makima"``, use the modified Akima interpolation [2]_.
Defaults to ``"akima"``, use the Akima interpolation [1]_.
.. versionadded:: 1.13.0
extrapolate : {bool, None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If None,
``extrapolate`` is set to False.
Methods
-------
__call__
derivative
antiderivative
integrate
solve
roots
See Also
--------
PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
CubicSpline : Cubic spline data interpolator.
PPoly : Piecewise polynomial in terms of coefficients and breakpoints
Notes
-----
.. versionadded:: 0.14
Use only for precise data, as the fitted curve passes through the given
points exactly. This routine is useful for plotting a pleasingly smooth
curve through a few given points for purposes of plotting.
Let :math:`\delta_i = (y_{i+1} - y_i) / (x_{i+1} - x_i)` be the slopes of
the interval :math:`\left[x_i, x_{i+1}\right)`. Akima's derivative at
:math:`x_i` is defined as:
.. math::
d_i = \frac{w_1}{w_1 + w_2}\delta_{i-1} + \frac{w_2}{w_1 + w_2}\delta_i
In the Akima interpolation [1]_ (``method="akima"``), the weights are:
.. math::
\begin{aligned}
w_1 &= |\delta_{i+1} - \delta_i| \\
w_2 &= |\delta_{i-1} - \delta_{i-2}|
\end{aligned}
In the modified Akima interpolation [2]_ (``method="makima"``),
to eliminate overshoot and avoid edge cases of both numerator and
denominator being equal to 0, the weights are modified as follows:
.. math::
\begin{align*}
w_1 &= |\delta_{i+1} - \delta_i| + |\delta_{i+1} + \delta_i| / 2 \\
w_2 &= |\delta_{i-1} - \delta_{i-2}| + |\delta_{i-1} + \delta_{i-2}| / 2
\end{align*}
Examples
--------
Comparison of ``method="akima"`` and ``method="makima"``:
>>> import numpy as np
>>> from scipy.interpolate import Akima1DInterpolator
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(1, 7, 7)
>>> y = np.array([-1, -1, -1, 0, 1, 1, 1])
>>> xs = np.linspace(min(x), max(x), num=100)
>>> y_akima = Akima1DInterpolator(x, y, method="akima")(xs)
>>> y_makima = Akima1DInterpolator(x, y, method="makima")(xs)
>>> fig, ax = plt.subplots()
>>> ax.plot(x, y, "o", label="data")
>>> ax.plot(xs, y_akima, label="akima")
>>> ax.plot(xs, y_makima, label="makima")
>>> ax.legend()
>>> fig.show()
The overshoot that occurred in ``"akima"`` has been avoided in ``"makima"``.
References
----------
.. [1] A new method of interpolation and smooth curve fitting based
on local procedures. Hiroshi Akima, J. ACM, October 1970, 17(4),
589-602. :doi:`10.1145/321607.321609`
.. [2] Makima Piecewise Cubic Interpolation. Cleve Moler and Cosmin Ionita, 2019.
https://blogs.mathworks.com/cleve/2019/04/29/makima-piecewise-cubic-interpolation/
"""
# PchipInterpolator is not generic in scipy-stubs
__class_getitem__ = None
def __init__(self, x, y, axis=0, *, method: Literal["akima", "makima"]="akima",
extrapolate:bool | None = None):
if method not in {"akima", "makima"}:
raise NotImplementedError(f"`method`={method} is unsupported.")
# Original implementation in MATLAB by N. Shamsundar (BSD licensed), see
# https://www.mathworks.com/matlabcentral/fileexchange/1814-akima-interpolation
xp = array_namespace(x, y)
x, dx, y, axis, _ = prepare_input(x, y, axis, xp=xp)
if xp.isdtype(y.dtype, "complex floating"):
msg = ("`Akima1DInterpolator` only works with real values for `y`. "
"If you are trying to use the real components of the passed array, "
"use `np.real` on the array before passing to "
"`Akima1DInterpolator`.")
raise ValueError(msg)
# Akima extrapolation historically False; parent class defaults to True.
extrapolate = False if extrapolate is None else extrapolate
if y.shape[0] == 2:
# edge case: only have two points, use linear interpolation
xv = xp.reshape(x, (x.shape[0],) + (1,)*(y.ndim-1))
hk = xv[1:, ...] - xv[:-1, ...]
mk = (y[1:, ...] - y[:-1, ...]) / hk
t = xp.zeros_like(y)
t[...] = mk
else:
# determine slopes between breakpoints
m = xp.empty((x.shape[0] + 3, ) + y.shape[1:])
dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]
m[2:-2, ...] = xp.diff(y, axis=0) / dx
# add two additional points on the left ...
m[1, ...] = 2. * m[2, ...] - m[3, ...]
m[0, ...] = 2. * m[1, ...] - m[2, ...]
# ... and on the right
m[-2, ...] = 2. * m[-3, ...] - m[-4, ...]
m[-1, ...] = 2. * m[-2, ...] - m[-3, ...]
# if m1 == m2 != m3 == m4, the slope at the breakpoint is not
# defined. This is the fill value:
t = .5 * (m[3:, ...] + m[:-3, ...])
# get the denominator of the slope t
dm = xp.abs(xp.diff(m, axis=0))
if method == "makima":
pm = xp.abs(m[1:, ...] + m[:-1, ...])
f1 = dm[2:, ...] + 0.5 * pm[2:, ...]
f2 = dm[:-2, ...] + 0.5 * pm[:-2, ...]
else:
f1 = dm[2:, ...]
f2 = dm[:-2, ...]
# makima is more numerically stable for small f12,
# so a finite cutoff should not improve any behavior
# however, akima has a qualitative discontinuity near f12=0
# a finite cutoff moves it, but cannot remove it.
# the cutoff break_mult could be made a keyword argument
# method='akima' also benefits from a check for m2=m3
break_mult = 1.e-9
f12 = f1 + f2
# These are the mask of where the slope at breakpoint is defined:
mmax = xp.max(f12) if xp_size(f12) > 0 else -xp.inf
ind = xp.nonzero(f12 > break_mult * mmax)
x_ind, y_ind = ind[0], ind[1:]
# Set the slope at breakpoint
t[ind] = m[(x_ind + 1,) + y_ind] + (
(f2[ind] / f12[ind])
* (m[(x_ind + 2,) + y_ind] - m[(x_ind + 1,) + y_ind])
)
super().__init__(x, y, t, axis=0, extrapolate=extrapolate)
self.axis = axis
def extend(self, c, x, right=True):
raise NotImplementedError("Extending a 1-D Akima interpolator is not "
"yet implemented")
# These are inherited from PPoly, but they do not produce an Akima
# interpolator. Hence stub them out.
@classmethod
def from_spline(cls, tck, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
@xp_capabilities(
cpu_only=True, jax_jit=False,
skip_backends=[
("dask.array",
"https://github.com/data-apis/array-api-extra/issues/488")
]
)
| Akima1DInterpolator |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1310536,
"end": 1313253
} | class ____(sgqlc.types.Type, Node):
"""A column inside a project."""
__schema__ = github_schema
__field_names__ = ("cards", "created_at", "database_id", "name", "project", "purpose", "resource_path", "updated_at", "url")
cards = sgqlc.types.Field(
sgqlc.types.non_null(ProjectCardConnection),
graphql_name="cards",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"archived_states",
sgqlc.types.Arg(
sgqlc.types.list_of(ProjectCardArchivedState), graphql_name="archivedStates", default=("ARCHIVED", "NOT_ARCHIVED")
),
),
)
),
)
"""List of cards in the column
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `archived_states` (`[ProjectCardArchivedState]`): A list of
archived states to filter the cards by (default: `[ARCHIVED,
NOT_ARCHIVED]`)
"""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
"""Identifies the primary key from the database."""
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
"""The project column's name."""
project = sgqlc.types.Field(sgqlc.types.non_null(Project), graphql_name="project")
"""The project that contains this column."""
purpose = sgqlc.types.Field(ProjectColumnPurpose, graphql_name="purpose")
"""The semantic purpose of the column"""
resource_path = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="resourcePath")
"""The HTTP path for this project column"""
updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt")
"""Identifies the date and time when the object was last updated."""
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
"""The HTTP URL for this project column"""
| ProjectColumn |
python | Pylons__pyramid | tests/test_authentication.py | {
"start": 54747,
"end": 58520
} | class ____(unittest.TestCase):
def _getTargetClass(self):
from pyramid.authentication import SessionAuthenticationPolicy
return SessionAuthenticationPolicy
def _makeOne(self, callback=None, prefix=''):
return self._getTargetClass()(prefix=prefix, callback=callback)
def test_class_implements_IAuthenticationPolicy(self):
from zope.interface.verify import verifyClass
from pyramid.interfaces import IAuthenticationPolicy
verifyClass(IAuthenticationPolicy, self._getTargetClass())
def test_instance_implements_IAuthenticationPolicy(self):
from zope.interface.verify import verifyObject
from pyramid.interfaces import IAuthenticationPolicy
verifyObject(IAuthenticationPolicy, self._makeOne())
def test_unauthenticated_userid_returns_None(self):
request = DummyRequest()
policy = self._makeOne()
self.assertEqual(policy.unauthenticated_userid(request), None)
def test_unauthenticated_userid(self):
request = DummyRequest(session={'userid': 'fred'})
policy = self._makeOne()
self.assertEqual(policy.unauthenticated_userid(request), 'fred')
def test_authenticated_userid_no_cookie_identity(self):
request = DummyRequest()
policy = self._makeOne()
self.assertEqual(policy.authenticated_userid(request), None)
def test_authenticated_userid_callback_returns_None(self):
request = DummyRequest(session={'userid': 'fred'})
def callback(userid, request):
return None
policy = self._makeOne(callback)
self.assertEqual(policy.authenticated_userid(request), None)
def test_authenticated_userid(self):
request = DummyRequest(session={'userid': 'fred'})
def callback(userid, request):
return True
policy = self._makeOne(callback)
self.assertEqual(policy.authenticated_userid(request), 'fred')
def test_effective_principals_no_identity(self):
from pyramid.authorization import Everyone
request = DummyRequest()
policy = self._makeOne()
self.assertEqual(policy.effective_principals(request), [Everyone])
def test_effective_principals_callback_returns_None(self):
from pyramid.authorization import Everyone
request = DummyRequest(session={'userid': 'fred'})
def callback(userid, request):
return None
policy = self._makeOne(callback)
self.assertEqual(policy.effective_principals(request), [Everyone])
def test_effective_principals(self):
from pyramid.authorization import Authenticated, Everyone
request = DummyRequest(session={'userid': 'fred'})
def callback(userid, request):
return ['group.foo']
policy = self._makeOne(callback)
self.assertEqual(
policy.effective_principals(request),
[Everyone, Authenticated, 'fred', 'group.foo'],
)
def test_remember(self):
request = DummyRequest()
policy = self._makeOne()
result = policy.remember(request, 'fred')
self.assertEqual(request.session.get('userid'), 'fred')
self.assertEqual(result, [])
def test_forget(self):
request = DummyRequest(session={'userid': 'fred'})
policy = self._makeOne()
result = policy.forget(request)
self.assertEqual(request.session.get('userid'), None)
self.assertEqual(result, [])
def test_forget_no_identity(self):
request = DummyRequest()
policy = self._makeOne()
result = policy.forget(request)
self.assertEqual(request.session.get('userid'), None)
self.assertEqual(result, [])
| TestSessionAuthenticationPolicy |
python | astropy__astropy | astropy/table/tests/test_table.py | {
"start": 34704,
"end": 34953
} | class ____(SetupData):
def test_column_view(self, table_types):
self._setup(table_types)
t = self.t
a = t.columns["a"]
a[2] = 10
assert t["a"][2] == 10
@pytest.mark.usefixtures("table_types")
| TestTableColumn |
python | PrefectHQ__prefect | src/integrations/prefect-azure/prefect_azure/credentials.py | {
"start": 2597,
"end": 10154
} | class ____(Block):
"""
Stores credentials for authenticating with Azure Blob Storage.
Args:
account_url: The URL for your Azure storage account. If provided, the account
URL will be used to authenticate with the discovered default Azure
credentials.
connection_string: The connection string to your Azure storage account. If
provided, the connection string will take precedence over the account URL.
Example:
Load stored Azure Blob Storage credentials and retrieve a blob service client:
```python
from prefect_azure import AzureBlobStorageCredentials
azure_credentials_block = AzureBlobStorageCredentials.load("BLOCK_NAME")
blob_service_client = azure_credentials_block.get_blob_client()
```
"""
_block_type_name = "Azure Blob Storage Credentials"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/54e3fa7e00197a4fbd1d82ed62494cb58d08c96a-250x250.png" # noqa
_documentation_url = "https://docs.prefect.io/integrations/prefect-azure" # noqa
_credential: Optional[ADefaultAzureCredential] = PrivateAttr(default=None)
connection_string: Optional[SecretStr] = Field(
default=None,
description=(
"The connection string to your Azure storage account. If provided, the "
"connection string will take precedence over the account URL."
),
)
account_url: Optional[str] = Field(
default=None,
title="Account URL",
description=(
"The URL for your Azure storage account. If provided, the account "
"URL will be used to authenticate with the discovered default "
"Azure credentials."
),
)
@model_validator(mode="before")
@classmethod
def check_connection_string_or_account_url(
cls, values: Dict[str, Any]
) -> Dict[str, Any]:
"""
Checks that either a connection string or account URL is provided, not both.
"""
has_account_url = values.get("account_url") is not None
has_conn_str = values.get("connection_string") is not None
if not has_account_url and not has_conn_str:
raise ValueError(
"Must provide either a connection string or an account URL."
)
if has_account_url and has_conn_str:
raise ValueError(
"Must provide either a connection string or account URL, but not both."
)
return values
@_raise_help_msg("blob_storage")
def get_client(self) -> "BlobServiceClient":
"""
Returns an authenticated base Blob Service client that can be used to create
other clients for Azure services.
Example:
Create an authorized Blob Service session
```python
import os
import asyncio
from prefect import flow
from prefect_azure import AzureBlobStorageCredentials
@flow
async def example_get_client_flow():
connection_string = os.getenv("AZURE_STORAGE_CONNECTION_STRING")
azure_credentials = AzureBlobStorageCredentials(
connection_string=connection_string,
)
async with azure_credentials.get_client() as blob_service_client:
# run other code here
pass
asyncio.run(example_get_client_flow())
```
"""
if self.connection_string is None:
self._credential = self._credential or ADefaultAzureCredential()
return BlobServiceClient(
account_url=self.account_url,
credential=self._credential,
)
return BlobServiceClient.from_connection_string(
self.connection_string.get_secret_value()
)
@_raise_help_msg("blob_storage")
def get_blob_client(self, container: str, blob: str) -> "BlobClient":
"""
Returns an authenticated Blob client that can be used to
download and upload blobs.
Args:
container: Name of the Blob Storage container to retrieve from.
blob: Name of the blob within this container to retrieve.
Example:
Create an authorized Blob session
```python
import os
import asyncio
from prefect import flow
from prefect_azure import AzureBlobStorageCredentials
@flow
async def example_get_blob_client_flow():
connection_string = os.getenv("AZURE_STORAGE_CONNECTION_STRING")
azure_credentials = AzureBlobStorageCredentials(
connection_string=connection_string,
)
async with azure_credentials.get_blob_client(
"container", "blob"
) as blob_client:
# run other code here
pass
asyncio.run(example_get_blob_client_flow())
```
"""
if self.connection_string is None:
self._credential = self._credential or ADefaultAzureCredential()
return BlobClient(
account_url=self.account_url,
container_name=container,
credential=self._credential,
blob_name=blob,
)
blob_client = BlobClient.from_connection_string(
self.connection_string.get_secret_value(), container, blob
)
return blob_client
@_raise_help_msg("blob_storage")
def get_container_client(self, container: str) -> "ContainerClient":
"""
Returns an authenticated Container client that can be used to create clients
for Azure services.
Args:
container: Name of the Blob Storage container to retrieve from.
Example:
Create an authorized Container session
```python
import os
import asyncio
from prefect import flow
from prefect_azure import AzureBlobStorageCredentials
@flow
async def example_get_container_client_flow():
connection_string = os.getenv("AZURE_STORAGE_CONNECTION_STRING")
azure_credentials = AzureBlobStorageCredentials(
connection_string=connection_string,
)
async with azure_credentials.get_container_client(
"container"
) as container_client:
# run other code here
pass
asyncio.run(example_get_container_client_flow())
```
"""
if self.connection_string is None:
self._credential = self._credential or ADefaultAzureCredential()
return ContainerClient(
account_url=self.account_url,
container_name=container,
credential=self._credential,
)
container_client = ContainerClient.from_connection_string(
self.connection_string.get_secret_value(), container
)
return container_client
async def aclose(self):
"""Cleanup resources."""
if self._credential:
await self._credential.close()
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.aclose()
| AzureBlobStorageCredentials |
python | apache__airflow | task-sdk/tests/task_sdk/execution_time/test_task_runner.py | {
"start": 90342,
"end": 98855
} | class ____:
FROM = "from@airflow"
@pytest.mark.parametrize(
("emails", "sent"),
[
pytest.param(
"test@example.com",
True,
id="one-email",
),
pytest.param(
["test@example.com"],
True,
id="one-email-as-list",
),
pytest.param(
["test@example.com", "test2@example.com"],
True,
id="multiple-email-as-list",
),
pytest.param(None, False, id="no-email"),
pytest.param([], False, id="no-email-as-list"),
],
)
def test_email_on_retry(self, emails, sent, create_runtime_ti, mock_supervisor_comms):
"""Test email notification on task retry."""
from airflow.sdk.execution_time.task_runner import finalize, run
class ZeroDivsionOperator(BaseOperator):
def execute(self, context):
1 // 0
task = ZeroDivsionOperator(
task_id="divide_by_zero_task",
email=emails,
email_on_retry=True,
retries=2,
)
runtime_ti = create_runtime_ti(task=task)
context = runtime_ti.get_template_context()
log = mock.MagicMock()
with conf_vars({("email", "from_email"): self.FROM}):
with mock.patch("airflow.providers.smtp.notifications.smtp.SmtpNotifier") as mock_smtp_notifier:
state, _, error = run(runtime_ti, context, log)
finalize(runtime_ti, state, context, log, error)
if not sent:
mock_smtp_notifier.assert_not_called()
else:
mock_smtp_notifier.assert_called_once()
kwargs = mock_smtp_notifier.call_args.kwargs
assert kwargs["from_email"] == self.FROM
assert kwargs["to"] == emails
assert (
kwargs["html_content"]
== 'Try {{try_number}} out of {{max_tries + 1}}<br>Exception:<br>{{exception_html}}<br>Log: <a href="{{ti.log_url}}">Link</a><br>Host: {{ti.hostname}}<br>Mark success: <a href="{{ti.mark_success_url}}">Link</a><br>'
)
@pytest.mark.parametrize(
("emails", "sent"),
[
pytest.param(
"test@example.com",
True,
id="one-email",
),
pytest.param(
["test@example.com"],
True,
id="one-email-as-list",
),
pytest.param(
["test@example.com", "test2@example.com"],
True,
id="multiple-email-as-list",
),
pytest.param(None, False, id="no-email"),
pytest.param([], False, id="no-email-as-list"),
],
)
def test_email_on_failure(self, emails, sent, create_runtime_ti, mock_supervisor_comms):
"""Test email notification on task failure."""
from airflow.exceptions import AirflowFailException
from airflow.sdk.execution_time.task_runner import finalize, run
class FailingOperator(BaseOperator):
def execute(self, context):
raise AirflowFailException("Task failed on purpose")
task = FailingOperator(
task_id="failing_task",
email=emails,
email_on_failure=True,
)
runtime_ti = create_runtime_ti(task=task)
context = runtime_ti.get_template_context()
log = mock.MagicMock()
with conf_vars({("email", "from_email"): self.FROM}):
with mock.patch("airflow.providers.smtp.notifications.smtp.SmtpNotifier") as mock_smtp_notifier:
state, _, error = run(runtime_ti, context, log)
finalize(runtime_ti, state, context, log, error)
if not sent:
mock_smtp_notifier.assert_not_called()
else:
mock_smtp_notifier.assert_called_once()
kwargs = mock_smtp_notifier.call_args.kwargs
assert kwargs["from_email"] == self.FROM
assert kwargs["to"] == emails
assert (
kwargs["html_content"]
== 'Try {{try_number}} out of {{max_tries + 1}}<br>Exception:<br>{{exception_html}}<br>Log: <a href="{{ti.log_url}}">Link</a><br>Host: {{ti.hostname}}<br>Mark success: <a href="{{ti.mark_success_url}}">Link</a><br>'
)
def test_email_with_custom_templates(self, create_runtime_ti, mock_supervisor_comms, tmp_path):
"""Test email notification respects custom subject and html_content templates."""
from airflow.exceptions import AirflowFailException
subject_template = tmp_path / "custom_subject.jinja2"
html_template = tmp_path / "custom_html.html"
subject_template.write_text("Custom Subject: Task {{ti.task_id}} Failed\n")
html_template.write_text(
"<h1>Custom Template</h1><p>Task: {{ti.task_id}}</p><p>Error: {{exception_html}}</p>"
)
class FailingOperator(BaseOperator):
def execute(self, context):
raise AirflowFailException("Task failed for template test")
task = FailingOperator(
task_id="template_test_task",
email=["test@example.com"],
email_on_failure=True,
)
runtime_ti = create_runtime_ti(task=task)
context = runtime_ti.get_template_context()
log = mock.MagicMock()
with conf_vars(
{
("email", "subject_template"): str(subject_template),
("email", "html_content_template"): str(html_template),
("email", "from_email"): self.FROM,
}
):
with mock.patch("airflow.providers.smtp.notifications.smtp.SmtpNotifier") as mock_smtp_notifier:
state, _, error = run(runtime_ti, context, log)
finalize(runtime_ti, state, context, log, error)
mock_smtp_notifier.assert_called_once()
kwargs = mock_smtp_notifier.call_args.kwargs
assert kwargs["subject"] == "Custom Subject: Task {{ti.task_id}} Failed\n"
assert (
kwargs["html_content"]
== "<h1>Custom Template</h1><p>Task: {{ti.task_id}}</p><p>Error: {{exception_html}}</p>"
)
assert kwargs["from_email"] == self.FROM
@pytest.mark.enable_redact
def test_rendered_templates_mask_secrets(self, create_runtime_ti, mock_supervisor_comms):
"""Test that secrets registered with mask_secret() are redacted in rendered template fields."""
from unittest.mock import call
from airflow.sdk._shared.secrets_masker import _secrets_masker
from airflow.sdk.log import mask_secret
_secrets_masker().add_mask("admin_user_12345", None)
class CustomOperator(BaseOperator):
template_fields = ("username", "region")
def __init__(self, username, region, *args, **kwargs):
super().__init__(*args, **kwargs)
self.username = username
self.region = region
def execute(self, context):
# Only mask username
mask_secret(self.username)
task = CustomOperator(
task_id="test_masking",
username="admin_user_12345",
region="us-west-2",
)
runtime_ti = create_runtime_ti(task=task, dag_id="test_secrets_in_rtif")
run(runtime_ti, context=runtime_ti.get_template_context(), log=mock.MagicMock())
assert (
call(MaskSecret(value="admin_user_12345", name=None, type="MaskSecret"))
in mock_supervisor_comms.send.mock_calls
)
# Region should not be masked
assert (
call(MaskSecret(value="us-west-2", name=None, type="MaskSecret"))
not in mock_supervisor_comms.send.mock_calls
)
assert (
call(
msg=SetRenderedFields(
rendered_fields={"username": "***", "region": "us-west-2"},
type="SetRenderedFields",
)
)
in mock_supervisor_comms.send.mock_calls
)
| TestEmailNotifications |
python | getsentry__sentry | tests/sentry/web/frontend/test_react_page.py | {
"start": 594,
"end": 20718
} | class ____(TestCase):
def test_redirects_unauthenticated_request(self) -> None:
owner = self.create_user("bar@example.com")
org = self.create_organization(owner=owner)
path = reverse("sentry-organization-home", args=[org.slug])
resp = self.client.get(path)
self.assertRedirects(resp, reverse("sentry-auth-organization", args=[org.slug]))
assert resp["X-Robots-Tag"] == "noindex, nofollow"
def test_superuser_can_load(self) -> None:
org = self.create_organization(owner=self.user)
path = reverse("sentry-organization-home", args=[org.slug])
self.login_as(self.user)
resp = self.client.get(path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, "sentry/base-react.html")
assert resp.context["request"]
def test_redirects_user_to_auth_without_membership(self) -> None:
owner = self.create_user("bar@example.com")
org = self.create_organization(owner=owner)
non_member = self.create_user("foo@example.com")
path = reverse("sentry-organization-home", args=[org.slug])
self.login_as(non_member)
resp = self.client.get(path)
self.assertRedirects(resp, reverse("sentry-auth-organization", args=[org.slug]))
# ensure we don't redirect to auth if it's not a valid org
path = reverse("sentry-organization-home", args=["foobar"])
resp = self.client.get(path)
assert resp.status_code == 302
assert resp["Location"] != reverse("sentry-auth-organization", args=[org.slug])
# ensure we don't redirect with valid membership
path = reverse("sentry-organization-home", args=[org.slug])
self.login_as(owner)
resp = self.client.get(path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, "sentry/base-react.html")
assert resp.context["request"]
def test_inactive_superuser_bypasses_server_auth(self) -> None:
owner = self.create_user("bar@example.com")
org = self.create_organization(owner=owner)
non_member = self.create_user("foo@example.com", is_superuser=True)
path = reverse("sentry-organization-home", args=[org.slug])
self.login_as(non_member)
resp = self.client.get(path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, "sentry/base-react.html")
assert resp.context["request"]
def test_org_subpages_capture_slug(self) -> None:
owner = self.create_user("bar@example.com")
org = self.create_organization(owner=owner)
# User is *not* logged in. Check for redirect to org's auth login.
for path in [
f"/organizations/{org.slug}/settings/",
f"/organizations/{org.slug}/discover/",
f"/organizations/{org.slug}/releases/1.0/?project=1",
f"/organizations/{org.slug}/new_page_that_does_not_exist_yet/",
f"/settings/{org.slug}/developer-settings/",
f"/settings/{org.slug}/new_page_that_does_not_exist_yet/",
]:
resp = self.client.get(path)
assert resp.status_code == 302
assert resp.headers["Location"] == f"/auth/login/{org.slug}/"
def test_redirect_to_customer_domain(self) -> None:
user = self.create_user("bar@example.com")
org = self.create_organization(owner=user)
self.login_as(user)
with self.feature({"system:multi-region": False}):
assert "activeorg" not in self.client.session
response = self.client.get(reverse("sentry-organization-issue-list", args=[org.slug]))
assert response.status_code == 200
assert self.client.session["activeorg"]
with self.feature({"system:multi-region": True}):
# Redirect to customer domain
response = self.client.get(
reverse("sentry-organization-issue-list", args=[org.slug]), follow=True
)
assert response.status_code == 200
assert response.redirect_chain == [(f"http://{org.slug}.testserver/issues/", 302)]
response = self.client.get(reverse("issues"), follow=True)
assert response.status_code == 200
assert response.redirect_chain == [(f"http://{org.slug}.testserver/issues/", 302)]
response = self.client.get("/", follow=True)
assert response.status_code == 200
assert response.redirect_chain == [
(f"/organizations/{org.slug}/issues/", 302),
(f"http://{org.slug}.testserver/issues/", 302),
]
# No redirect if customer domain is already being used
response = self.client.get(
reverse("sentry-organization-issue-list", args=[org.slug]),
HTTP_HOST=f"{org.slug}.testserver",
follow=True,
)
assert response.status_code == 200
assert response.redirect_chain == []
response = self.client.get(
reverse("issues"),
HTTP_HOST=f"{org.slug}.testserver",
follow=True,
)
assert response.status_code == 200
assert response.redirect_chain == []
response = self.client.get(
"/",
HTTP_HOST=f"{org.slug}.testserver",
follow=True,
)
assert response.status_code == 200
assert response.redirect_chain == [(f"http://{org.slug}.testserver/issues/", 302)]
@override_regions((us,))
def test_redirect_to_customer_domain_from_region_domain(self) -> None:
user = self.create_user("bar@example.com")
org = self.create_organization(owner=user)
self.login_as(user)
# Force activeorg state
self.client.session["activeorg"] = org.slug
self.client.session.save()
with self.feature({"system:multi-region": True}):
response = self.client.get(
"/issues/",
HTTP_HOST="us.testserver",
)
assert response.status_code == 302
assert response["Location"] == f"http://{org.slug}.testserver/issues/"
def test_does_not_redirect_to_customer_domain_for_unsupported_paths(self) -> None:
user = self.create_user("bar@example.com")
org = self.create_organization(owner=user)
self.login_as(user)
with self.feature({"system:multi-region": True}):
url_name = "sentry-organization-create"
url_name_is_non_customer_domain = any(
fnmatch(url_name, p) for p in NON_CUSTOMER_DOMAIN_URL_NAMES
)
assert (
url_name_is_non_customer_domain
), "precondition missing. org-create should be non-customer-domain"
# Induce last active org.
assert "activeorg" not in self.client.session
response = self.client.get(
reverse(
"sentry-organization-issue-list",
args=[org.slug],
),
HTTP_HOST=f"{org.slug}.testserver",
)
assert response.status_code == 200
assert self.client.session["activeorg"]
# No redirect to customer domain if path is not meant to be accessed in customer domain context.
# There should be no redirect to the last active org.
response = self.client.get(
reverse(url_name),
follow=True,
)
assert response.status_code == 200
assert response.redirect_chain == []
def test_non_customer_domain_url_names(self) -> None:
user = self.create_user("bar@example.com")
org = self.create_organization(owner=user)
self.login_as(user)
def extract_url_names(urlpatterns, parents):
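# Recursively walk Django's URL patterns, yielding the names of patterns whose views subclass ReactMixin (i.e. pages rendered by the React app).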
for pattern in urlpatterns:
path = parents[:] + [pattern]
if isinstance(pattern, URLResolver):
yield from extract_url_names(pattern.url_patterns, path)
else:
url_pattern = path[-1]
url_name = url_pattern.name
if (
url_name
and url_pattern.callback
and hasattr(url_pattern.callback, "view_class")
and issubclass(url_pattern.callback.view_class, ReactMixin)
):
yield url_name
url_names = list(extract_url_names(get_resolver().url_patterns, []))
for url_name in url_names:
for url_name_pattern in NON_CUSTOMER_DOMAIN_URL_NAMES:
if not fnmatch(url_name, url_name_pattern):
continue
path = reverse(url_name)
# Does not redirect a non-customer domain URL
response = self.client.get(path)
self.assertTemplateUsed(response, "sentry/base-react.html")
assert response.status_code == 200
# Redirects for a customer domain URL
response = self.client.get(path, HTTP_HOST=f"{org.slug}.testserver")
assert response.status_code == 302
assert response["Location"] == f"http://testserver{path}"
def test_handles_unknown_url_name(self) -> None:
user = self.create_user("bar@example.com")
org = self.create_organization(owner=user)
self.login_as(user)
response = self.client.get(f"/settings/{org.slug}/projects/albertos-apples/keys/")
assert response.status_code == 200
self.assertTemplateUsed(response, "sentry/base-react.html")
def test_customer_domain_non_member(self) -> None:
self.create_organization(owner=self.user)
other_org = self.create_organization()
self.login_as(self.user)
with self.feature({"system:multi-region": True}):
# Should not be able to induce activeorg
assert "activeorg" not in self.client.session
response = self.client.get(
"/",
HTTP_HOST=f"{other_org.slug}.testserver",
follow=True,
)
assert response.status_code == 200
assert response.redirect_chain == [(f"http://{other_org.slug}.testserver/issues/", 302)]
assert "activeorg" not in self.client.session
def _run_customer_domain_elevated_privileges(self, is_superuser: bool, is_staff: bool):
user = self.create_user("foo@example.com", is_superuser=is_superuser, is_staff=is_staff)
org = self.create_organization(owner=user)
other_org = self.create_organization()
self.login_as(user, superuser=is_superuser, staff=is_staff)
with self.feature({"system:multi-region": True}):
# Induce activeorg
assert "activeorg" not in self.client.session
response = self.client.get(
"/",
HTTP_HOST=f"{other_org.slug}.testserver",
follow=True,
)
assert response.status_code == 200
if is_superuser:
assert response.redirect_chain == [
(f"http://{other_org.slug}.testserver/issues/", 302)
]
assert self.client.session["activeorg"] == other_org.slug
else:
assert response.redirect_chain == [
(f"http://{other_org.slug}.testserver/auth/login/{other_org.slug}/", 302)
]
assert "activeorg" not in self.client.session
# Accessing org without customer domain as superuser and/or staff.
response = self.client.get(
reverse("sentry-organization-issue-list", args=[org.slug]),
follow=True,
)
assert response.status_code == 200
assert response.redirect_chain == []
def test_customer_domain_non_member_org_superuser(self) -> None:
self._run_customer_domain_elevated_privileges(is_superuser=True, is_staff=False)
@override_options({"staff.ga-rollout": True})
def test_customer_domain_non_member_org_staff(self) -> None:
self._run_customer_domain_elevated_privileges(is_superuser=False, is_staff=True)
@override_options({"staff.ga-rollout": True})
def test_customer_domain_non_member_org_superuser_and_staff(self) -> None:
self._run_customer_domain_elevated_privileges(is_superuser=True, is_staff=True)
def test_customer_domain_superuser(self) -> None:
org = self.create_organization(owner=self.user)
other_org = self.create_organization(slug="albertos-apples")
self.login_as(self.user)
with self.feature({"system:multi-region": True}):
# Induce activeorg
response = self.client.get(
"/",
HTTP_HOST=f"{org.slug}.testserver",
follow=True,
)
assert response.status_code == 200
assert response.redirect_chain == [(f"http://{org.slug}.testserver/issues/", 302)]
assert self.client.session["activeorg"] == org.slug
# Access another org as superuser on customer domain
response = self.client.get("/", HTTP_HOST=f"{other_org.slug}.testserver", follow=True)
assert response.status_code == 200
assert response.redirect_chain == [
(f"http://{other_org.slug}.testserver/issues/", 302),
]
def test_customer_domain_loads(self) -> None:
org = self.create_organization(owner=self.user, status=OrganizationStatus.ACTIVE)
self.login_as(self.user)
with self.feature({"system:multi-region": True}):
response = self.client.get(
"/issues/",
HTTP_HOST=f"{org.slug}.testserver",
)
assert response.status_code == 200
self.assertTemplateUsed(response, "sentry/base-react.html")
assert response.context["request"]
assert self.client.session["activeorg"] == org.slug
def test_customer_domain_org_pending_deletion(self) -> None:
org = self.create_organization(owner=self.user, status=OrganizationStatus.PENDING_DELETION)
self.login_as(self.user)
with self.feature({"system:multi-region": True}):
response = self.client.get(
"/issues/",
HTTP_HOST=f"{org.slug}.testserver",
follow=True,
)
assert response.status_code == 200
assert response.redirect_chain == [
(f"http://{org.slug}.testserver/restore/", 302),
]
assert "activeorg" in self.client.session
def test_customer_domain_org_deletion_in_progress(self) -> None:
org = self.create_organization(
owner=self.user, status=OrganizationStatus.DELETION_IN_PROGRESS
)
self.login_as(self.user)
with self.feature({"system:multi-region": True}):
response = self.client.get(
"/issues/",
HTTP_HOST=f"{org.slug}.testserver",
follow=True,
)
assert response.status_code == 200
assert response.redirect_chain == [
("http://testserver/organizations/new/", 302),
]
assert "activeorg" in self.client.session
def test_document_policy_header_when_flag_is_enabled(self) -> None:
org = self.create_organization(owner=self.user)
self.login_as(self.user)
with self.feature({"organizations:profiling-browser": [org.slug]}):
response = self.client.get(
"/issues/",
HTTP_HOST=f"{org.slug}.testserver",
follow=True,
)
assert response.status_code == 200
assert response.headers["Document-Policy"] == "js-profiling"
def test_document_policy_header_when_flag_is_disabled(self) -> None:
org = self.create_organization(owner=self.user)
self.login_as(self.user)
response = self.client.get(
"/issues/",
HTTP_HOST=f"{org.slug}.testserver",
follow=True,
)
assert response.status_code == 200
assert "Document-Policy" not in response.headers
def test_dns_prefetch(self) -> None:
us_region = Region("us", 1, "https://us.testserver", RegionCategory.MULTI_TENANT)
de_region = Region("de", 1, "https://de.testserver", RegionCategory.MULTI_TENANT)
with override_regions(regions=[us_region, de_region]):
user = self.create_user("bar@example.com")
org = self.create_organization(owner=user)
self.login_as(user)
response = self.client.get("/issues/", HTTP_HOST=f"{org.slug}.testserver")
assert response.status_code == 200
response_body = response.content
assert '<link rel="dns-prefetch" href="http://us.testserver"' in response_body.decode(
"utf-8"
)
def test_preconnect(self) -> None:
user = self.create_user("bar@example.com")
org = self.create_organization(owner=user)
self.login_as(user)
with self.settings(STATIC_ORIGIN="https://s1.sentry-cdn.com"):
response = self.client.get("/issues/", HTTP_HOST=f"{org.slug}.testserver")
assert response.status_code == 200
response_body = response.content
assert (
'<link rel="preconnect" href="https://s1.sentry-cdn.com"'
in response_body.decode("utf-8")
)
def test_prefers_chonk_ui_enforced(self) -> None:
user = self.create_user("bar@example.com")
org = self.create_organization(owner=user)
self.login_as(user)
with self.feature(
{
"organizations:chonk-ui": [org.slug],
"organizations:chonk-ui-enforce": [org.slug],
}
):
response = self.client.get("/issues/", HTTP_HOST=f"{org.slug}.testserver")
assert response.status_code == 200
assert response.context["prefers_chonk_ui"] is True
def test_prefers_chonk_ui_user_preference(self) -> None:
from sentry.users.models.user_option import UserOption
user = self.create_user("bar@example.com")
org = self.create_organization(owner=user)
self.login_as(user)
UserOption.objects.set_value(user=user, key="prefers_chonk_ui", value=True)
with self.feature({"organizations:chonk-ui": [org.slug]}):
response = self.client.get("/issues/", HTTP_HOST=f"{org.slug}.testserver")
assert response.status_code == 200
assert response.context["prefers_chonk_ui"] is True
def test_prefers_chonk_ui_disabled(self) -> None:
user = self.create_user("bar@example.com")
org = self.create_organization(owner=user)
self.login_as(user)
response = self.client.get("/issues/", HTTP_HOST=f"{org.slug}.testserver")
assert response.status_code == 200
assert response.context["prefers_chonk_ui"] is False
def test_prefers_chonk_ui_enforce_overrides_user_preference(self) -> None:
from sentry.users.models.user_option import UserOption
user = self.create_user("bar@example.com")
org = self.create_organization(owner=user)
self.login_as(user)
UserOption.objects.set_value(user=user, key="prefers_chonk_ui", value=False)
with self.feature(
{
"organizations:chonk-ui": [org.slug],
"organizations:chonk-ui-enforce": [org.slug],
}
):
response = self.client.get("/issues/", HTTP_HOST=f"{org.slug}.testserver")
assert response.status_code == 200
assert response.context["prefers_chonk_ui"] is True
| ReactPageViewTest |
python | mlflow__mlflow | mlflow/utils/databricks_utils.py | {
"start": 8723,
"end": 22881
} | class ____:
spark: "SparkConnectSession"
image_version: str
runtime_version: str
platform_machine: str
mlflow_version: str
_dbconnect_udf_sandbox_info_cache: DBConnectUDFSandboxInfo | None = None
def get_dbconnect_udf_sandbox_info(spark):
"""
Get Databricks UDF sandbox info which includes the following fields:
- image_version like
'{major_version}.{minor_version}' or 'client.{major_version}.{minor_version}'
- runtime_version like '{major_version}.{minor_version}'
- platform_machine like 'x86_64' or 'aarch64'
- mlflow_version
"""
global _dbconnect_udf_sandbox_info_cache
from pyspark.sql.functions import pandas_udf
if (
_dbconnect_udf_sandbox_info_cache is not None
and spark is _dbconnect_udf_sandbox_info_cache.spark
):
return _dbconnect_udf_sandbox_info_cache
# version is like '15.4.x-scala2.12'
version = spark.sql("SELECT current_version().dbr_version").collect()[0][0]
major, minor, *_rest = version.split(".")
runtime_version = f"{major}.{minor}"
# For Databricks Serverless python REPL,
# the UDF sandbox runs on the client image, which has a version like 'client.1.1';
# in other cases, the UDF sandbox runs on the Databricks runtime image with a version like '15.4'.
if is_in_databricks_runtime():
_dbconnect_udf_sandbox_info_cache = DBConnectUDFSandboxInfo(
spark=_get_active_spark_session(),
runtime_version=runtime_version,
image_version=get_databricks_runtime_version(),
platform_machine=platform.machine(),
# In databricks runtime, driver and executor should have the
# same version.
mlflow_version=mlflow.__version__,
)
else:
image_version = runtime_version
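# Probe the UDF sandbox by running a trivial pandas UDF on the cluster; it reports the executor's platform architecture and installed MLflow version (empty string if MLflow is not installed).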
@pandas_udf("string")
def f(_):
import pandas as pd
platform_machine = platform.machine()
try:
import mlflow
mlflow_version = mlflow.__version__
except ImportError:
mlflow_version = ""
return pd.Series([f"{platform_machine}\n{mlflow_version}"])
platform_machine, mlflow_version = (
spark.range(1).select(f("id")).collect()[0][0].split("\n")
)
if mlflow_version == "":
mlflow_version = None
_dbconnect_udf_sandbox_info_cache = DBConnectUDFSandboxInfo(
spark=spark,
image_version=image_version,
runtime_version=runtime_version,
platform_machine=platform_machine,
mlflow_version=mlflow_version,
)
return _dbconnect_udf_sandbox_info_cache
def is_databricks_serverless(spark):
"""
Return True if running on Databricks Serverless notebook or
on Databricks Connect client that connects to Databricks Serverless.
"""
from mlflow.utils.spark_utils import is_spark_connect_mode
if not is_spark_connect_mode():
return False
if hasattr(spark.client, "metadata"):
metadata = spark.client.metadata()
else:
metadata = spark.client._builder.metadata()
return any(k == "x-databricks-session-id" for k, v in metadata)
def is_dbfs_fuse_available():
if not is_in_databricks_runtime():
return False
try:
return (
subprocess.call(
["mountpoint", "/dbfs"],
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
)
== 0
)
except Exception:
return False
def is_uc_volume_fuse_available():
try:
return (
subprocess.call(
["mountpoint", "/Volumes"],
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
)
== 0
)
except Exception:
return False
@_use_repl_context_if_available("isInCluster")
def is_in_cluster():
try:
spark_session = _get_active_spark_session()
return (
spark_session is not None
and spark_session.conf.get("spark.databricks.clusterUsageTags.clusterId", None)
is not None
)
except Exception:
return False
@_use_repl_context_if_available("notebookId")
def get_notebook_id():
"""Should only be called if is_in_databricks_notebook is true"""
if notebook_id := _get_property_from_spark_context("spark.databricks.notebook.id"):
return notebook_id
if (path := acl_path_of_acl_root()) and path.startswith("/workspace"):
return path.split("/")[-1]
return None
@_use_repl_context_if_available("notebookPath")
def get_notebook_path():
"""Should only be called if is_in_databricks_notebook is true"""
path = _get_property_from_spark_context("spark.databricks.notebook.path")
if path is not None:
return path
try:
return _get_command_context().notebookPath().get()
except Exception:
return _get_extra_context("notebook_path")
@_use_repl_context_if_available("clusterId")
def get_cluster_id():
spark_session = _get_active_spark_session()
if spark_session is None:
return None
return spark_session.conf.get("spark.databricks.clusterUsageTags.clusterId", None)
@_use_repl_context_if_available("jobGroupId")
def get_job_group_id():
try:
dbutils = _get_dbutils()
job_group_id = dbutils.entry_point.getJobGroupId()
if job_group_id is not None:
return job_group_id
except Exception:
return None
@_use_repl_context_if_available("replId")
def get_repl_id():
"""
Returns:
The ID of the current Databricks Python REPL.
"""
# Attempt to fetch the REPL ID from the Python REPL's entrypoint object. This REPL ID
# is guaranteed to be set upon REPL startup in DBR / MLR 9.0
try:
dbutils = _get_dbutils()
repl_id = dbutils.entry_point.getReplId()
if repl_id is not None:
return repl_id
except Exception:
pass
# If the REPL ID entrypoint property is unavailable due to an older runtime version (< 9.0),
# attempt to fetch the REPL ID from the Spark Context. This property may not be available
# until several seconds after REPL startup
try:
from pyspark import SparkContext
repl_id = SparkContext.getOrCreate().getLocalProperty("spark.databricks.replId")
if repl_id is not None:
return repl_id
except Exception:
pass
@_use_repl_context_if_available("jobId")
def get_job_id():
try:
return _get_command_context().jobId().get()
except Exception:
return _get_context_tag("jobId")
@_use_repl_context_if_available("idInJob")
def get_job_run_id():
try:
return _get_command_context().idInJob().get()
except Exception:
return _get_context_tag("idInJob")
@_use_repl_context_if_available("jobTaskType")
def get_job_type():
"""Should only be called if is_in_databricks_job is true"""
try:
return _get_command_context().jobTaskType().get()
except Exception:
return _get_context_tag("jobTaskType")
@_use_repl_context_if_available("jobType")
def get_job_type_info():
try:
return _get_context_tag("jobType")
except Exception:
return None
@_use_repl_context_if_available("commandRunId")
def get_command_run_id():
try:
return _get_command_context().commandRunId().get()
except Exception:
# Older runtimes may not have the commandRunId available
return None
@_use_repl_context_if_available("workloadId")
def get_workload_id():
try:
return _get_command_context().workloadId().get()
except Exception:
return _get_context_tag("workloadId")
@_use_repl_context_if_available("workloadClass")
def get_workload_class():
try:
return _get_command_context().workloadClass().get()
except Exception:
return _get_context_tag("workloadClass")
@_use_repl_context_if_available("apiUrl")
def get_webapp_url():
"""Should only be called if is_in_databricks_notebook or is_in_databricks_jobs is true"""
url = _get_property_from_spark_context("spark.databricks.api.url")
if url is not None:
return url
try:
return _get_command_context().apiUrl().get()
except Exception:
return _get_extra_context("api_url")
@_use_repl_context_if_available("workspaceId")
def get_workspace_id():
try:
return _get_command_context().workspaceId().get()
except Exception:
return _get_context_tag("orgId")
@_use_repl_context_if_available("browserHostName")
def get_browser_hostname():
try:
return _get_command_context().browserHostName().get()
except Exception:
return _get_context_tag("browserHostName")
def get_workspace_info_from_dbutils():
try:
dbutils = _get_dbutils()
if dbutils:
browser_hostname = get_browser_hostname()
workspace_host = "https://" + browser_hostname if browser_hostname else get_webapp_url()
workspace_id = get_workspace_id()
return workspace_host, workspace_id
except Exception:
pass
return None, None
@_use_repl_context_if_available("workspaceUrl", ignore_none=True)
def _get_workspace_url():
try:
if spark_session := _get_active_spark_session():
if workspace_url := spark_session.conf.get("spark.databricks.workspaceUrl", None):
return workspace_url
except Exception:
return None
def get_workspace_url():
if url := _get_workspace_url():
return f"https://{url}" if not url.startswith("https://") else url
return None
def warn_on_deprecated_cross_workspace_registry_uri(registry_uri):
workspace_host, workspace_id = get_workspace_info_from_databricks_secrets(
tracking_uri=registry_uri
)
if workspace_host is not None or workspace_id is not None:
_logger.warning(
"Accessing remote workspace model registries using registry URIs of the form "
"'databricks://scope:prefix', or by loading models via URIs of the form "
"'models://scope:prefix@databricks/model-name/stage-or-version', is deprecated. "
"Use Models in Unity Catalog instead for easy cross-workspace model access, with "
"granular per-user audit logging and no extra setup required. See "
"https://docs.databricks.com/machine-learning/manage-model-lifecycle/index.html "
"for more details."
)
def get_workspace_info_from_databricks_secrets(tracking_uri):
profile, key_prefix = get_db_info_from_uri(tracking_uri)
if key_prefix:
if dbutils := _get_dbutils():
workspace_id = dbutils.secrets.get(scope=profile, key=key_prefix + "-workspace-id")
workspace_host = dbutils.secrets.get(scope=profile, key=key_prefix + "-host")
return workspace_host, workspace_id
return None, None
def _fail_malformed_databricks_auth(uri):
if uri and uri.startswith(_DATABRICKS_UNITY_CATALOG_SCHEME):
uri_name = "registry URI"
uri_scheme = _DATABRICKS_UNITY_CATALOG_SCHEME
else:
uri_name = "tracking URI"
uri_scheme = "databricks"
if is_in_databricks_model_serving_environment():
raise MlflowException(
f"Reading Databricks credential configuration in model serving failed. "
f"Most commonly, this happens because the model currently "
f"being served was logged without Databricks resource dependencies "
f"properly specified. Re-log your model, specifying resource dependencies as "
f"described in "
f"https://docs.databricks.com/en/generative-ai/agent-framework/log-agent.html"
f"#specify-resources-for-pyfunc-or-langchain-agent "
f"and then register and attempt to serve it again. Alternatively, you can explicitly "
f"configure authentication by setting environment variables as described in "
f"https://docs.databricks.com/en/generative-ai/agent-framework/deploy-agent.html"
f"#manual-authentication. "
f"Additional debug info: the MLflow {uri_name} was set to '{uri}'"
)
raise MlflowException(
f"Reading Databricks credential configuration failed with MLflow {uri_name} '{uri}'. "
"Please ensure that the 'databricks-sdk' PyPI library is installed, the tracking "
"URI is set correctly, and Databricks authentication is properly configured. "
f"The {uri_name} can be either '{uri_scheme}' "
f"(using profile name specified by 'DATABRICKS_CONFIG_PROFILE' environment variable "
f"or using 'DEFAULT' authentication profile if 'DATABRICKS_CONFIG_PROFILE' environment "
f"variable does not exist) or '{uri_scheme}://{{profile}}'. "
"You can configure Databricks authentication in several ways, for example by "
"specifying environment variables (e.g. DATABRICKS_HOST + DATABRICKS_TOKEN) or "
"logging in using 'databricks auth login'. \n"
"For details on configuring Databricks authentication, please refer to "
"'https://docs.databricks.com/en/dev-tools/auth/index.html#unified-auth'."
)
# Helper function to attempt to read OAuth Token from
# mounted file in Databricks Model Serving environment
def get_model_dependency_oauth_token(should_retry=True):
try:
with open(_MODEL_DEPENDENCY_OAUTH_TOKEN_FILE_PATH) as f:
oauth_dict = json.load(f)
return oauth_dict["OAUTH_TOKEN"][0]["oauthTokenValue"]
except Exception as e:
# sleep and retry in case of any race conditions with OAuth refreshing
if should_retry:
time.sleep(0.5)
return get_model_dependency_oauth_token(should_retry=False)
else:
raise MlflowException(
"Unable to read Oauth credentials from file mount for Databricks "
"Model Serving dependency failed"
) from e
| DBConnectUDFSandboxInfo |
python | huggingface__transformers | src/transformers/models/swinv2/modeling_swinv2.py | {
"start": 4026,
"end": 5690
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided):
Masked image modeling (MLM) loss.
reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Reconstructed pixel values.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
loss: Optional[torch.FloatTensor] = None
reconstruction: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
@property
def logits(self):
warnings.warn(
"logits attribute is deprecated and will be removed in version 5 of Transformers."
" Please use the reconstruction attribute to retrieve the final output instead.",
FutureWarning,
)
return self.reconstruction
@dataclass
@auto_docstring(
custom_intro="""
Swinv2 outputs for image classification.
"""
)
# Copied from transformers.models.swin.modeling_swin.SwinImageClassifierOutput with Swin->Swinv2
| Swinv2MaskedImageModelingOutput |
python | getsentry__sentry | src/sentry/flags/models.py | {
"start": 793,
"end": 1244
} | class ____(Enum):
EMAIL = 0
ID = 1
NAME = 2
@classmethod
def to_string(cls, integer):
if integer == 0:
return "email"
if integer == 1:
return "id"
if integer == 2:
return "name"
raise ValueError
CREATED_BY_TYPE_MAP = {
"email": CreatedByTypeEnum.EMAIL.value,
"id": CreatedByTypeEnum.ID.value,
"name": CreatedByTypeEnum.NAME.value,
}
| CreatedByTypeEnum |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_hyperlink17.py | {
"start": 315,
"end": 1063
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("hyperlink17.xlsx")
def test_create_file(self):
"""
Test the creation of a simple XlsxWriter file with hyperlinks. This
example doesn't have any link formatting and tests the relationship
linkage code.
"""
workbook = Workbook(self.got_filename)
# Turn off default URL format for testing.
workbook.default_url_format = None
worksheet = workbook.add_worksheet()
worksheet.write_url("A1", "http://google.com/some link")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/clsregistry.py | {
"start": 10998,
"end": 12221
} | class ____:
__slots__ = ("cls",)
cls: Type[Any]
def __init__(self, cls: Type[Any]):
self.cls = cls
def __getattr__(self, key: str) -> Any:
mp = class_mapper(self.cls, configure=False)
if mp:
if key not in mp.all_orm_descriptors:
raise AttributeError(
"Class %r does not have a mapped column named %r"
% (self.cls, key)
)
desc = mp.all_orm_descriptors[key]
if desc.extension_type is interfaces.NotExtension.NOT_EXTENSION:
assert isinstance(desc, attributes.QueryableAttribute)
prop = desc.property
if isinstance(prop, SynonymProperty):
key = prop.name
elif not isinstance(prop, ColumnProperty):
raise exc.InvalidRequestError(
"Property %r is not an instance of"
" ColumnProperty (i.e. does not correspond"
" directly to a Column)." % key
)
return getattr(self.cls, key)
inspection._inspects(_GetColumns)(
lambda target: inspection.inspect(target.cls)
)
| _GetColumns |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/overloadOverlap1.py | {
"start": 4488,
"end": 4691
} | class ____(Generic[_T1]):
@overload
def __call__(self, f: _T1) -> _T1: ...
@overload
def __call__(self, f: _T1 | None) -> _T1: ...
def __call__(self, f: _T1 | None) -> _T1: ...
| ClassA |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/base.py | {
"start": 92294,
"end": 96563
} | class ____(TransactionalContext):
"""Represent a database transaction in progress.
The :class:`.Transaction` object is procured by
calling the :meth:`_engine.Connection.begin` method of
:class:`_engine.Connection`::
from sqlalchemy import create_engine
engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")
connection = engine.connect()
trans = connection.begin()
connection.execute(text("insert into x (a, b) values (1, 2)"))
trans.commit()
The object provides :meth:`.rollback` and :meth:`.commit`
methods in order to control transaction boundaries. It
also implements a context manager interface so that
the Python ``with`` statement can be used with the
:meth:`_engine.Connection.begin` method::
with connection.begin():
connection.execute(text("insert into x (a, b) values (1, 2)"))
The Transaction object is **not** threadsafe.
.. seealso::
:meth:`_engine.Connection.begin`
:meth:`_engine.Connection.begin_twophase`
:meth:`_engine.Connection.begin_nested`
.. index::
single: thread safety; Transaction
""" # noqa
__slots__ = ()
_is_root: bool = False
is_active: bool
connection: Connection
def __init__(self, connection: Connection):
raise NotImplementedError()
@property
def _deactivated_from_connection(self) -> bool:
"""True if this transaction is totally deactivated from the connection
and therefore can no longer affect its state.
"""
raise NotImplementedError()
def _do_close(self) -> None:
raise NotImplementedError()
def _do_rollback(self) -> None:
raise NotImplementedError()
def _do_commit(self) -> None:
raise NotImplementedError()
@property
def is_valid(self) -> bool:
return self.is_active and not self.connection.invalidated
def close(self) -> None:
"""Close this :class:`.Transaction`.
If this transaction is the base transaction in a begin/commit
nesting, the transaction will rollback(). Otherwise, the
method returns.
This is used to cancel a Transaction without affecting the scope of
an enclosing transaction.
"""
try:
self._do_close()
finally:
assert not self.is_active
def rollback(self) -> None:
"""Roll back this :class:`.Transaction`.
The implementation of this may vary based on the type of transaction in
use:
* For a simple database transaction (e.g. :class:`.RootTransaction`),
it corresponds to a ROLLBACK.
* For a :class:`.NestedTransaction`, it corresponds to a
"ROLLBACK TO SAVEPOINT" operation.
* For a :class:`.TwoPhaseTransaction`, DBAPI-specific methods for two
phase transactions may be used.
"""
try:
self._do_rollback()
finally:
assert not self.is_active
def commit(self) -> None:
"""Commit this :class:`.Transaction`.
The implementation of this may vary based on the type of transaction in
use:
* For a simple database transaction (e.g. :class:`.RootTransaction`),
it corresponds to a COMMIT.
* For a :class:`.NestedTransaction`, it corresponds to a
"RELEASE SAVEPOINT" operation.
* For a :class:`.TwoPhaseTransaction`, DBAPI-specific methods for two
phase transactions may be used.
"""
try:
self._do_commit()
finally:
assert not self.is_active
def _get_subject(self) -> Connection:
return self.connection
def _transaction_is_active(self) -> bool:
return self.is_active
def _transaction_is_closed(self) -> bool:
return not self._deactivated_from_connection
def _rollback_can_be_called(self) -> bool:
# for RootTransaction / NestedTransaction, it's safe to call
# rollback() even if the transaction is deactive and no warnings
# will be emitted. tested in
# test_transaction.py -> test_no_rollback_in_deactive(?:_savepoint)?
return True
| Transaction |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_spans_performance.py | {
"start": 1678,
"end": 3281
} | class ____:
suspect_op_group_columns: list[str]
suspect_op_group_sort: list[str]
suspect_example_functions: list[str]
SPAN_PERFORMANCE_COLUMNS: dict[str, SpanPerformanceColumn] = {
"count": SpanPerformanceColumn(
["count()", "sumArray(spans_exclusive_time)"],
["count()", "sumArray(spans_exclusive_time)"],
["count", "sum"],
),
"avgOccurrence": SpanPerformanceColumn(
[
"count()",
"count_unique(id)",
"equation|count() / count_unique(id)",
"sumArray(spans_exclusive_time)",
],
["equation[0]", "sumArray(spans_exclusive_time)"],
["count", "sum"],
),
"sumExclusiveTime": SpanPerformanceColumn(
["sumArray(spans_exclusive_time)"],
["sumArray(spans_exclusive_time)"],
["sum"],
),
"p50ExclusiveTime": SpanPerformanceColumn(
["percentileArray(spans_exclusive_time, 0.50)"],
["percentileArray(spans_exclusive_time, 0.50)"],
["max"],
),
"p75ExclusiveTime": SpanPerformanceColumn(
["percentileArray(spans_exclusive_time, 0.75)"],
["percentileArray(spans_exclusive_time, 0.75)"],
["max"],
),
"p95ExclusiveTime": SpanPerformanceColumn(
["percentileArray(spans_exclusive_time, 0.95)"],
["percentileArray(spans_exclusive_time, 0.95)"],
["max"],
),
"p99ExclusiveTime": SpanPerformanceColumn(
["percentileArray(spans_exclusive_time, 0.99)"],
["percentileArray(spans_exclusive_time, 0.99)"],
["max"],
),
}
| SpanPerformanceColumn |
python | google__jax | jax/experimental/mosaic/gpu/launch_context.py | {
"start": 2568,
"end": 2673
} | class ____(enum.Enum):
UP = enum.auto()
DOWN = enum.auto()
@dataclasses.dataclass(frozen=True)
| Rounding |
python | pydantic__pydantic | pydantic/v1/types.py | {
"start": 28725,
"end": 32326
} | class ____(str):
"""
Based on: https://en.wikipedia.org/wiki/Payment_card_number
"""
strip_whitespace: ClassVar[bool] = True
min_length: ClassVar[int] = 12
max_length: ClassVar[int] = 19
bin: str
last4: str
brand: PaymentCardBrand
def __init__(self, card_number: str):
self.bin = card_number[:6]
self.last4 = card_number[-4:]
self.brand = self._get_brand(card_number)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield str_validator
yield constr_strip_whitespace
yield constr_length_validator
yield cls.validate_digits
yield cls.validate_luhn_check_digit
yield cls
yield cls.validate_length_for_brand
@property
def masked(self) -> str:
num_masked = len(self) - 10 # len(bin) + len(last4) == 10
return f'{self.bin}{"*" * num_masked}{self.last4}'
@classmethod
def validate_digits(cls, card_number: str) -> str:
if not card_number.isdigit():
raise errors.NotDigitError
return card_number
@classmethod
def validate_luhn_check_digit(cls, card_number: str) -> str:
"""
Based on: https://en.wikipedia.org/wiki/Luhn_algorithm
"""
sum_ = int(card_number[-1])
length = len(card_number)
parity = length % 2
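# Walk the digits to the left of the check digit, doubling every second one (selected via the length's parity) and folding doubled values above 9 back into a single digit.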
for i in range(length - 1):
digit = int(card_number[i])
if i % 2 == parity:
digit *= 2
if digit > 9:
digit -= 9
sum_ += digit
valid = sum_ % 10 == 0
if not valid:
raise errors.LuhnValidationError
return card_number
@classmethod
def validate_length_for_brand(cls, card_number: 'PaymentCardNumber') -> 'PaymentCardNumber':
"""
Validate length based on BIN for major brands:
https://en.wikipedia.org/wiki/Payment_card_number#Issuer_identification_number_(IIN)
"""
required_length: Union[None, int, str] = None
if card_number.brand in PaymentCardBrand.mastercard:
required_length = 16
valid = len(card_number) == required_length
elif card_number.brand == PaymentCardBrand.visa:
required_length = '13, 16 or 19'
valid = len(card_number) in {13, 16, 19}
elif card_number.brand == PaymentCardBrand.amex:
required_length = 15
valid = len(card_number) == required_length
else:
valid = True
if not valid:
raise errors.InvalidLengthForBrand(brand=card_number.brand, required_length=required_length)
return card_number
@staticmethod
def _get_brand(card_number: str) -> PaymentCardBrand:
if card_number[0] == '4':
brand = PaymentCardBrand.visa
elif 51 <= int(card_number[:2]) <= 55:
brand = PaymentCardBrand.mastercard
elif card_number[:2] in {'34', '37'}:
brand = PaymentCardBrand.amex
else:
brand = PaymentCardBrand.other
return brand
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BYTE SIZE TYPE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
BYTE_SIZES = {
'b': 1,
'kb': 10**3,
'mb': 10**6,
'gb': 10**9,
'tb': 10**12,
'pb': 10**15,
'eb': 10**18,
'kib': 2**10,
'mib': 2**20,
'gib': 2**30,
'tib': 2**40,
'pib': 2**50,
'eib': 2**60,
}
BYTE_SIZES.update({k.lower()[0]: v for k, v in BYTE_SIZES.items() if 'i' not in k})
byte_string_re = re.compile(r'^\s*(\d*\.?\d+)\s*(\w+)?', re.IGNORECASE)
| PaymentCardNumber |
python | google__pytype | pytype/overlays/dataclass_overlay.py | {
"start": 6443,
"end": 6728
} | class ____(abstract.SimpleValue):
"""Return value of a field() call."""
def __init__(self, ctx, init, default, kw_only):
super().__init__("field", ctx)
self.init = init
self.default = default
self.kw_only = kw_only
self.cls = ctx.convert.unsolvable
| FieldInstance |
python | explosion__spaCy | spacy/schemas.py | {
"start": 2445,
"end": 6529
} | class ____:
extra = "forbid"
arbitrary_types_allowed = True
def get_arg_model(
func: Callable,
*,
exclude: Iterable[str] = tuple(),
name: str = "ArgModel",
strict: bool = True,
) -> ModelMetaclass:
"""Generate a pydantic model for function arguments.
func (Callable): The function to generate the schema for.
exclude (Iterable[str]): Parameter names to ignore.
name (str): Name of created model class.
strict (bool): Don't allow extra arguments if no variable keyword arguments
are allowed on the function.
RETURNS (ModelMetaclass): A pydantic model.
"""
sig_args = {}
try:
sig = inspect.signature(func)
except ValueError:
# Typically happens if the method is part of a Cython module without
# binding=True. Here we just use an empty model that allows everything.
return create_model(name, __config__=ArgSchemaConfigExtra) # type: ignore[arg-type, return-value]
has_variable = False
for param in sig.parameters.values():
if param.name in exclude:
continue
if param.kind == param.VAR_KEYWORD:
# The function allows variable keyword arguments so we shouldn't
# include **kwargs etc. in the schema and switch to non-strict
# mode and pass through all other values
has_variable = True
continue
# If no annotation is specified assume it's anything
annotation = param.annotation if param.annotation != param.empty else Any
# If no default value is specified assume that it's required. Cython
# functions/methods will have param.empty for default value None so we
# need to treat them differently
default_empty = None if is_cython_func(func) else ...
default = param.default if param.default != param.empty else default_empty
sig_args[param.name] = (annotation, default)
is_strict = strict and not has_variable
sig_args["__config__"] = ArgSchemaConfig if is_strict else ArgSchemaConfigExtra # type: ignore[assignment]
return create_model(name, **sig_args) # type: ignore[call-overload, arg-type, return-value]
def validate_init_settings(
func: Callable,
settings: Dict[str, Any],
*,
section: Optional[str] = None,
name: str = "",
exclude: Iterable[str] = ("get_examples", "nlp"),
) -> Dict[str, Any]:
"""Validate initialization settings against the expected arguments in
the method signature. Will parse values if possible (e.g. int to string)
and return the updated settings dict. Will raise a ConfigValidationError
if types don't match or required values are missing.
func (Callable): The initialize method of a given component etc.
settings (Dict[str, Any]): The settings from the respective [initialize] block.
section (str): Initialize section, for error message.
name (str): Name of the block in the section.
exclude (Iterable[str]): Parameter names to exclude from schema.
RETURNS (Dict[str, Any]): The validated settings.
"""
schema = get_arg_model(func, exclude=exclude, name="InitArgModel")
try:
return schema(**settings).dict()
except ValidationError as e:
block = "initialize" if not section else f"initialize.{section}"
title = f"Error validating initialization settings in [{block}]"
raise ConfigValidationError(
title=title, errors=e.errors(), config=settings, parent=name
) from None
# Matcher token patterns
def validate_token_pattern(obj: list) -> List[str]:
# Try to convert non-string keys (e.g. {ORTH: "foo"} -> {"ORTH": "foo"})
get_key = lambda k: NAMES[k] if isinstance(k, int) and k < len(NAMES) else k
if isinstance(obj, list):
converted = []
for pattern in obj:
if isinstance(pattern, dict):
pattern = {get_key(k): v for k, v in pattern.items()}
converted.append(pattern)
obj = converted
return validate(TokenPatternSchema, {"pattern": obj})
| ArgSchemaConfigExtra |
python | davidhalter__jedi | jedi/inference/gradual/stub_value.py | {
"start": 2624,
"end": 3343
} | class ____(ParserTreeFilter):
name_class = StubName
def _is_name_reachable(self, name):
if not super()._is_name_reachable(name):
return False
# Imports in stub files are only public if they have an "as"
# export.
definition = name.get_definition()
if definition is None:
return False
if definition.type in ('import_from', 'import_name'):
if name.parent.type not in ('import_as_name', 'dotted_as_name'):
return False
n = name.value
# TODO rewrite direct return
if n.startswith('_') and not (n.startswith('__') and n.endswith('__')):
return False
return True
| StubFilter |
python | davidhalter__jedi | jedi/inference/names.py | {
"start": 3275,
"end": 8432
} | class ____(AbstractNameDefinition):
def __init__(self, parent_context, tree_name):
self.parent_context = parent_context
self.tree_name = tree_name
def get_qualified_names(self, include_module_names=False):
import_node = search_ancestor(self.tree_name, 'import_name', 'import_from')
# For import nodes we cannot just have names, because it's very unclear
# how they would look like. For now we just ignore them in most cases.
# In case of level == 1, it works always, because it's like a submodule
# lookup.
if import_node is not None and not (import_node.level == 1
and self.get_root_context().get_value().is_package()):
# TODO improve the situation for when level is present.
if include_module_names and not import_node.level:
return tuple(n.value for n in import_node.get_path_for_name(self.tree_name))
else:
return None
return super().get_qualified_names(include_module_names)
def _get_qualified_names(self):
parent_names = self.parent_context.get_qualified_names()
if parent_names is None:
return None
return parent_names + (self.tree_name.value,)
def get_defining_qualified_value(self):
if self.is_import():
raise NotImplementedError("Shouldn't really happen, please report")
elif self.parent_context:
return self.parent_context.get_value() # Might be None
return None
def goto(self):
context = self.parent_context
name = self.tree_name
definition = name.get_definition(import_name_always=True)
if definition is not None:
type_ = definition.type
if type_ == 'expr_stmt':
# Only take the parent, because if it's more complicated than just
# a name it's something you can "goto" again.
is_simple_name = name.parent.type not in ('power', 'trailer')
if is_simple_name:
return [self]
elif type_ in ('import_from', 'import_name'):
from jedi.inference.imports import goto_import
module_names = goto_import(context, name)
return module_names
else:
return [self]
else:
from jedi.inference.imports import follow_error_node_imports_if_possible
values = follow_error_node_imports_if_possible(context, name)
if values is not None:
return [value.name for value in values]
par = name.parent
node_type = par.type
if node_type == 'argument' and par.children[1] == '=' and par.children[0] == name:
# Named param goto.
trailer = par.parent
if trailer.type == 'arglist':
trailer = trailer.parent
if trailer.type != 'classdef':
if trailer.type == 'decorator':
value_set = context.infer_node(trailer.children[1])
else:
i = trailer.parent.children.index(trailer)
to_infer = trailer.parent.children[:i]
if to_infer[0] == 'await':
to_infer.pop(0)
value_set = context.infer_node(to_infer[0])
from jedi.inference.syntax_tree import infer_trailer
for trailer in to_infer[1:]:
value_set = infer_trailer(context, value_set, trailer)
param_names = []
for value in value_set:
for signature in value.get_signatures():
for param_name in signature.get_param_names():
if param_name.string_name == name.value:
param_names.append(param_name)
return param_names
elif node_type == 'dotted_name': # Is a decorator.
index = par.children.index(name)
if index > 0:
new_dotted = deep_ast_copy(par)
new_dotted.children[index - 1:] = []
values = context.infer_node(new_dotted)
return unite(
value.goto(name, name_context=context)
for value in values
)
if node_type == 'trailer' and par.children[0] == '.':
values = infer_call_of_leaf(context, name, cut_own_trailer=True)
return values.goto(name, name_context=context)
else:
stmt = search_ancestor(
name, 'expr_stmt', 'lambdef'
) or name
if stmt.type == 'lambdef':
stmt = name
return context.goto(name, position=stmt.start_pos)
def is_import(self):
imp = search_ancestor(self.tree_name, 'import_from', 'import_name')
return imp is not None
@property
def string_name(self):
return self.tree_name.value
@property
def start_pos(self):
return self.tree_name.start_pos
| AbstractTreeName |
python | numpy__numpy | tools/swig/test/testTensor.py | {
"start": 11943,
"end": 12247
} | class ____(TensorTestCase):
def __init__(self, methodName="runTest"):
TensorTestCase.__init__(self, methodName)
self.typeStr = "uchar"
self.typeCode = "B"
self.result = int(self.result)
######################################################################
| ucharTestCase |
python | pytorch__pytorch | tools/test/heuristics/test_heuristics.py | {
"start": 4716,
"end": 6486
} | class ____(TestTD):
def test_get_keywords(self) -> None:
self.assertEqual(get_keywords("test/test_car.py"), ["car"])
self.assertEqual(get_keywords("test/nn/test_amp.py"), ["nn", "amp"])
self.assertEqual(get_keywords("torch/nn/test_amp.py"), ["nn", "amp"])
self.assertEqual(
get_keywords("torch/nn/mixed_precision/test_something.py"),
["nn", "amp", "something"],
)
def test_match_keywords(self) -> None:
self.assertTrue(file_matches_keyword("test/quantization/test_car.py", "quant"))
self.assertTrue(file_matches_keyword("test/test_quantization.py", "quant"))
self.assertTrue(file_matches_keyword("test/nn/test_amp.py", "nn"))
self.assertTrue(file_matches_keyword("test/nn/test_amp.py", "amp"))
self.assertTrue(file_matches_keyword("test/test_onnx.py", "onnx"))
self.assertFalse(file_matches_keyword("test/test_onnx.py", "nn"))
def test_get_keywords_match(self) -> None:
def helper(test_file: str, changed_file: str) -> bool:
return any(
file_matches_keyword(test_file, x) for x in get_keywords(changed_file)
)
self.assertTrue(helper("test/quantization/test_car.py", "quantize/t.py"))
self.assertFalse(helper("test/onnx/test_car.py", "nn/t.py"))
self.assertTrue(helper("test/nn/test_car.py", "nn/t.py"))
self.assertFalse(helper("test/nn/test_car.py", "test/b.py"))
self.assertTrue(helper("test/test_mixed_precision.py", "torch/amp/t.py"))
self.assertTrue(helper("test/test_amp.py", "torch/mixed_precision/t.py"))
self.assertTrue(helper("test/idk/other/random.py", "torch/idk/t.py"))
if __name__ == "__main__":
unittest.main()
| TestFilePath |
python | jazzband__django-oauth-toolkit | tests/app/idp/idp/apps.py | {
"start": 1006,
"end": 1152
} | class ____(AppConfig):
name = "idp"
default = True
def ready(self):
check_request_enabled.connect(cors_allow_origin)
| IDPAppConfig |
python | spack__spack | var/spack/test_repos/spack_repo/edges_test/packages/blas_only_client/package.py | {
"start": 217,
"end": 603
} | class ____(Package):
"""This package depends on the 'blas' virtual only, but should be able to use also provider
that provide e.g. 'blas' together with 'lapack'.
"""
homepage = "http://www.openblas.net"
url = "http://github.com/xianyi/OpenBLAS/archive/v0.2.15.tar.gz"
version("0.2.16", md5="b1190f3d3471685f17cfd1ec1d252ac9")
depends_on("blas")
| BlasOnlyClient |
python | dagster-io__dagster | helm/dagster/schema/schema/charts/dagster/subschema/daemon.py | {
"start": 1347,
"end": 1809
} | class ____(BaseModel):
enabled: bool
type: RunCoordinatorType
config: RunCoordinatorConfig
model_config = ConfigDict(
extra="forbid",
json_schema_extra={
"allOf": create_json_schema_conditionals(
{
RunCoordinatorType.QUEUED: "queuedRunCoordinator",
RunCoordinatorType.CUSTOM: "customRunCoordinator",
}
)
},
)
| RunCoordinator |
python | doocs__leetcode | solution/2400-2499/2492.Minimum Score of a Path Between Two Cities/Solution.py | {
"start": 0,
"end": 489
} | class ____:
def minScore(self, n: int, roads: List[List[int]]) -> int:
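# The score of a path is its minimum road distance, so the best score is simply the smallest distance reachable from city 1 (city n lies in the same component); one DFS tracking that minimum is enough.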
def dfs(i):
nonlocal ans
for j, d in g[i]:
ans = min(ans, d)
if not vis[j]:
vis[j] = True
dfs(j)
g = defaultdict(list)
for a, b, d in roads:
g[a].append((b, d))
g[b].append((a, d))
vis = [False] * (n + 1)
ans = inf
dfs(1)
return ans
| Solution |
python | PyCQA__pylint | tests/functional/i/init_not_called.py | {
"start": 1178,
"end": 1254
} | class ____:
def __init__(self, num: int):
self.number = num
| Parent |
python | huggingface__transformers | tests/models/blip/test_modeling_blip.py | {
"start": 25431,
"end": 28504
} | class ____(ModelTesterMixin, unittest.TestCase):
all_model_classes = (BlipForQuestionAnswering,) if is_torch_available() else ()
# Doesn't run generation tests due to custom generation logic -- won't fix
all_generative_model_classes = ()
test_resize_embeddings = True
test_attention_outputs = False
def setUp(self):
self.model_tester = BlipVQAModelTester(self)
def _prepare_inputs_for_vqa(self):
_, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
inputs_dict["decoder_input_ids"] = inputs_dict["input_ids"]
inputs_dict.pop("return_loss")
return inputs_dict
def test_class_name_consistency(self):
"""
Tests that all VQA models have a class name that ends with "ForQuestionAnswering"
"""
for model_class in self.all_model_classes:
model = model_class(self.model_tester.get_config())
self.assertTrue(
model.__class__.__name__.endswith("ForQuestionAnswering"),
f"Class name should end with 'ForVisualQuestionAnswering' got {model.__class__.__name__}",
)
def test_training(self):
"""
Tests that all VQA models can be trained on a single batch
"""
for model_class in self.all_model_classes:
model = model_class(self.model_tester.get_config()).to(torch_device)
model.train()
loss = model(**self.model_tester.prepare_config_and_inputs_for_common()[1]).loss
loss.backward()
# verify the gradients are not None
for name, param in model.named_parameters():
self.assertIsNotNone(param.grad, f"Gradients should not be None - got {param.grad} for {name}")
def test_forward_signature(self):
"""
Test if the forward function has the expected arguments.
"""
for model_class in self.all_model_classes:
model = model_class(self.model_tester.get_config())
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so args are the first n entries
args = list(signature.parameters.keys())
expected_args = [
"input_ids",
"attention_mask",
"labels",
"decoder_input_ids",
"decoder_attention_mask",
]
for arg in expected_args:
self.assertTrue(
arg in args,
f"Argument {arg} of forward function signature should include {arg}. Found {args}.",
)
@unittest.skip(reason="Hidden_states is tested in individual model tests")
def test_hidden_states_output(self):
pass
@unittest.skip(reason="Inputs_embeds is tested in individual model tests")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="BlipModel does not have input/output embeddings")
def test_model_get_set_embeddings(self):
pass
@require_torch
| BlipVQAModelTest |
python | sympy__sympy | sympy/stats/crv_types.py | {
"start": 120730,
"end": 122371
} | class ____(SingleContinuousDistribution):
_argnames = ('R',)
@property
def set(self):
return Interval(-self.R, self.R)
@staticmethod
def check(R):
_value_check(R > 0, "Radius R must be positive.")
def pdf(self, x):
R = self.R
return 2/(pi*R**2)*sqrt(R**2 - x**2)
def _characteristic_function(self, t):
return Piecewise((2 * besselj(1, self.R*t) / (self.R*t), Ne(t, 0)),
(S.One, True))
def _moment_generating_function(self, t):
return Piecewise((2 * besseli(1, self.R*t) / (self.R*t), Ne(t, 0)),
(S.One, True))
def WignerSemicircle(name, R):
r"""
Create a continuous random variable with a Wigner semicircle distribution.
Explanation
===========
The density of the Wigner semicircle distribution is given by
.. math::
f(x) := \frac2{\pi R^2}\,\sqrt{R^2-x^2}
with :math:`x \in [-R,R]`.
Parameters
==========
R : Real number, `R > 0`, the radius
Returns
=======
A RandomSymbol.
Examples
========
>>> from sympy.stats import WignerSemicircle, density, E
>>> from sympy import Symbol
>>> R = Symbol("R", positive=True)
>>> z = Symbol("z")
>>> X = WignerSemicircle("x", R)
>>> density(X)(z)
2*sqrt(R**2 - z**2)/(pi*R**2)
>>> E(X)
0
References
==========
.. [1] https://en.wikipedia.org/wiki/Wigner_semicircle_distribution
.. [2] https://mathworld.wolfram.com/WignersSemicircleLaw.html
"""
return rv(name, WignerSemicircleDistribution, (R,))
| WignerSemicircleDistribution |
python | airbytehq__airbyte | airbyte-ci/connectors/live-tests/src/live_tests/regression_tests/test_read.py | {
"start": 669,
"end": 25484
} | class ____:
"""This class contains tests that check if the data integrity is preserved between the control and target versions.
The tests have some overlap but they are meant to be gradually stricter in terms of integrity checks.
1. test_record_count: On each stream, check if the target version produces at least the same number of records as the control version.
2. test_all_pks_are_produced_in_target_version: On each stream, check if all primary key values produced by the control version are present in the target version.
3. test_all_records_are_the_same: On each stream, check if all records produced by the control version are the same as in the target version. This will write a diff of the records to the test artifacts.
All these test have a full refresh and incremental variant.
"""
async def _check_all_pks_are_produced_in_target_version(
self,
request: SubRequest,
record_property: Callable,
read_with_state_control_execution_result: ExecutionResult,
read_with_state_target_execution_result: ExecutionResult,
) -> None:
"""This test gathers all primary key values from the control version and checks if they are present in the target version for each stream.
If there are missing primary keys, the test fails and the missing records are stored in the test artifacts.
Args:
request (SubRequest): The test request.
record_property (Callable): A callable for stashing information on the report.
streams: (Iterable[str]): The list of streams configured for the connection.
primary_keys_per_stream (Dict[str, Optional[List[str]]]): The primary keys for each stream.
read_with_state_control_execution_result (ExecutionResult): The control version execution result.
read_with_state_target_execution_result (ExecutionResult): The target version execution result.
"""
if not read_with_state_control_execution_result.primary_keys_per_stream:
pytest.skip("No primary keys provided on any stream. Skipping the test.")
logger = get_test_logger(request)
streams_with_missing_records = set()
for stream_name in read_with_state_control_execution_result.configured_streams:
_primary_key = read_with_state_control_execution_result.primary_keys_per_stream[stream_name]
if not _primary_key:
# TODO: report skipped PK test per individual stream
logger.warning(f"No primary keys provided on stream {stream_name}.")
continue
primary_key = _primary_key[0] if isinstance(_primary_key, list) else _primary_key
control_pks = set()
target_pks = set()
logger.info(f"Retrieving primary keys for stream {stream_name} on control version.")
for control_record in read_with_state_control_execution_result.get_records_per_stream(stream_name):
control_pks.add(control_record.record.data[primary_key])
logger.info(f"Retrieving primary keys for stream {stream_name} on target version.")
for target_record in read_with_state_target_execution_result.get_records_per_stream(stream_name):
target_pks.add(target_record.record.data[primary_key])
if missing_pks := control_pks - target_pks:
logger.warning(f"Found {len(missing_pks)} primary keys for stream {stream_name}. Retrieving missing records.")
streams_with_missing_records.add(stream_name)
missing_records = [
r
for r in read_with_state_control_execution_result.get_records_per_stream(stream_name)
if r.record.data[primary_key] in missing_pks
]
record_property(
f"Missing records on stream {stream_name}",
json.dumps(missing_records),
)
artifact_path = write_string_to_test_artifact(
request,
json.dumps(missing_records),
f"missing_records_{stream_name}.json",
subdir=request.node.name,
)
logger.info(f"Missing records for stream {stream_name} are stored in {artifact_path}.")
if streams_with_missing_records:
pytest.fail(f"Missing records for streams: {', '.join(streams_with_missing_records)}.")
async def _check_record_counts(
self,
record_property: Callable,
read_control_execution_result: ExecutionResult,
read_target_execution_result: ExecutionResult,
) -> None:
record_count_difference_per_stream: dict[str, dict[str, int]] = {}
for stream_name in read_control_execution_result.configured_streams:
control_records_count = sum(1 for _ in read_control_execution_result.get_records_per_stream(stream_name))
target_records_count = sum(1 for _ in read_target_execution_result.get_records_per_stream(stream_name))
difference = {
"delta": target_records_count - control_records_count,
"control": control_records_count,
"target": target_records_count,
}
if difference["delta"] != 0:
record_count_difference_per_stream[stream_name] = difference
error_messages = []
for stream, difference in record_count_difference_per_stream.items():
if difference["delta"] > 0:
error_messages.append(
f"Stream {stream} has {difference['delta']} more records in the target version ({difference['target']} vs. {difference['control']})."
)
if difference["delta"] < 0:
error_messages.append(
f"Stream {stream} has {-difference['delta']} fewer records in the target version({difference['target']} vs. {difference['control']})."
)
if error_messages:
record_property("Record count differences", "\n".join(error_messages))
pytest.fail("Record counts are different.")
async def _check_all_records_are_the_same(
self,
request: SubRequest,
record_property: Callable,
read_control_execution_result: ExecutionResult,
read_target_execution_result: ExecutionResult,
) -> None:
"""This test checks if all records in the control version are present in the target version for each stream.
If there are mismatches, the test fails and the missing records are stored in the test artifacts.
It will catch differences in record schemas, missing records, and extra records.
Args:
request (SubRequest): The test request.
read_control_execution_result (ExecutionResult): The control version execution result.
read_target_execution_result (ExecutionResult): The target version execution result.
"""
streams_with_diff = set()
for stream in read_control_execution_result.configured_streams:
control_records = list(read_control_execution_result.get_records_per_stream(stream))
target_records = list(read_target_execution_result.get_records_per_stream(stream))
if control_records and not target_records:
pytest.fail(f"Stream {stream} is missing in the target version.")
if primary_key := read_control_execution_result.primary_keys_per_stream.get(stream):
diffs = self._get_diff_on_stream_with_pk(
request,
record_property,
stream,
control_records,
target_records,
primary_key,
)
else:
diffs = self._get_diff_on_stream_without_pk(
request,
record_property,
stream,
control_records,
target_records,
)
if diffs:
streams_with_diff.add(stream)
if streams_with_diff:
messages = [
f"Records for stream {stream} are different. Please check the diff in the test artifacts for debugging."
for stream in sorted(streams_with_diff)
]
pytest.fail("/n".join(messages))
def _check_record_schema_match(
self,
request: SubRequest,
record_property: Callable,
control_execution_result: ExecutionResult,
target_execution_result: ExecutionResult,
) -> None:
"""This test checks if the schema of the records in the control and target versions match.
It compares the meta schema inferred for each stream on the control and target versions.
It also fetches an example record for each stream from the DuckDB instance and compares the schema of the records.
Args:
record_property (Callable): The record property to store the mismatching fields.
control_execution_result (ExecutionResult): The control version execution result.
target_execution_result (ExecutionResult): The target version execution result.
"""
logger = get_test_logger(request)
assert control_execution_result.stream_schemas is not None, "Control schemas were not inferred."
assert target_execution_result.stream_schemas is not None, "Target schemas were not inferred."
mismatches_count = 0
for stream in control_execution_result.stream_schemas:
control_schema = control_execution_result.stream_schemas.get(stream, {})
if not control_schema:
logger.warning(f"Stream {stream} was not found in the control results.")
target_schema = target_execution_result.stream_schemas.get(stream, {})
if control_schema and not target_schema:
logger.warning(f"Stream {stream} was present in the control results but not in the target results.")
diff = DeepDiff(control_schema, target_schema, ignore_order=True)
if diff:
record_property(f"{stream} diff between control and target version", diff.pretty())
try:
control_record = next(control_execution_result.get_records_per_stream(stream))
control_example = json.dumps(control_record.record.data, indent=2)
record_property(f"{stream} example record for control version", control_example)
except StopIteration:
logger.warning(f"Stream {stream} has no record in the control version.")
try:
target_record = next(target_execution_result.get_records_per_stream(stream))
target_example = json.dumps(target_record.record.data, indent=2)
record_property(f"{stream} example record for target version", target_example)
except StopIteration:
logger.warning(f"Stream {stream} has no record in the target version.")
mismatches_count += 1
if mismatches_count > 0:
pytest.fail(f"{mismatches_count} streams have mismatching schemas between control and target versions.")
@pytest.mark.with_state()
async def test_record_count_with_state(
self,
record_property: Callable,
read_with_state_control_execution_result: ExecutionResult,
read_with_state_target_execution_result: ExecutionResult,
) -> None:
"""This test compares the record counts between the control and target versions on each stream.
Records are pulled from the output of the read command to which the connection state is passed.
It fails if there are any differences in the record counts.
It is not bulletproof: if the upstream source supports insertion or deletion, it may lead to false positives.
The HTTP cache shared between the control and target command executions might limit this problem.
Extra records in the target version might mean that a bug was fixed, but it could also mean that the target version produces duplicates.
We should add a new test for duplicates and not fail this one if extra records are found.
More advanced checks are done in the other tests.
"""
fail_test_on_failing_execution_results(
record_property,
[
read_with_state_control_execution_result,
read_with_state_target_execution_result,
],
)
await self._check_record_counts(
record_property,
read_with_state_control_execution_result,
read_with_state_target_execution_result,
)
@pytest.mark.without_state()
async def test_record_count_without_state(
self,
record_property: Callable,
read_control_execution_result: ExecutionResult,
read_target_execution_result: ExecutionResult,
) -> None:
"""This test compares the record counts between the control and target versions on each stream.
Records are pulled from the output of the read command to which no connection state is passed (leading to a full-refresh like sync).
It fails if there are any differences in the record counts.
It is not bulletproof: if the upstream source supports insertion or deletion, it may lead to false positives.
The HTTP cache shared between the control and target command executions might limit this problem.
Extra records in the target version might mean that a bug was fixed, but it could also mean that the target version produces duplicates.
We should add a new test for duplicates and not fail this one if extra records are found.
More advanced checks are done in the other tests.
"""
fail_test_on_failing_execution_results(
record_property,
[
read_control_execution_result,
read_target_execution_result,
],
)
await self._check_record_counts(
record_property,
read_control_execution_result,
read_target_execution_result,
)
@pytest.mark.with_state()
async def test_all_pks_are_produced_in_target_version_with_state(
self,
request: SubRequest,
record_property: Callable,
read_with_state_control_execution_result: ExecutionResult,
read_with_state_target_execution_result: ExecutionResult,
) -> None:
"""This test checks if all primary key values produced by the control version are present in the target version for each stream.
It is reading the records from the output of the read command to which the connection state is passed.
A failing test means that the target version is missing some records.
"""
fail_test_on_failing_execution_results(
record_property,
[
read_with_state_control_execution_result,
read_with_state_target_execution_result,
],
)
await self._check_all_pks_are_produced_in_target_version(
request,
record_property,
read_with_state_control_execution_result,
read_with_state_target_execution_result,
)
@pytest.mark.without_state()
async def test_all_pks_are_produced_in_target_version_without_state(
self,
request: SubRequest,
record_property: Callable,
read_control_execution_result: ExecutionResult,
read_target_execution_result: ExecutionResult,
) -> None:
"""This test checks if all primary key values produced by the control version are present in the target version for each stream.
Records are pulled from the output of the read command to which no connection state is passed (leading to a full-refresh like sync).
A failing test means that the target version is missing some records.
"""
fail_test_on_failing_execution_results(
record_property,
[
read_control_execution_result,
read_target_execution_result,
],
)
await self._check_all_pks_are_produced_in_target_version(
request,
record_property,
read_control_execution_result,
read_target_execution_result,
)
@pytest.mark.with_state()
async def test_record_schema_match_with_state(
self,
request: SubRequest,
record_property: Callable,
read_with_state_control_execution_result: ExecutionResult,
read_with_state_target_execution_result: ExecutionResult,
) -> None:
"""This test checks if the schema of the streams in the control and target versions match.
It produces a meta schema for each stream on control and target version and compares them.
It is not using the catalog schema, but inferring schemas from the actual records produced by the read command.
Records are pulled from the output of the read command to which the connection state is passed.
"""
self._check_record_schema_match(
request,
record_property,
read_with_state_control_execution_result,
read_with_state_target_execution_result,
)
@pytest.mark.without_state()
async def test_record_schema_match_without_state(
self,
request: SubRequest,
record_property: Callable,
read_control_execution_result: ExecutionResult,
read_target_execution_result: ExecutionResult,
) -> None:
"""This test checks if the schema of the streams in the control and target versions match.
It produces a meta schema for each stream on control and target version and compares them.
It is not using the catalog schema, but inferring schemas from the actual records produced by the read command.
Records are pulled from the output of the read command to which no connection state is passed (leading to a full-refresh like sync).
"""
self._check_record_schema_match(
request,
record_property,
read_control_execution_result,
read_target_execution_result,
)
@pytest.mark.allow_diagnostic_mode
@pytest.mark.with_state()
async def test_all_records_are_the_same_with_state(
self,
request: SubRequest,
record_property: Callable,
read_with_state_control_execution_result: ExecutionResult,
read_with_state_target_execution_result: ExecutionResult,
) -> None:
"""This test compares all records between the control and target versions on each stream.
It is very sensitive to record schema and order changes.
It fails if there are any differences in the records.
It is reading the records from the output of the read command to which the connection state is passed.
"""
fail_test_on_failing_execution_results(
record_property,
[
read_with_state_control_execution_result,
read_with_state_target_execution_result,
],
)
await self._check_all_records_are_the_same(
request,
record_property,
read_with_state_control_execution_result,
read_with_state_target_execution_result,
)
@pytest.mark.allow_diagnostic_mode
@pytest.mark.without_state()
async def test_all_records_are_the_same_without_state(
self,
request: SubRequest,
record_property: Callable,
read_control_execution_result: ExecutionResult,
read_target_execution_result: ExecutionResult,
) -> None:
"""This test compares all records between the control and target versions on each stream.
It is very sensitive to record schema and order changes.
It fails if there are any differences in the records.
It is reading the records from the output of the read command to which no connection state is passed (leading to a full-refresh like sync).
"""
fail_test_on_failing_execution_results(
record_property,
[
read_control_execution_result,
read_target_execution_result,
],
)
await self._check_all_records_are_the_same(
request,
record_property,
read_control_execution_result,
read_target_execution_result,
)
def _get_diff_on_stream_with_pk(
self,
request: SubRequest,
record_property: Callable,
stream: str,
control_records: list[AirbyteMessage],
target_records: list[AirbyteMessage],
primary_key: list[str],
) -> Optional[Iterable[str]]:
control_pks = {r.record.data[primary_key[0]] for r in control_records}
target_pks = {r.record.data[primary_key[0]] for r in target_records}
# Compare the diff for all records whose primary key is in both control and target
record_diff_path_prefix = f"{stream}_record_diff"
record_diff = get_and_write_diff(
request,
_get_filtered_sorted_records(control_records, target_pks, True, primary_key),
_get_filtered_sorted_records(target_records, control_pks, True, primary_key),
record_diff_path_prefix,
ignore_order=False,
exclude_paths=EXCLUDE_PATHS,
)
control_records_diff_path_prefix = f"{stream}_control_records_diff"
control_records_diff = get_and_write_diff(
request,
_get_filtered_sorted_records(control_records, target_pks, False, primary_key),
[],
control_records_diff_path_prefix,
ignore_order=False,
exclude_paths=EXCLUDE_PATHS,
)
target_records_diff_path_prefix = f"{stream}_target_records_diff"
target_records_diff = get_and_write_diff(
request,
[],
_get_filtered_sorted_records(target_records, control_pks, False, primary_key),
target_records_diff_path_prefix,
ignore_order=False,
exclude_paths=EXCLUDE_PATHS,
)
has_diff = record_diff or control_records_diff or target_records_diff
if has_diff:
record_property(
f"{stream} stream: records with primary key in target & control whose values differ",
record_diff,
)
record_property(
f"{stream} stream: records in control but not target",
control_records_diff,
)
record_property(
f"{stream} stream: records in target but not control",
target_records_diff,
)
return (record_diff, control_records_diff, target_records_diff)
return None
def _get_diff_on_stream_without_pk(
self,
request: SubRequest,
record_property: Callable,
stream: str,
control_records: list[AirbyteMessage],
target_records: list[AirbyteMessage],
) -> Optional[Iterable[str]]:
diff = get_and_write_diff(
request,
[json.loads(r.record.json(sort_keys=True)) for r in control_records],
[json.loads(r.record.json(sort_keys=True)) for r in target_records],
f"{stream}_diff",
ignore_order=True,
exclude_paths=EXCLUDE_PATHS,
)
if diff:
record_property(f"Diff for stream {stream}", diff)
return (diff,)
return None
def _get_filtered_sorted_records(
records: list[AirbyteMessage],
primary_key_set: set[Generator[Any, Any, None]],
include_target: bool,
primary_key: list[str],
) -> list[dict]:
"""
Get a list of records sorted by primary key, and filtered as specified.
For example, if `include_target` is true, we filter the records such that
only those whose primary key is in `primary_key_set` are returned.
If `include_target` is false, we only return records whose primary key
is not in `primary_key_set`.
"""
if include_target:
_filter = lambda x: x["data"].get(primary_key[0]) in primary_key_set
else:
_filter = lambda x: x["data"].get(primary_key[0]) not in primary_key_set
return sorted(
filter(
_filter,
[json.loads(s.record.json(sort_keys=True)) for s in records],
),
key=lambda x: x["data"][primary_key[0]],
)
| TestDataIntegrity |
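Illustrative aside (not a dataset entry): the docstrings in the TestDataIntegrity entry above describe a primary-key integrity check — collect the primary-key values seen in the control run, collect those seen in the target run, and fail on any control key missing from the target. A minimal self-contained sketch of that idea, with hypothetical record and helper names (the real tests operate on AirbyteMessage records pulled from ExecutionResult fixtures):

def find_missing_primary_keys(control_records, target_records, primary_key):
    # Records whose primary key appears in the control run but never in the target run.
    control_pks = {r[primary_key] for r in control_records}
    target_pks = {r[primary_key] for r in target_records}
    missing = control_pks - target_pks
    return [r for r in control_records if r[primary_key] in missing]

control = [{"id": 1}, {"id": 2}, {"id": 3}]
target = [{"id": 1}, {"id": 3}]
assert find_missing_primary_keys(control, target, "id") == [{"id": 2}]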
python | getsentry__sentry | tests/sentry/utils/email/test_backend.py | {
"start": 102,
"end": 735
} | class ____(TestCase):
def test_get_mail_backend(self) -> None:
with self.options({"mail.backend": "smtp"}):
assert get_mail_backend() == "django.core.mail.backends.smtp.EmailBackend"
with self.options({"mail.backend": "dummy"}):
assert get_mail_backend() == "django.core.mail.backends.dummy.EmailBackend"
with self.options({"mail.backend": "console"}):
assert get_mail_backend() == "django.core.mail.backends.console.EmailBackend"
with self.options({"mail.backend": "something.else"}):
assert get_mail_backend() == "something.else"
| GetMailBackendTest |
python | astropy__astropy | astropy/io/fits/tests/test_fitsdiff.py | {
"start": 550,
"end": 12094
} | class ____(FitsTestCase):
def test_help(self):
with pytest.raises(SystemExit) as e:
fitsdiff.main(["-h"])
assert e.value.code == 0
def test_version(self, capsys):
with pytest.raises(SystemExit) as e:
fitsdiff.main(["--version"])
out = capsys.readouterr()[0]
assert out == f"fitsdiff {version}"
assert e.value.code == 0
def test_noargs(self):
with pytest.raises(SystemExit) as e:
fitsdiff.main([""])
assert e.value.code == 2
def test_oneargargs(self):
with pytest.raises(SystemExit) as e:
fitsdiff.main(["file1"])
assert e.value.code == 2
def test_nodiff(self):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp("testa.fits")
tmp_b = self.temp("testb.fits")
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
assert numdiff == 0
def test_onediff(self):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 12
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp("testa.fits")
tmp_b = self.temp("testb.fits")
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
assert numdiff == 1
def test_manydiff(self, capsys):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a + 1
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp("testa.fits")
tmp_b = self.temp("testb.fits")
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
out, err = capsys.readouterr()
assert numdiff == 1
assert out.splitlines()[-6:] == [
" a> 9",
" b> 10",
" ...",
" 100 different pixels found (100.00% different).",
" Maximum relative difference: 1.0",
" Maximum absolute difference: 1.0",
]
numdiff = fitsdiff.main(["-n", "1", tmp_a, tmp_b])
out, err = capsys.readouterr()
assert numdiff == 1
assert out.splitlines()[-6:] == [
" a> 0",
" b> 1",
" ...",
" 100 different pixels found (100.00% different).",
" Maximum relative difference: 1.0",
" Maximum absolute difference: 1.0",
]
def test_outputfile(self):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 20
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp("testa.fits")
tmp_b = self.temp("testb.fits")
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(["-o", self.temp("diff.txt"), tmp_a, tmp_b])
assert numdiff == 1
with open(self.temp("diff.txt")) as f:
out = f.read()
assert out.splitlines()[-6:] == [
" Data differs at [1, 2]:",
" a> 10",
" b> 20",
" 1 different pixels found (1.00% different).",
" Maximum relative difference: 0.5",
" Maximum absolute difference: 10.0",
]
def test_atol(self):
a = np.arange(100, dtype=float).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 11
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp("testa.fits")
tmp_b = self.temp("testb.fits")
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(["-a", "1", tmp_a, tmp_b])
assert numdiff == 0
numdiff = fitsdiff.main(["--exact", "-a", "1", tmp_a, tmp_b])
assert numdiff == 1
def test_rtol(self):
a = np.arange(100, dtype=float).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 11
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp("testa.fits")
tmp_b = self.temp("testb.fits")
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(["-r", "1e-1", tmp_a, tmp_b])
assert numdiff == 0
def test_rtol_diff(self, capsys):
a = np.arange(100, dtype=float).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 20
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp("testa.fits")
tmp_b = self.temp("testb.fits")
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(["-r", "1e-2", tmp_a, tmp_b])
assert numdiff == 1
out, err = capsys.readouterr()
assert (
out
== f"""
fitsdiff: {version}
a: {tmp_a}
b: {tmp_b}
Maximum number of different data values to be reported: 10
Relative tolerance: 0.01, Absolute tolerance: 0.0
Primary HDU:
Data contains differences:
Data differs at [1, 2]:
a> 10.0
? ^
b> 20.0
? ^
1 different pixels found (1.00% different).
Maximum relative difference: 0.5
Maximum absolute difference: 10.0
"""
)
assert err == ""
def test_wildcard(self):
tmp1 = self.temp("tmp_file1")
with pytest.raises(SystemExit) as e:
fitsdiff.main([tmp1 + "*", "ACME"])
assert e.value.code == 2
def test_not_quiet(self, capsys):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp("testa.fits")
tmp_b = self.temp("testb.fits")
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
assert numdiff == 0
out, err = capsys.readouterr()
assert (
out
== f"""
fitsdiff: {version}
a: {tmp_a}
b: {tmp_b}
Maximum number of different data values to be reported: 10
Relative tolerance: 0.0, Absolute tolerance: 0.0
No differences found.
"""
)
assert err == ""
def test_quiet(self, capsys):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp("testa.fits")
tmp_b = self.temp("testb.fits")
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(["-q", tmp_a, tmp_b])
assert numdiff == 0
out, err = capsys.readouterr()
assert out == ""
assert err == ""
def test_path(self, capsys):
os.mkdir(self.temp("sub/"))
tmp_b = self.temp("sub/ascii.fits")
tmp_g = self.temp("sub/group.fits")
tmp_h = self.data("group.fits")
with hdulist.fitsopen(tmp_h) as hdu_b:
hdu_b.writeto(tmp_g)
writeto(tmp_b, np.arange(100).reshape(10, 10))
# one modified file and a directory
assert fitsdiff.main(["-q", self.data_dir, tmp_b]) == 1
assert fitsdiff.main(["-q", tmp_b, self.data_dir]) == 1
# two directories
tmp_d = self.temp("sub/")
assert fitsdiff.main(["-q", self.data_dir, tmp_d]) == 1
assert fitsdiff.main(["-q", tmp_d, self.data_dir]) == 1
expected_retv = int(not HAS_UNCOMPRESSPY)
assert fitsdiff.main(["-q", self.data_dir, self.data_dir]) == expected_retv
# no match
tmp_c = self.data("arange.fits")
fitsdiff.main([tmp_c, tmp_d])
out, err = capsys.readouterr()
assert "'arange.fits' has no match in" in err
# globbing
assert fitsdiff.main(["-q", self.data_dir + "/*.fits", self.data_dir]) == 0
assert fitsdiff.main(["-q", self.data_dir + "/g*.fits", tmp_d]) == 0
# one file and a directory
tmp_f = self.data("tb.fits")
assert fitsdiff.main(["-q", tmp_f, self.data_dir]) == 0
assert fitsdiff.main(["-q", self.data_dir, tmp_f]) == 0
@pytest.mark.filterwarnings("ignore:unclosed file:ResourceWarning")
def test_warning_unreadable_file(self, capsys, monkeypatch):
# simulate not having uncompresspy installed regardless of the actual state
monkeypatch.setattr(fits.file, "HAS_UNCOMPRESSPY", False)
Zfile = self.data("lzw.fits.Z")
assert fitsdiff.main([Zfile, Zfile]) != 0
out, err = capsys.readouterr()
assert out == ""
assert err == f"Warning: failed to open {Zfile}. Skipping.\n"
os.mkdir(self.temp("sub/"))
tmp = self.temp("sub/ascii.fits")
tmp_h = self.data("group.fits")
with hdulist.fitsopen(tmp_h) as hdu:
hdu.writeto(tmp)
assert fitsdiff.main([Zfile, tmp]) != 0
out, err = capsys.readouterr()
assert out == ""
assert err == f"Warning: failed to open {Zfile} (or {tmp}). Skipping.\n"
assert fitsdiff.main(["-q", Zfile, tmp]) != 0
out, err = capsys.readouterr()
assert out == ""
assert err == ""
assert fitsdiff.main([tmp, Zfile]) != 0
out, err = capsys.readouterr()
assert out == ""
assert err == f"Warning: failed to open {tmp} (or {Zfile}). Skipping.\n"
assert fitsdiff.main(["-q", tmp, Zfile]) != 0
out, err = capsys.readouterr()
assert out == ""
assert err == ""
def test_ignore_hdus(self):
a = np.arange(100).reshape(10, 10)
b = a.copy() + 1
ha = Header([("A", 1), ("B", 2), ("C", 3)])
phdu_a = PrimaryHDU(header=ha)
phdu_b = PrimaryHDU(header=ha)
ihdu_a = ImageHDU(data=a, name="SCI")
ihdu_b = ImageHDU(data=b, name="SCI")
hdulist_a = HDUList([phdu_a, ihdu_a])
hdulist_b = HDUList([phdu_b, ihdu_b])
tmp_a = self.temp("testa.fits")
tmp_b = self.temp("testb.fits")
hdulist_a.writeto(tmp_a)
hdulist_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
assert numdiff == 1
numdiff = fitsdiff.main([tmp_a, tmp_b, "-u", "SCI"])
assert numdiff == 0
def test_ignore_hdus_report(self, capsys):
a = np.arange(100).reshape(10, 10)
b = a.copy() + 1
ha = Header([("A", 1), ("B", 2), ("C", 3)])
phdu_a = PrimaryHDU(header=ha)
phdu_b = PrimaryHDU(header=ha)
ihdu_a = ImageHDU(data=a, name="SCI")
ihdu_b = ImageHDU(data=b, name="SCI")
hdulist_a = HDUList([phdu_a, ihdu_a])
hdulist_b = HDUList([phdu_b, ihdu_b])
tmp_a = self.temp("testa.fits")
tmp_b = self.temp("testb.fits")
hdulist_a.writeto(tmp_a)
hdulist_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b, "-u", "SCI"])
assert numdiff == 0
out, err = capsys.readouterr()
assert "testa.fits" in out
assert "testb.fits" in out
@pytest.mark.skip(reason="fails intentionally to show open files (see PR #10159)")
def test_fitsdiff_openfile(tmp_path):
"""Make sure that failing FITSDiff doesn't leave open files."""
path1 = tmp_path / "file1.fits"
path2 = tmp_path / "file2.fits"
hdulist = HDUList([PrimaryHDU(), ImageHDU(data=np.zeros(5))])
hdulist.writeto(path1)
hdulist[1].data[0] = 1
hdulist.writeto(path2)
diff = FITSDiff(path1, path2)
assert diff.identical, diff.report()
| TestFITSDiff_script |
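Illustrative aside (not a dataset entry): the fitsdiff CLI exercised above wraps astropy's FITSDiff. A minimal sketch of the same single-pixel comparison through the Python API, assuming astropy and numpy are installed and using illustrative file names:

import numpy as np
from astropy.io import fits

a = np.arange(100).reshape(10, 10)
b = a.copy()
b[1, 0] = 12  # one differing pixel, as in test_onediff

fits.PrimaryHDU(data=a).writeto("testa.fits", overwrite=True)
fits.PrimaryHDU(data=b).writeto("testb.fits", overwrite=True)

diff = fits.FITSDiff("testa.fits", "testb.fits")
print(diff.identical)  # False
print(diff.report())   # the human-readable report the CLI prints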
python | tensorflow__tensorflow | tensorflow/python/framework/experimental/unified_api_test.py | {
"start": 2205,
"end": 12876
} | class ____(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters([
("Graph", False),
("Mlir", True),
])
def testAdd(self, use_mlir):
if use_mlir:
SetTracingImplementation("mlir")
def model(a, b):
return unified_math_ops.add(a, b)
with context_lib.set_default(get_immediate_execution_context()):
a = TensorCastHelper(constant_op.constant([1., 2.]))
b = TensorCastHelper(constant_op.constant([3., 4.]))
func_output = def_function.function(model)(a, b)
self.assertAllEqual(func_output.numpy(), [4., 6.])
eager_output = model(a, b)
self.assertAllEqual(eager_output.numpy(), [4., 6.])
@parameterized.named_parameters([
("Graph", False),
("Mlir", True),
])
def testAddGrad(self, use_mlir):
if use_mlir:
SetTracingImplementation("mlir")
def model(a, b):
with tape_lib.GradientTape() as tape:
tape.watch(a)
tape.watch(b)
result = unified_math_ops.add(a, b)
grads = tape.gradient(result, [a, b])
return grads
with context_lib.set_default(get_immediate_execution_context()):
a = TensorCastHelper(constant_op.constant([1., 2.]))
b = TensorCastHelper(constant_op.constant([3., 4.]))
func_outputs = def_function.function(model)(a, b)
self.assertAllEqual(func_outputs[0].numpy(), [1.0, 1.0])
self.assertAllEqual(func_outputs[1].numpy(), [1.0, 1.0])
eager_outputs = model(a, b)
self.assertAllEqual(eager_outputs[0].numpy(), [1.0, 1.0])
self.assertAllEqual(eager_outputs[1].numpy(), [1.0, 1.0])
@parameterized.named_parameters([
("Graph", False),
("Mlir", True),
])
def testRelu(self, use_mlir):
if use_mlir:
SetTracingImplementation("mlir")
def model(t):
return unified_nn_ops.relu(t)
with context_lib.set_default(get_immediate_execution_context()):
positive = TensorCastHelper(constant_op.constant([1.]))
negative = TensorCastHelper(constant_op.constant([-1.]))
model_fn = def_function.function(model)
func_output = model_fn(positive)
self.assertAllEqual(func_output.numpy(), [1.])
func_output = model_fn(negative)
self.assertAllEqual(func_output.numpy(), [0.])
eager_output = model(positive)
self.assertAllEqual(eager_output.numpy(), [1.])
eager_output = model(negative)
self.assertAllEqual(eager_output.numpy(), [0.])
@parameterized.named_parameters([
("Graph", False),
("Mlir", True),
])
def testReluGrad(self, use_mlir):
if use_mlir:
SetTracingImplementation("mlir")
def model(t):
with tape_lib.GradientTape() as tape:
tape.watch(t)
result = unified_nn_ops.relu(t)
grads = tape.gradient(result, t)
return grads
with context_lib.set_default(get_immediate_execution_context()):
positive = TensorCastHelper(constant_op.constant([1.]))
negative = TensorCastHelper(constant_op.constant([-1.]))
model_fn = def_function.function(model)
func_output = model_fn(positive)
self.assertAllEqual(func_output.numpy(), [1.])
func_output = model_fn(negative)
self.assertAllEqual(func_output.numpy(), [0.])
eager_output = model(positive)
self.assertAllEqual(eager_output.numpy(), [1.])
eager_output = model(negative)
self.assertAllEqual(eager_output.numpy(), [0.])
@parameterized.named_parameters([
("Graph", False),
("Mlir", True),
])
def testNeg(self, use_mlir):
if use_mlir:
SetTracingImplementation("mlir")
def model(a):
return unified_math_ops.neg(a)
with context_lib.set_default(get_immediate_execution_context()):
a = TensorCastHelper(constant_op.constant([2.]))
func_output = def_function.function(model)(a)
self.assertAllEqual(func_output.numpy(), [-2.])
eager_output = model(a)
self.assertAllEqual(eager_output.numpy(), [-2.])
@parameterized.named_parameters([
("Graph", False),
("Mlir", True),
])
def testNegGrad(self, use_mlir):
if use_mlir:
SetTracingImplementation("mlir")
def model(a):
with tape_lib.GradientTape() as tape:
tape.watch(a)
result = unified_math_ops.neg(a)
grads = tape.gradient(result, a)
return grads
with context_lib.set_default(get_immediate_execution_context()):
a = TensorCastHelper(constant_op.constant([2.]))
func_outputs = def_function.function(model)(a)
self.assertAllEqual(func_outputs.numpy(), [-1.0])
eager_outputs = model(a)
self.assertAllEqual(eager_outputs.numpy(), [-1.0])
@parameterized.named_parameters([
("Graph", False),
("Mlir", True),
])
def testSub(self, use_mlir):
if use_mlir:
SetTracingImplementation("mlir")
def model(a, b):
return unified_math_ops.sub(a, b)
with context_lib.set_default(get_immediate_execution_context()):
a = TensorCastHelper(constant_op.constant([1., 2.]))
b = TensorCastHelper(constant_op.constant([3., 4.]))
func_output = def_function.function(model)(a, b)
self.assertAllEqual(func_output.numpy(), [-2., -2.])
eager_output = model(a, b)
self.assertAllEqual(eager_output.numpy(), [-2., -2.])
@parameterized.named_parameters([
("Graph", False),
("Mlir", True),
])
def testSubGrad(self, use_mlir):
if use_mlir:
SetTracingImplementation("mlir")
def model(a, b):
with tape_lib.GradientTape() as tape:
tape.watch(a)
tape.watch(b)
result = unified_math_ops.sub(a, b)
grads = tape.gradient(result, [a, b])
return grads
with context_lib.set_default(get_immediate_execution_context()):
a = TensorCastHelper(constant_op.constant([1., 2.]))
b = TensorCastHelper(constant_op.constant([3., 4.]))
func_outputs = def_function.function(model)(a, b)
self.assertAllEqual(func_outputs[0].numpy(), [1.0, 1.0])
self.assertAllEqual(func_outputs[1].numpy(), [-1.0, -1.0])
eager_outputs = model(a, b)
self.assertAllEqual(eager_outputs[0].numpy(), [1.0, 1.0])
self.assertAllEqual(eager_outputs[1].numpy(), [-1.0, -1.0])
@parameterized.named_parameters([
("Graph", False),
("Mlir", True),
])
def testMul(self, use_mlir):
if use_mlir:
SetTracingImplementation("mlir")
def model(a, b):
return unified_math_ops.mul(a, b)
with context_lib.set_default(get_immediate_execution_context()):
a = TensorCastHelper(constant_op.constant([1., 2.]))
b = TensorCastHelper(constant_op.constant([3., 4.]))
func_output = def_function.function(model)(a, b)
self.assertAllEqual(func_output.numpy(), [3., 8.])
eager_output = model(a, b)
self.assertAllEqual(eager_output.numpy(), [3., 8.])
@parameterized.named_parameters([
("Graph", False),
("Mlir", True),
])
def testMulGrad(self, use_mlir):
if use_mlir:
SetTracingImplementation("mlir")
def model(a, b):
with tape_lib.GradientTape() as tape:
tape.watch(a)
tape.watch(b)
result = unified_math_ops.mul(a, b)
grads = tape.gradient(result, [a, b])
return grads
with context_lib.set_default(get_immediate_execution_context()):
a = TensorCastHelper(constant_op.constant([1., 2.]))
b = TensorCastHelper(constant_op.constant([3., 4.]))
func_outputs = def_function.function(model)(a, b)
self.assertAllEqual(func_outputs[0].numpy(), [3., 4.])
self.assertAllEqual(func_outputs[1].numpy(), [1., 2.])
eager_outputs = model(a, b)
self.assertAllEqual(eager_outputs[0].numpy(), [3., 4.])
self.assertAllEqual(eager_outputs[1].numpy(), [1., 2.])
@parameterized.named_parameters([
("Graph", False),
("Mlir", True),
])
def testLog1p(self, use_mlir):
if use_mlir:
SetTracingImplementation("mlir")
def model(a):
return unified_math_ops.log1p(a)
with context_lib.set_default(get_immediate_execution_context()):
a = TensorCastHelper(constant_op.constant([1.]))
func_output = def_function.function(model)(a)
self.assertArrayNear(func_output.numpy(), [0.69314], 0.001)
eager_output = model(a)
self.assertArrayNear(eager_output.numpy(), [0.69314], 0.001)
@parameterized.named_parameters([
("Graph", False),
("Mlir", True),
])
def testLog1pGrad(self, use_mlir):
if use_mlir:
SetTracingImplementation("mlir")
def model(a):
with tape_lib.GradientTape() as tape:
tape.watch(a)
result = unified_math_ops.log1p(a)
grads = tape.gradient(result, a)
return grads
with context_lib.set_default(get_immediate_execution_context()):
a = TensorCastHelper(constant_op.constant([1.]))
func_outputs = def_function.function(model)(a)
self.assertArrayNear(func_outputs.numpy(), [0.5], 0.001)
eager_outputs = model(a)
self.assertArrayNear(eager_outputs.numpy(), [0.5], 0.001)
@parameterized.named_parameters([
("Graph", False),
("Mlir", True),
])
def testDivNoNan(self, use_mlir):
if use_mlir:
SetTracingImplementation("mlir")
def model(a, b):
return unified_math_ops.div_no_nan(a, b)
with context_lib.set_default(get_immediate_execution_context()):
a = TensorCastHelper(constant_op.constant([2.]))
b = TensorCastHelper(constant_op.constant([4.]))
func_output = def_function.function(model)(a, b)
self.assertArrayNear(func_output.numpy(), [0.5], 0.001)
eager_output = model(a, b)
self.assertArrayNear(eager_output.numpy(), [0.5], 0.001)
@parameterized.named_parameters([
("Graph", False),
("Mlir", True),
])
def testDivNoNanGrad(self, use_mlir):
if use_mlir:
SetTracingImplementation("mlir")
def model(a, b):
with tape_lib.GradientTape() as tape:
tape.watch(a)
tape.watch(b)
result = unified_math_ops.div_no_nan(a, b)
grads = tape.gradient(result, [a, b])
return grads
with context_lib.set_default(get_immediate_execution_context()):
a = TensorCastHelper(constant_op.constant([2.]))
b = TensorCastHelper(constant_op.constant([4.]))
func_outputs = def_function.function(model)(a, b)
self.assertArrayNear(func_outputs[0].numpy(), [0.25], 0.001)
self.assertArrayNear(func_outputs[1].numpy(), [-0.125], 0.001)
eager_outputs = model(a, b)
self.assertArrayNear(eager_outputs[0].numpy(), [0.25], 0.001)
self.assertArrayNear(eager_outputs[1].numpy(), [-0.125], 0.001)
| UnifiedApiTest |
python | kamyu104__LeetCode-Solutions | Python/total-cost-to-hire-k-workers.py | {
"start": 73,
"end": 980
} | class ____(object):
def totalCost(self, costs, k, candidates):
"""
:type costs: List[int]
:type k: int
:type candidates: int
:rtype: int
"""
left, right = candidates, max(len(costs)-candidates, candidates)-1
min_heap1, min_heap2 = costs[:left], costs[right+1:]
heapq.heapify(min_heap1), heapq.heapify(min_heap2)
result = 0
for _ in xrange(k):
if not min_heap2 or (min_heap1 and min_heap1[0] <= min_heap2[0]):
result += heapq.heappop(min_heap1)
if left <= right:
heapq.heappush(min_heap1, costs[left])
left += 1
else:
result += heapq.heappop(min_heap2)
if left <= right:
heapq.heappush(min_heap2, costs[right])
right -= 1
return result
| Solution |
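Illustrative aside (not a dataset entry): a worked example for the two-heap hiring solution above (LeetCode 2462, "Total Cost to Hire K Workers"). The snippet is written for Python 2 (xrange); under Python 3, replace xrange with range. The values below follow the problem's published example:

costs = [17, 12, 10, 2, 7, 2, 11, 20, 8]
k, candidates = 3, 4
# Round 1 hires cost 2 from the front window, round 2 hires cost 2 from the
# back window, round 3 hires cost 7 -> total 11.
print(Solution().totalCost(costs, k, candidates))  # 11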
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/datacatalog.py | {
"start": 1921,
"end": 2470
} | class ____(BaseGoogleLink):
"""Helper class for constructing Data Catalog Entry Group Link."""
name = "Data Catalog Entry Group"
key = "data_catalog_entry_group"
format_str = ENTRY_GROUP_LINK
@deprecated(
planned_removal_date="January 30, 2026",
use_instead="airflow.providers.google.cloud.links.dataplex.DataplexCatalogEntryLink",
reason="The Data Catalog will be discontinued on January 30, 2026 "
"in favor of Dataplex Universal Catalog.",
category=AirflowProviderDeprecationWarning,
)
| DataCatalogEntryGroupLink |
python | py-pdf__pypdf | pypdf/_page.py | {
"start": 4147,
"end": 10322
} | class ____:
"""
Represent a 2D transformation.
The transformation between two coordinate systems is represented by a 3-by-3
transformation matrix with the following form::
a b 0
c d 0
e f 1
Because a transformation matrix has only six elements that can be changed,
it is usually specified in PDF as the six-element array [ a b c d e f ].
Coordinate transformations are expressed as matrix multiplications::
[ x′ y′ 1 ] = [ x y 1 ] × [[a, b, 0], [c, d, 0], [e, f, 1]]
Example:
>>> from pypdf import PdfWriter, Transformation
>>> page = PdfWriter().add_blank_page(800, 600)
>>> op = Transformation().scale(sx=2, sy=3).translate(tx=10, ty=20)
>>> page.add_transformation(op)
"""
def __init__(self, ctm: CompressedTransformationMatrix = (1, 0, 0, 1, 0, 0)) -> None:
self.ctm = ctm
@property
def matrix(self) -> TransformationMatrixType:
"""
Return the transformation matrix as a tuple of tuples in the form:
((a, b, 0), (c, d, 0), (e, f, 1))
"""
return (
(self.ctm[0], self.ctm[1], 0),
(self.ctm[2], self.ctm[3], 0),
(self.ctm[4], self.ctm[5], 1),
)
@staticmethod
def compress(matrix: TransformationMatrixType) -> CompressedTransformationMatrix:
"""
Compresses the transformation matrix into a tuple of (a, b, c, d, e, f).
Args:
matrix: The transformation matrix as a tuple of tuples.
Returns:
A tuple representing the transformation matrix as (a, b, c, d, e, f)
"""
return (
matrix[0][0],
matrix[0][1],
matrix[1][0],
matrix[1][1],
matrix[2][0],
matrix[2][1],
)
def _to_cm(self) -> str:
# Returns the cm operation string for the given transformation matrix
return (
f"{self.ctm[0]:.4f} {self.ctm[1]:.4f} {self.ctm[2]:.4f} "
f"{self.ctm[3]:.4f} {self.ctm[4]:.4f} {self.ctm[5]:.4f} cm"
)
def transform(self, m: "Transformation") -> "Transformation":
"""
Apply one transformation to another.
Args:
m: a Transformation to apply.
Returns:
A new ``Transformation`` instance
Example:
>>> from pypdf import PdfWriter, Transformation
>>> height, width = 40, 50
>>> page = PdfWriter().add_blank_page(800, 600)
>>> op = Transformation((1, 0, 0, -1, 0, height)) # vertical mirror
>>> op = Transformation().transform(Transformation((-1, 0, 0, 1, width, 0))) # horizontal mirror
>>> page.add_transformation(op)
"""
ctm = Transformation.compress(matrix_multiply(self.matrix, m.matrix))
return Transformation(ctm)
def translate(self, tx: float = 0, ty: float = 0) -> "Transformation":
"""
Translate the contents of a page.
Args:
tx: The translation along the x-axis.
ty: The translation along the y-axis.
Returns:
A new ``Transformation`` instance
"""
m = self.ctm
return Transformation(ctm=(m[0], m[1], m[2], m[3], m[4] + tx, m[5] + ty))
def scale(
self, sx: Optional[float] = None, sy: Optional[float] = None
) -> "Transformation":
"""
Scale the contents of a page towards the origin of the coordinate system.
Typically, that is the lower-left corner of the page. That can be
changed by translating the contents / the page boxes.
Args:
sx: The scale factor along the x-axis.
sy: The scale factor along the y-axis.
Returns:
A new Transformation instance with the scaled matrix.
"""
if sx is None and sy is None:
raise ValueError("Either sx or sy must be specified")
if sx is None:
sx = sy
if sy is None:
sy = sx
assert sx is not None
assert sy is not None
op: TransformationMatrixType = ((sx, 0, 0), (0, sy, 0), (0, 0, 1))
ctm = Transformation.compress(matrix_multiply(self.matrix, op))
return Transformation(ctm)
def rotate(self, rotation: float) -> "Transformation":
"""
Rotate the contents of a page.
Args:
rotation: The angle of rotation in degrees.
Returns:
A new ``Transformation`` instance with the rotated matrix.
"""
rotation = math.radians(rotation)
op: TransformationMatrixType = (
(math.cos(rotation), math.sin(rotation), 0),
(-math.sin(rotation), math.cos(rotation), 0),
(0, 0, 1),
)
ctm = Transformation.compress(matrix_multiply(self.matrix, op))
return Transformation(ctm)
def __repr__(self) -> str:
return f"Transformation(ctm={self.ctm})"
@overload
def apply_on(self, pt: list[float], as_object: bool = False) -> list[float]:
...
@overload
def apply_on(
self, pt: tuple[float, float], as_object: bool = False
) -> tuple[float, float]:
...
def apply_on(
self,
pt: Union[tuple[float, float], list[float]],
as_object: bool = False,
) -> Union[tuple[float, float], list[float]]:
"""
Apply the transformation matrix on the given point.
Args:
pt: A tuple or list representing the point in the form (x, y).
as_object: If True, return items as FloatObject, otherwise as plain floats.
Returns:
A tuple or list representing the transformed point in the form (x', y')
"""
typ = FloatObject if as_object else float
pt1 = (
typ(float(pt[0]) * self.ctm[0] + float(pt[1]) * self.ctm[2] + self.ctm[4]),
typ(float(pt[0]) * self.ctm[1] + float(pt[1]) * self.ctm[3] + self.ctm[5]),
)
return list(pt1) if isinstance(pt, list) else pt1
@dataclass
| Transformation |
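Illustrative aside (not a dataset entry): a worked check of the affine math documented in the Transformation entry above, done with plain tuples so it runs without pypdf. The ctm (2, 0, 0, 2, 10, 0) is what scale(sx=2, sy=2) followed by translate(tx=10) produces, and applying it to (1, 1) mirrors Transformation.apply_on:

def apply_ctm(ctm, x, y):
    a, b, c, d, e, f = ctm
    return (a * x + c * y + e, b * x + d * y + f)

ctm = (2, 0, 0, 2, 10, 0)    # scale by 2, then shift x by 10
print(apply_ctm(ctm, 1, 1))  # (12, 2) -- apply_on((1, 1)) returns the same point as floats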
python | django__django | django/db/models/functions/window.py | {
"start": 2504,
"end": 2622
} | class ____(Func):
function = "PERCENT_RANK"
output_field = FloatField()
window_compatible = True
| PercentRank |
python | pymupdf__PyMuPDF | src/__init__.py | {
"start": 305597,
"end": 314899
} | class ____:
def __del__(self):
if type(self) is not Font:
return None
def __init__(
self,
fontname=None,
fontfile=None,
fontbuffer=None,
script=0,
language=None,
ordering=-1,
is_bold=0,
is_italic=0,
is_serif=0,
embed=1,
):
if fontbuffer:
if hasattr(fontbuffer, "getvalue"):
fontbuffer = fontbuffer.getvalue()
elif isinstance(fontbuffer, bytearray):
fontbuffer = bytes(fontbuffer)
if not isinstance(fontbuffer, bytes):
raise ValueError("bad type: 'fontbuffer'")
if isinstance(fontname, str):
fname_lower = fontname.lower()
if "/" in fname_lower or "\\" in fname_lower or "." in fname_lower:
message("Warning: did you mean a fontfile?")
if fname_lower in ("cjk", "china-t", "china-ts"):
ordering = 0
elif fname_lower.startswith("china-s"):
ordering = 1
elif fname_lower.startswith("korea"):
ordering = 3
elif fname_lower.startswith("japan"):
ordering = 2
elif fname_lower in fitz_fontdescriptors.keys():
import pymupdf_fonts # optional fonts
fontbuffer = pymupdf_fonts.myfont(fname_lower) # make a copy
fontname = None # ensure using fontbuffer only
del pymupdf_fonts # remove package again
elif ordering < 0:
fontname = Base14_fontdict.get(fontname, fontname)
lang = mupdf.fz_text_language_from_string(language)
font = JM_get_font(fontname, fontfile,
fontbuffer, script, lang, ordering,
is_bold, is_italic, is_serif, embed)
self.this = font
def __repr__(self):
return "Font('%s')" % self.name
@property
def ascender(self):
"""Return the glyph ascender value."""
return mupdf.fz_font_ascender(self.this)
@property
def bbox(self):
return self.this.fz_font_bbox()
@property
def buffer(self):
buffer_ = mupdf.FzBuffer( mupdf.ll_fz_keep_buffer( self.this.m_internal.buffer))
return mupdf.fz_buffer_extract_copy( buffer_)
def char_lengths(self, text, fontsize=11, language=None, script=0, wmode=0, small_caps=0):
"""Return tuple of char lengths of unicode 'text' under a fontsize."""
lang = mupdf.fz_text_language_from_string(language)
rc = []
for ch in text:
c = ord(ch)
if small_caps:
gid = mupdf.fz_encode_character_sc(self.this, c)
if gid >= 0:
font = self.this
else:
gid, font = mupdf.fz_encode_character_with_fallback(self.this, c, script, lang)
rc.append(fontsize * mupdf.fz_advance_glyph(font, gid, wmode))
return rc
@property
def descender(self):
"""Return the glyph descender value."""
return mupdf.fz_font_descender(self.this)
@property
def flags(self):
f = mupdf.ll_fz_font_flags(self.this.m_internal)
if not f:
return
assert isinstance( f, mupdf.fz_font_flags_t)
#log( '{=f}')
if mupdf_cppyy:
# cppyy includes remaining higher bits.
v = [f.is_mono]
def b(bits):
ret = v[0] & ((1 << bits)-1)
v[0] = v[0] >> bits
return ret
is_mono = b(1)
is_serif = b(1)
is_bold = b(1)
is_italic = b(1)
ft_substitute = b(1)
ft_stretch = b(1)
fake_bold = b(1)
fake_italic = b(1)
has_opentype = b(1)
invalid_bbox = b(1)
cjk_lang = b(1)
embed = b(1)
never_embed = b(1)
return {
"mono": is_mono if mupdf_cppyy else f.is_mono,
"serif": is_serif if mupdf_cppyy else f.is_serif,
"bold": is_bold if mupdf_cppyy else f.is_bold,
"italic": is_italic if mupdf_cppyy else f.is_italic,
"substitute": ft_substitute if mupdf_cppyy else f.ft_substitute,
"stretch": ft_stretch if mupdf_cppyy else f.ft_stretch,
"fake-bold": fake_bold if mupdf_cppyy else f.fake_bold,
"fake-italic": fake_italic if mupdf_cppyy else f.fake_italic,
"opentype": has_opentype if mupdf_cppyy else f.has_opentype,
"invalid-bbox": invalid_bbox if mupdf_cppyy else f.invalid_bbox,
'cjk': cjk_lang if mupdf_cppyy else f.cjk,
'cjk-lang': cjk_lang if mupdf_cppyy else f.cjk_lang,
'embed': embed if mupdf_cppyy else f.embed,
'never-embed': never_embed if mupdf_cppyy else f.never_embed,
}
def glyph_advance(self, chr_, language=None, script=0, wmode=0, small_caps=0):
"""Return the glyph width of a unicode (font size 1)."""
lang = mupdf.fz_text_language_from_string(language)
if small_caps:
gid = mupdf.fz_encode_character_sc(self.this, chr_)
if gid >= 0:
font = self.this
else:
gid, font = mupdf.fz_encode_character_with_fallback(self.this, chr_, script, lang)
return mupdf.fz_advance_glyph(font, gid, wmode)
def glyph_bbox(self, char, language=None, script=0, small_caps=0):
"""Return the glyph bbox of a unicode (font size 1)."""
lang = mupdf.fz_text_language_from_string(language)
if small_caps:
gid = mupdf.fz_encode_character_sc( self.this, char)
if gid >= 0:
font = self.this
else:
gid, font = mupdf.fz_encode_character_with_fallback( self.this, char, script, lang)
return Rect(mupdf.fz_bound_glyph( font, gid, mupdf.FzMatrix()))
@property
def glyph_count(self):
return self.this.m_internal.glyph_count
def glyph_name_to_unicode(self, name):
"""Return the unicode for a glyph name."""
return glyph_name_to_unicode(name)
def has_glyph(self, chr, language=None, script=0, fallback=0, small_caps=0):
"""Check whether font has a glyph for this unicode."""
if fallback:
lang = mupdf.fz_text_language_from_string(language)
gid, font = mupdf.fz_encode_character_with_fallback(self.this, chr, script, lang)
else:
if small_caps:
gid = mupdf.fz_encode_character_sc(self.this, chr)
else:
gid = mupdf.fz_encode_character(self.this, chr)
return gid
@property
def is_bold(self):
return mupdf.fz_font_is_bold( self.this)
@property
def is_italic(self):
return mupdf.fz_font_is_italic( self.this)
@property
def is_monospaced(self):
return mupdf.fz_font_is_monospaced( self.this)
@property
def is_serif(self):
return mupdf.fz_font_is_serif( self.this)
@property
def is_writable(self):
return True # see pymupdf commit ef4056ee4da2
font = self.this
flags = mupdf.ll_fz_font_flags(font.m_internal)
if mupdf_cppyy:
# cppyy doesn't handle bitfields correctly.
import cppyy
ft_substitute = cppyy.gbl.mupdf_mfz_font_flags_ft_substitute( flags)
else:
ft_substitute = flags.ft_substitute
if ( mupdf.ll_fz_font_t3_procs(font.m_internal)
or ft_substitute
or not mupdf.pdf_font_writing_supported(font)
):
return False
return True
@property
def name(self):
ret = mupdf.fz_font_name(self.this)
#log( '{ret=}')
return ret
def text_length(self, text, fontsize=11, language=None, script=0, wmode=0, small_caps=0):
"""Return length of unicode 'text' under a fontsize."""
thisfont = self.this
lang = mupdf.fz_text_language_from_string(language)
rc = 0
if not isinstance(text, str):
raise TypeError( MSG_BAD_TEXT)
for ch in text:
c = ord(ch)
if small_caps:
gid = mupdf.fz_encode_character_sc(thisfont, c)
if gid >= 0:
font = thisfont
else:
gid, font = mupdf.fz_encode_character_with_fallback(thisfont, c, script, lang)
rc += mupdf.fz_advance_glyph(font, gid, wmode)
rc *= fontsize
return rc
def unicode_to_glyph_name(self, ch):
"""Return the glyph name for a unicode."""
return unicode_to_glyph_name(ch)
def valid_codepoints(self):
'''
Returns sorted list of valid unicodes of a fz_font.
'''
ucs_gids = mupdf.fz_enumerate_font_cmap2(self.this)
ucss = [i.ucs for i in ucs_gids]
ucss_unique = set(ucss)
ucss_unique_sorted = sorted(ucss_unique)
return ucss_unique_sorted
| Font |
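Illustrative aside (not a dataset entry): a hedged usage sketch for the Font class above, assuming PyMuPDF is installed (importable as pymupdf in recent releases). "helv" is one of the Base-14 aliases resolved in __init__:

import pymupdf

font = pymupdf.Font("helv")
print(font.name)                               # e.g. 'Helvetica'
print(font.text_length("Hello", fontsize=11))  # advance width in points
print(font.has_glyph(ord("A")))                # non-zero glyph id when the glyph exists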
python | Netflix__metaflow | metaflow/packaging_sys/__init__.py | {
"start": 19755,
"end": 24901
} | class ____(MetaflowCodeContent, version_id=0):
@classmethod
def get_info_impl(
cls, mfcontent_info: Optional[Dict[str, Any]]
) -> Optional[Dict[str, Any]]:
path_to_file = os.path.join(get_metaflow_root(), "INFO")
if os.path.isfile(path_to_file):
with open(path_to_file, "r", encoding="utf-8") as f:
return json.load(f)
return None
@classmethod
def get_config_impl(
cls, mfcontent_info: Optional[Dict[str, Any]]
) -> Optional[Dict[str, Any]]:
path_to_file = os.path.join(get_metaflow_root(), "CONFIG")
if os.path.isfile(path_to_file):
with open(path_to_file, "r", encoding="utf-8") as f:
return json.load(f)
return None
@classmethod
def get_filename_impl(
cls,
mfcontent_info: Optional[Dict[str, Any]],
filename: str,
content_type: ContentType,
) -> Optional[str]:
"""
For V0, the filename is simply the filename passed in.
"""
path_to_file = os.path.join(get_metaflow_root(), filename)
if os.path.isfile(path_to_file):
return path_to_file
return None
@classmethod
def get_distribution_finder_impl(
cls, mfcontent_info: Optional[Dict[str, Any]]
) -> Optional["metaflow.extension_support.metadata.DistributionFinder"]:
return None
@classmethod
def get_archive_info_impl(
cls,
mfcontent_info: Optional[Dict[str, Any]],
archive: Any,
packaging_backend: Type[PackagingBackend] = TarPackagingBackend,
) -> Optional[Dict[str, Any]]:
info_content = packaging_backend.cls_get_member(archive, "INFO")
if info_content:
return json.loads(info_content)
return None
@classmethod
def get_archive_config_impl(
cls,
mfcontent_info: Optional[Dict[str, Any]],
archive: Any,
packaging_backend: Type[PackagingBackend] = TarPackagingBackend,
) -> Optional[Dict[str, Any]]:
info_content = packaging_backend.cls_get_member(archive, "CONFIG")
if info_content:
return json.loads(info_content)
return None
@classmethod
def get_archive_filename_impl(
cls,
mfcontent_info: Optional[Dict[str, Any]],
archive: Any,
filename: str,
content_type: ContentType,
packaging_backend: Type[PackagingBackend] = TarPackagingBackend,
) -> str:
if packaging_backend.cls_has_member(archive, filename):
# The file is present in the archive
return filename
return None
@classmethod
def get_archive_content_members_impl(
cls,
mfcontent_info: Optional[Dict[str, Any]],
archive: Any,
content_types: Optional[int] = None,
packaging_backend: Type[PackagingBackend] = TarPackagingBackend,
) -> List[Any]:
"""
For V0, we use a static list of known files to classify the content
"""
known_prefixes = {
"metaflow/": ContentType.CODE_CONTENT.value,
"metaflow_extensions/": ContentType.CODE_CONTENT.value,
"INFO": ContentType.OTHER_CONTENT.value,
"CONFIG": ContentType.OTHER_CONTENT.value,
"conda.manifest": ContentType.OTHER_CONTENT.value,
"uv.lock": ContentType.OTHER_CONTENT.value,
"pyproject.toml": ContentType.OTHER_CONTENT.value,
# Used in nflx-metaflow-extensions
"condav2-1.cnd": ContentType.OTHER_CONTENT.value,
}
to_return = []
for member in packaging_backend.cls_list_members(archive):
filename = packaging_backend.cls_member_name(member)
added = False
for prefix, classification in known_prefixes.items():
if (
prefix[-1] == "/" and filename.startswith(prefix)
) or prefix == filename:
if content_types & classification:
to_return.append(member)
added = True
break
if not added and content_types & ContentType.USER_CONTENT.value:
# Everything else is user content
to_return.append(member)
return to_return
@classmethod
def get_post_extract_env_vars_impl(cls, dest_dir: str) -> Dict[str, str]:
return {"PYTHONPATH": dest_dir}
def get_excluded_tl_entries(self) -> List[str]:
"""
When packaging Metaflow from within an executing Metaflow flow, we need to
exclude the files that are inserted by this content from being packaged (possibly).
Use this function to return these files or top-level directories.
Returns
-------
List[str]
Files or directories to exclude
"""
return ["CONFIG", "INFO"]
# The remaining methods are intentionally left unimplemented: they are only used
# when creating a package, and new packages always start at V1, so they are never
# called on V0.
| MetaflowCodeContentV0 |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 94389,
"end": 94753
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("column_id", "client_mutation_id")
column_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="columnId")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| DeleteProjectColumnInput |
python | networkx__networkx | networkx/algorithms/centrality/tests/test_eigenvector_centrality.py | {
"start": 4031,
"end": 5254
} | class ____:
def test_multigraph(self):
with pytest.raises(nx.NetworkXException):
nx.eigenvector_centrality(nx.MultiGraph())
def test_multigraph_numpy(self):
with pytest.raises(nx.NetworkXException):
nx.eigenvector_centrality_numpy(nx.MultiGraph())
def test_null(self):
with pytest.raises(nx.NetworkXException):
nx.eigenvector_centrality(nx.Graph())
def test_null_numpy(self):
with pytest.raises(nx.NetworkXException):
nx.eigenvector_centrality_numpy(nx.Graph())
@pytest.mark.parametrize(
"G",
[
nx.empty_graph(3),
nx.DiGraph([(0, 1), (1, 2)]),
],
)
def test_disconnected_numpy(self, G):
msg = "does not give consistent results for disconnected"
with pytest.raises(nx.AmbiguousSolution, match=msg):
nx.eigenvector_centrality_numpy(G)
def test_zero_nstart(self):
G = nx.Graph([(1, 2), (1, 3), (2, 3)])
with pytest.raises(
nx.NetworkXException, match="initial vector cannot have all zero values"
):
nx.eigenvector_centrality(G, nstart={v: 0 for v in G})
| TestEigenvectorCentralityExceptions |
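Illustrative aside (not a dataset entry): the exception tests above pin down the contract that eigenvector centrality rejects multigraphs and empty graphs while working on ordinary connected graphs. A quick sketch, assuming networkx is installed:

import networkx as nx

print(nx.eigenvector_centrality(nx.complete_graph(4)))  # all four nodes ~= 0.5
try:
    nx.eigenvector_centrality(nx.MultiGraph())
except nx.NetworkXException as exc:
    print(type(exc).__name__, exc)  # raised, as asserted in test_multigraph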
python | joke2k__faker | faker/providers/address/bn_BD/__init__.py | {
"start": 115,
"end": 12390
} | class ____(AddressProvider):
area_names = (
"আলি",
"আলম",
"অভয়",
"আনোয়ার",
"ব্রাহ্মণ",
"বটিয়া",
"বাঘার",
"বেগম",
"বিজয়",
"বন্দর",
"বালিয়া",
"বাজিত",
"বাকের",
"বোরহান",
"বকশী",
"বদর",
"বিরাম",
"বিশ্বনাথ",
"চৌদ্দ",
"চিতল",
"দাউদ",
"দৌলত",
"দেব",
"দেবী",
"ইসলাম",
"ফুল",
"ফকির",
"ফটিক",
"গোপাল",
"গৌর",
"হাজী",
"হরিরাম",
"হোসেন",
"হাকিম",
"জীবন",
"জগন্নাথ",
"কুমার",
"কালী",
"কেশব",
"কুতুব",
"কবির",
"কালিয়া",
"করিম",
"কাজী",
"কমল",
"লাল",
"মুরাদ",
"মনোহর",
"মির",
"মহেশ",
"মোড়ল",
"মোল্লা",
"মোহাম্মদ",
"মনিরাম",
"মানিক",
"মির্জা",
"মধু",
"মোহন",
"মহাদেব",
"মাধব",
"নাসির",
"নড়িয়া",
"নাজির",
"নালিতা",
"নন্দী",
"ওসমানী",
"পাইক",
"পলাশ",
"পার্বতী",
"রাম",
"রায়",
"রানী",
"সোনা",
"শরণ",
"শ্যাম",
"সুবর্ণ",
"সিরাজ",
"সখি",
"সদর",
"সুন্দর",
"সৈয়দ",
"শাজাহান",
"শান্তি",
"শিব",
"তের",
"তারা",
"উজির",
)
building_names = (
"বাড়ী নং",
"বিল্ডিং নং",
"বাসা নং",
"হোল্ডিং নং",
)
building_number_formats = ("%", "%#", "%##")
city_prefixes = ("উত্তর", "পূর্ব", "পশ্চিম", "দক্ষিণ", "মধ্য", "নতুন", "পুরাতন")
city_suffixes = (
"বাজার",
"বাড়ি",
"চর",
"দিয়া",
"ডাঙ্গা",
"গঞ্জ",
"গ্রাম",
"গাঁও",
"গাঁ",
"গড়",
"হাট",
"হার",
"খালি",
"মাটি",
"নগর",
"পুর",
"তলা",
)
cities = (
"বরগুনা",
"বরিশাল",
"ভোলা",
"বান্দরবান",
"ব্রাহ্মণবাড়িয়া",
"বাগেরহাট",
"বগুড়া",
"চাঁদপুর",
"চট্টগ্রাম",
"কুমিল্লা",
"কক্সবাজার",
"চুয়াডাঙ্গা",
"ঢাকা",
"দিনাজপুর",
"ফরিদপুর",
"ফেনী",
"গাজীপুর",
"গোপালগঞ্জ",
"গাইবান্ধা",
"হবিগঞ্জ",
"ঝালকাঠি",
"যশোর",
"ঝিনাইদহ",
"জামালপুর",
"জয়পুরহাট",
"খাগড়াছড়ি",
"কিশোরগঞ্জ",
"খুলনা",
"কুষ্টিয়া",
"কুড়িগ্রাম",
"লক্ষ্মীপুর",
"লালমনিরহাট",
"মাদারীপুর",
"মানিকগঞ্জ",
"মুন্সীগঞ্জ",
"মাগুরা",
"মেহেরপুর",
"ময়মনসিংহ",
"মৌলভীবাজার",
"নোয়াখালী",
"নারায়ণগঞ্জ",
"নরসিংদী",
"নড়াইল",
"নেত্রকোণা",
"নওগাঁ",
"নাটোর",
"চাঁপাইনবাবগঞ্জ",
"নীলফামারী",
"পটুয়াখালী",
"পিরোজপুর",
"পাবনা",
"পঞ্চগড়",
"রাঙ্গামাটি",
"রাজবাড়ী",
"রাজশাহী",
"রংপুর",
"শরীয়তপুর",
"সাতক্ষীরা",
"শেরপুর",
"সিরাজগঞ্জ",
"সুনামগঞ্জ",
"সিলেট",
"টাঙ্গাইল",
"ঠাকুরগাঁও",
)
countries = (
"আফগানিস্তান",
"আলবেনিয়া",
"আলজেরিয়া",
"আমেরিকান সামোয়া",
"অ্যান্ডোরা",
"অ্যাঙ্গোলা",
"অ্যাঙ্গুইলা",
"অ্যান্টার্কটিকা",
"অ্যান্টিগুয়া ও বার্বুডা",
"আর্জেন্টিনা",
"আর্মেনিয়া",
"আরুবা",
"অস্ট্রেলিয়া",
"অস্ট্রিয়া",
"আজারবাইজান",
"বাহামাস",
"বাহরাইন",
"বাংলাদেশ",
"বার্বাডোস",
"বেলারুশ",
"বেলজিয়াম",
"বেলিজ",
"বেনিন",
"বারমুডা",
"ভুটান",
"বলিভিয়া",
"বসনিয়া-হার্জেগোভিনা",
"বতসোয়ানা",
"বাউবে দ্বীপ",
"ব্রাজিল",
"ব্রিটিশ ভারত মহাসাগরীয় অঞ্চল",
"ব্রিটিশ ভার্জিন দ্বীপপুঞ্জ",
"ব্রুনাই",
"বুলগেরিয়া",
"বুর্কিনা ফাসো",
"বুরুন্ডি",
"কম্বোডিয়া",
"ক্যামেরুন",
"কানাডা",
"কেপ ভার্দে",
"কেম্যান দ্বীপপুঞ্জ",
"মধ্য আফ্রিকান প্রজাতন্ত্র",
"চাদ",
"চিলি",
"চীন",
"ক্রিস্টমাস দ্বীপ",
"কোকোস দ্বীপপুঞ্জ",
"কলাম্বিয়া",
"কোমোরোস",
"কঙ্গো প্রজাতন্ত্র",
"কুক দ্বীপপুঞ্জ",
"কোস্টারিকা",
"আইভরি কোট",
"ক্রোয়েশিয়া",
"কিউবা",
"সাইপ্রাস প্রজাতন্ত্র",
"চেক প্রজাতন্ত্র",
"ডেনমার্ক",
"জিবুতি প্রজাতন্ত্র",
"ডোমিনিকা",
"ডোমিনিকান প্রজাতন্ত্র",
"ইকুয়েডর",
"মিশর",
"এল সালভাদর",
"নিরক্ষীয় গিনি",
"ইরিত্রিয়া",
"এস্তোনিয়া",
"ইথিওপিয়া",
"ফারো দ্বীপপুঞ্জ",
"ফকল্যান্ড দ্বীপপুঞ্জ",
"ফিজি প্রজাতন্ত্র",
"ফিনল্যান্ড",
"ফ্রান্স",
"একটি দেশের নাম",
"ফরাসি পলিনেশিয়া",
"ফরাসি সুদূর দক্ষিণ দ্বীপপুঞ্জ",
"গ্যাবন",
"গাম্বিয়া",
"জর্জিয়া",
"জার্মানি",
"ঘানা",
"জিব্রাল্টার",
"গ্রীস",
"গ্রিনল্যান্ড",
"গ্রেনাডা",
"গুয়াডেলুপ",
"গুয়াম",
"গুয়াতেমালা",
"গার্নসি",
"গিনি",
"গিনি-বিসাউ",
"গিয়ানা",
"হাইতি",
"হার্ড আইল্যান্ড এবং ম্যাকডোনাল্ড দ্বীপপুঞ্জ",
"ভ্যাটিকান সিটি",
"হন্ডুরাস",
"হংকং",
"হাঙ্গেরি",
"আইসল্যান্ড",
"ভারত",
"ইন্দোনেশিয়া",
"ইরান",
"ইরাক",
"আপনি উত্তর দিবেন না",
"আইল অফ ম্যান",
"ইসরায়েল",
"ইতালি",
"জ্যামাইকা",
"জাপান",
"জার্সি",
"জর্ডান",
"কাজাখস্তান",
"কেনিয়া",
"কিরিবাতি",
"কোরিয়া",
"কোরিয়া",
"কুয়েত",
"কিরগিজস্তান প্রজাতন্ত্র",
"পিপলস ডেমোক্রেটিক রিপাবলিক অফ লাওস",
"লাটভিয়া",
"লেবানন",
"লেসোথো",
"লাইবেরিয়া",
"লিবিয়া",
"লিচেনস্টাইন",
"লিথুয়ানিয়া",
"লাক্সেমবার্গ",
"ম্যাকাও",
"উত্তর মেসিডোনিয়া প্রজাতন্ত্র",
"মাদাগাস্কার",
"মালাউই",
"মালয়েশিয়া",
"মালদ্বীপ",
"মালি",
"মাল্টা প্রজাতন্ত্র",
"মার্শাল দ্বীপপুঞ্জ",
"মার্টিনিক",
"ইসলামী প্রজাতন্ত্র মৌরিতানিয়া",
"মরিশাস",
"মায়োট",
"মেক্সিকো",
"মাইক্রোনেশিয়ার ফেডারেটেড স্টেটস",
"মোল্দোভা প্রজাতন্ত্র",
"মোনাকোর রাজত্ব",
"মঙ্গোলিয়া",
"মন্টিনিগ্রো প্রজাতন্ত্র",
"মন্টসেরাট",
"মরক্কো",
"মোজাম্বিক",
"মিয়ানমার",
"নামিবিয়া",
"নাউরু",
"নেপাল",
"নেদারল্যান্ডস এন্টিলস",
"নেদারল্যান্ডস",
"নতুন ক্যালেডোনিয়া",
"নিউজিল্যান্ড",
"নিকারাগুয়া",
"নাইজার",
"নাইজেরিয়া",
"সুন্দর",
"নরফোক দ্বীপ",
"উত্তর মারিয়ানা দ্বীপপুঞ্জ",
"নরওয়ে",
"ওমান",
"পাকিস্তান",
"পালাউ",
"ফিলিস্তিন অঞ্চল",
"পানামা",
"পাপুয়া নিউ গিনি",
"প্যারাগুয়ে",
"পেরু",
"ফিলিপাইন",
"পিটকের্ন দ্বীপপুঞ্জ",
"পোল্যান্ড",
"পর্তুগাল",
"পুয়ের্তো রিকো",
"কাতার",
"পুনর্মিলন",
"রোমানিয়া",
"রাশিয়া",
"রুয়ান্ডা",
"সেন্ট বার্থেলেমি",
"সেন্ট হেলেনা",
"সেন্ট কিটস ও নেভিস",
"সেন্ট লুসিয়া",
"সেন্ট মার্টিন",
"সেন্ট পিয়ের এবং মিকেলন",
"সেন্ট ভিনসেন্ট ও গ্রেনাডাইন দ্বীপপুঞ্জ",
"সামোয়া",
"সান মারিনো",
"স্যান্টোম প্রিন্সিপ",
"সৌদি আরব",
"সেনেগাল",
"সার্বিয়া",
"সেশেলস",
"সিয়েরা লিওন",
"সিঙ্গাপুর",
"স্লোভাকিয়া",
"স্লোভেনিয়া",
"সলোমান দ্বীপপুঞ্জ",
"সোমালিয়া",
"দক্ষিণ আফ্রিকা প্রজাতন্ত্র",
"দক্ষিণ জর্জিয়া এবং দক্ষিণ স্যান্ডউইচ দ্বীপপুঞ্জ",
"স্পেন",
"শ্রীলংকা",
"সুদান",
"সুরিনাম",
"স্বালবার্ড এবং জানমায়েন দ্বীপপুঞ্জ",
"সোয়াজিল্যান্ডের রাজ্য",
"সুইডেন",
"সুইজারল্যান্ড",
"সিরিয়া",
"তাইওয়ান",
"তাজিকিস্তান প্রজাতন্ত্র",
"তানজানিয়া",
"থাইল্যান্ড",
"পূর্ব তিমুর",
"যাও",
"টোকেলাউ",
"টোঙ্গা",
"ত্রিনিদাদ ও টোবাগো প্রজাতন্ত্র",
"তিউনিসিয়া",
"তুরস্ক",
"তুর্কমেনিস্তান",
"টার্কস্ ও কেইকোস দ্বীপপুঞ্জ",
"টুভালু",
"উগান্ডা",
"ইউক্রেন",
"সংযুক্ত আরব আমিরাত",
"ইংল্যান্ড",
"মার্কিন যুক্তরাষ্ট্র",
"ইউএস মাইনর আউটলি আইল্যান্ড",
"আমেরিকান ভার্জিন আইল্যান্ডস",
"উরুগুয়ে",
"উজবেকিস্তান",
"ভানুয়াতু",
"ভেনিজুয়েলা",
"ভিয়েতনাম",
"ওয়ালিস এবং ফুটুনা",
"পশ্চিম সাহারা",
"ইয়েমেন",
"জাম্বিয়া",
"জিম্বাবুয়ে",
)
secondary_address_formats = (
"ফ্ল্যাট %",
"ফ্ল্যাট %#",
"ষ্টুডিও %",
"ষ্টুডিও %#",
"অ্যাপার্টমেন্ট %",
"অ্যাপার্টমেন্ট %#",
)
street_suffixes = (
"এভিনিউ",
"সেন্টার",
"চত্বর",
"গলি",
"ঘাট",
"কর্নার",
"লেন",
"মহাসড়ক",
"মহল্লা",
"মোড়",
"পাড়া",
"পার্ক",
"প্লাজা",
"রাস্তা",
"রোড",
"সড়ক",
"স্টেশন",
"স্ট্যান্ড",
)
postcode_formats = ("%###",)
street_name_formats = (
"{{area_name}}{{street_suffix}}",
"{{city_prefix}} {{area_name}}{{street_suffix}}",
"{{city_prefix}} {{area_name}}{{city_suffix}}",
"{{area_name}}{{city_suffix}}",
"{{area_name}}{{city_suffix}} {{street_suffix}}",
"{{city_prefix}} {{area_name}}{{city_suffix}} {{street_suffix}}",
)
street_address_formats = (
"{{building_name}} {{building_number}}, {{street_name}}",
"{{secondary_address}}, {{building_name}} {{building_number}}, {{street_name}}",
)
town_formats = ("{{area_name}}{{city_suffix}}",)
address_formats = ("{{street_address}}, {{town}}, {{city}}, {{postcode}}",)
def administrative_unit(self) -> str:
"""
:example: 'ঢাকা'
"""
return self.random_element(self.cities)
def area_name(self) -> str:
"""
:example: 'উজির'
"""
return self.random_element(self.area_names)
def building_name(self) -> str:
"""
:example: 'বাড়ী নং'
"""
return self.random_element(self.building_names)
def building_number(self) -> str:
"""
:example: '৭৯১' (the English digits '791' rendered as Bengali numerals)
"""
return translate_to_bengali_digits(self.numerify(self.random_element(self.building_number_formats)))
def city_prefix(self) -> str:
"""
:example: 'উত্তর'
"""
return self.random_element(self.city_prefixes)
def city(self) -> str:
"""
:example: 'ঢাকা'
"""
return self.random_element(self.cities)
def postcode(self) -> str:
"""
See
https://bdpost.portal.gov.bd/site/page/6aaeabe4-479b-4e5a-a671-e9e5b994bf9a
"""
return translate_to_bengali_digits(self.numerify(self.random_element(self.postcode_formats)))
def secondary_address(self) -> str:
"""
The generated value is a Bengali word followed by English digits, so split it on the space,
convert the digits to Bengali numerals, and return them rejoined with the Bengali word.
:example: 'অ্যাপার্টমেন্ট 14' becomes 'অ্যাপার্টমেন্ট ১৪'
"""
value = self.bothify(self.random_element(self.secondary_address_formats))
word_list = value.split(" ")
return word_list[0] + " " + translate_to_bengali_digits(word_list[1])
def town(self) -> str:
"""
:example: 'নবাব'
"""
pattern: str = self.random_element(self.town_formats)
return self.generator.parse(pattern)
| Provider |
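A short usage sketch for this locale provider through the public Faker entry point (assumes the faker package with the bn_BD locale installed; actual output depends on the seed):
from faker import Faker
fake = Faker("bn_BD")
fake.seed_instance(0)
print(fake.city())      # one of the entries in `cities`, e.g. 'ঢাকা'
print(fake.postcode())  # four Bengali digits, following postcode_formats ('%###')
print(fake.address())   # street address, town, city and postcode combined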
python | pandas-dev__pandas | pandas/tests/frame/test_query_eval.py | {
"start": 2645,
"end": 7743
} | class ____:
# smaller hits python, larger hits numexpr
@pytest.mark.parametrize("n", [4, 4000])
@pytest.mark.parametrize(
"op_str,op,rop",
[
("+", "__add__", "__radd__"),
("-", "__sub__", "__rsub__"),
("*", "__mul__", "__rmul__"),
("/", "__truediv__", "__rtruediv__"),
],
)
def test_ops(self, op_str, op, rop, n):
# tst ops and reversed ops in evaluation
# GH7198
df = DataFrame(1, index=range(n), columns=list("abcd"))
df.iloc[0] = 2
m = df.mean()
base = DataFrame( # noqa: F841
np.tile(m.values, n).reshape(n, -1), columns=list("abcd")
)
expected = eval(f"base {op_str} df")
# ops as strings
result = eval(f"m {op_str} df")
tm.assert_frame_equal(result, expected)
# these are commutative
if op in ["+", "*"]:
result = getattr(df, op)(m)
tm.assert_frame_equal(result, expected)
# these are not
elif op in ["-", "/"]:
result = getattr(df, rop)(m)
tm.assert_frame_equal(result, expected)
def test_dataframe_sub_numexpr_path(self):
# GH7192: Note we need a large number of rows to ensure this
# goes through the numexpr path
df = DataFrame({"A": np.random.default_rng(2).standard_normal(25000)})
df.iloc[0:5] = np.nan
expected = 1 - np.isnan(df.iloc[0:25])
result = (1 - np.isnan(df)).iloc[0:25]
tm.assert_frame_equal(result, expected)
def test_query_non_str(self):
# GH 11485
df = DataFrame({"A": [1, 2, 3], "B": ["a", "b", "b"]})
msg = "expr must be a string to be evaluated"
with pytest.raises(ValueError, match=msg):
df.query(lambda x: x.B == "b")
with pytest.raises(ValueError, match=msg):
df.query(111)
def test_query_empty_string(self):
# GH 13139
df = DataFrame({"A": [1, 2, 3]})
msg = "expr cannot be an empty string"
with pytest.raises(ValueError, match=msg):
df.query("")
def test_query_duplicate_column_name(self, engine, parser):
df = DataFrame(
{
"A": range(3),
"B": range(3),
"C": range(3)
}
).rename(columns={"B": "A"})
res = df.query("C == 1", engine=engine, parser=parser)
expect = DataFrame(
[[1, 1, 1]],
columns=["A", "A", "C"],
index=[1]
)
tm.assert_frame_equal(res, expect)
def test_eval_resolvers_as_list(self):
# GH 14095
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 2)), columns=list("ab")
)
dict1 = {"a": 1}
dict2 = {"b": 2}
assert df.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"]
assert pd.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"]
def test_eval_resolvers_combined(self):
# GH 34966
df = DataFrame(
np.random.default_rng(2).standard_normal((10, 2)), columns=list("ab")
)
dict1 = {"c": 2}
# Both input and default index/column resolvers should be usable
result = df.eval("a + b * c", resolvers=[dict1])
expected = df["a"] + df["b"] * dict1["c"]
tm.assert_series_equal(result, expected)
def test_eval_object_dtype_binop(self):
# GH#24883
df = DataFrame({"a1": ["Y", "N"]})
res = df.eval("c = ((a1 == 'Y') & True)")
expected = DataFrame({"a1": ["Y", "N"], "c": [True, False]})
tm.assert_frame_equal(res, expected)
def test_using_numpy(self, engine, parser):
# GH 58041
skip_if_no_pandas_parser(parser)
df = Series([0.2, 1.5, 2.8], name="a").to_frame()
res = df.eval("@np.floor(a)", engine=engine, parser=parser)
expected = np.floor(df["a"])
tm.assert_series_equal(expected, res)
def test_eval_simple(self, engine, parser):
df = Series([0.2, 1.5, 2.8], name="a").to_frame()
res = df.eval("a", engine=engine, parser=parser)
expected = df["a"]
tm.assert_series_equal(expected, res)
def test_extension_array_eval(self, engine, parser, request):
# GH#58748
if engine == "numexpr":
mark = pytest.mark.xfail(
reason="numexpr does not support extension array dtypes"
)
request.applymarker(mark)
df = DataFrame({"a": pd.array([1, 2, 3]), "b": pd.array([4, 5, 6])})
result = df.eval("a / b", engine=engine, parser=parser)
expected = Series(pd.array([0.25, 0.40, 0.50]))
tm.assert_series_equal(result, expected)
def test_complex_eval(self, engine, parser):
# GH#21374
df = DataFrame({"a": [1 + 2j], "b": [1 + 1j]})
result = df.eval("a/b", engine=engine, parser=parser)
expected = Series([1.5 + 0.5j])
tm.assert_series_equal(result, expected)
| TestDataFrameEval |
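A compact sketch of the two entry points these tests exercise, DataFrame.eval and DataFrame.query (standard pandas API; the frame is illustrative):
import pandas as pd
df = pd.DataFrame({"a": [1, 2, 3], "b": [10, 20, 30]})
df = df.eval("c = a + b")            # evaluate an expression over columns, assigning a new one
threshold = 15
subset = df.query("b > @threshold")  # '@' references local variables inside the expression
print(subset)                        # rows with b of 20 and 30, including the derived column c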
python | pytorch__pytorch | test/torch_np/test_reductions.py | {
"start": 923,
"end": 1067
} | class ____(TestCase):
def test_basic(self):
x = np.arange(-2, 3)
assert_equal(np.flatnonzero(x), [0, 1, 3, 4])
| TestFlatnonzero |
python | jazzband__django-simple-history | simple_history/models.py | {
"start": 32624,
"end": 33299
} | class ____:
def get_queryset(self, **hints):
instance = hints.get("instance")
if instance:
history = getattr(instance, SIMPLE_HISTORY_REVERSE_ATTR_NAME, None)
histmgr = getattr(
self.get_related_model(),
getattr(
self.get_related_model()._meta,
"simple_history_manager_attribute",
"_notthere",
),
None,
)
if history and histmgr:
return histmgr.as_of(getattr(history, "_as_of", history.history_date))
return super().get_queryset(**hints)
| HistoricDescriptorMixin |
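The mixin above reroutes related-object lookups through the history manager's as_of when the instance is itself a historical snapshot; a hedged sketch of the user-facing behaviour it supports (the Poll and Category models are hypothetical, as_of is the documented django-simple-history query method):
from datetime import timedelta
from django.utils import timezone
# Hypothetical models: Poll registers HistoricalRecords() and has a ForeignKey to Category.
poll = Poll.objects.get(pk=1)
old_poll = poll.history.as_of(timezone.now() - timedelta(days=7))
# With historic relation tracking enabled, the descriptor resolves this FK against
# Category's history at the same point in time instead of the current table row.
print(old_poll.category.name)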
python | PrefectHQ__prefect | src/integrations/prefect-gcp/prefect_gcp/models/cloud_run_v2.py | {
"start": 8210,
"end": 12094
} | class ____(BaseModel):
"""
ExecutionV2 is a data model for an execution of a job that will be run on
Cloud Run API v2.
"""
name: str
uid: str
generation: str
labels: Dict[str, str]
annotations: Dict[str, str]
createTime: str
startTime: Optional[str]
completionTime: Optional[str]
deleteTime: Optional[str]
expireTime: Optional[str]
launchStage: Literal[
"ALPHA",
"BETA",
"GA",
"DEPRECATED",
"EARLY_ACCESS",
"PRELAUNCH",
"UNIMPLEMENTED",
"LAUNCH_TAGE_UNSPECIFIED",
]
job: str
parallelism: int
taskCount: int
template: Dict
reconciling: bool
conditions: List[Dict]
observedGeneration: Optional[str]
runningCount: Optional[int]
succeededCount: Optional[int]
failedCount: Optional[int]
cancelledCount: Optional[int]
retriedCount: Optional[int]
logUri: str
satisfiesPzs: bool
etag: str
def is_running(self) -> bool:
"""
Return whether the execution is running.
Returns:
Whether the execution is running.
"""
return self.completionTime is None
def succeeded(self):
"""Whether or not the Execution completed is a successful state."""
completed_condition = self.condition_after_completion()
if (
completed_condition
and completed_condition["state"] == "CONDITION_SUCCEEDED"
):
return True
return False
def condition_after_completion(self) -> Dict:
"""
Return the condition after completion.
Returns:
The condition after completion.
"""
if isinstance(self.conditions, List):
for condition in self.conditions:
if condition["type"] == "Completed":
return condition
@classmethod
def get(
cls,
cr_client: Resource,
execution_id: str,
):
"""
Get an execution from Cloud Run with the V2 API.
Args:
cr_client: The base client needed for interacting with GCP
Cloud Run V2 API.
execution_id: The name of the execution to get, in the form of
projects/{project}/locations/{location}/jobs/{job}/executions
/{execution}
"""
# noinspection PyUnresolvedReferences
request = cr_client.jobs().executions().get(name=execution_id)
response = request.execute()
return cls(
name=response["name"],
uid=response["uid"],
generation=response["generation"],
labels=response.get("labels", {}),
annotations=response.get("annotations", {}),
createTime=response["createTime"],
startTime=response.get("startTime"),
completionTime=response.get("completionTime"),
deleteTime=response.get("deleteTime"),
expireTime=response.get("expireTime"),
launchStage=response.get("launchStage", "GA"),
job=response["job"],
parallelism=response["parallelism"],
taskCount=response["taskCount"],
template=response["template"],
reconciling=response.get("reconciling", False),
conditions=response.get("conditions", []),
observedGeneration=response.get("observedGeneration"),
runningCount=response.get("runningCount"),
succeededCount=response.get("succeededCount"),
failedCount=response.get("failedCount"),
cancelledCount=response.get("cancelledCount"),
retriedCount=response.get("retriedCount"),
logUri=response["logUri"],
satisfiesPzs=response.get("satisfiesPzs", False),
etag=response["etag"],
)
| ExecutionV2 |
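A small sketch of how this model might be driven end to end (building the discovery client and the execution name are assumptions; ExecutionV2.get, is_running and succeeded are the methods defined above):
from googleapiclient.discovery import build
cr_client = build("run", "v2")  # assumed: Cloud Run Admin API v2 discovery client
execution = ExecutionV2.get(
    cr_client=cr_client,
    execution_id="projects/my-project/locations/us-central1/jobs/my-job/executions/exec-123",
)
if execution.is_running():
    print("still running; logs at", execution.logUri)
elif execution.succeeded():
    print("execution completed successfully")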
python | huggingface__transformers | src/transformers/models/timesformer/modeling_timesformer.py | {
"start": 11836,
"end": 17954
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: TimesformerConfig, layer_index: int) -> None:
super().__init__()
attention_type = config.attention_type
drop_path_rates = [
x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers, device="cpu")
] # stochastic depth decay rule
drop_path_rate = drop_path_rates[layer_index]
self.drop_path = TimeSformerDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
self.attention = TimeSformerAttention(config)
self.intermediate = TimesformerIntermediate(config)
self.output = TimesformerOutput(config)
self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.config = config
self.attention_type = attention_type
if attention_type not in ["divided_space_time", "space_only", "joint_space_time"]:
raise ValueError(f"Unknown attention type: {attention_type}")
# Temporal Attention Parameters
if self.attention_type == "divided_space_time":
self.temporal_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.temporal_attention = TimeSformerAttention(config)
self.temporal_dense = nn.Linear(config.hidden_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor, output_attentions: bool = False):
num_frames = self.config.num_frames
num_patch_width = self.config.image_size // self.config.patch_size
batch_size = hidden_states.shape[0]
num_spatial_tokens = (hidden_states.size(1) - 1) // num_frames
num_patch_height = num_spatial_tokens // num_patch_width
if self.attention_type in ["space_only", "joint_space_time"]:
self_attention_outputs = self.attention(
self.layernorm_before(hidden_states), output_attentions=output_attentions
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
hidden_states = hidden_states + self.drop_path(attention_output)
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
layer_output = self.output(layer_output)
layer_output = hidden_states + self.drop_path(layer_output)
outputs = (layer_output,) + outputs
return outputs
elif self.attention_type == "divided_space_time":
# Temporal
temporal_embedding = hidden_states[:, 1:, :]
temporal_embedding = temporal_embedding.reshape(
batch_size, num_patch_height, num_patch_width, num_frames, temporal_embedding.shape[2]
).reshape(batch_size * num_patch_height * num_patch_width, num_frames, temporal_embedding.shape[2])
temporal_attention_outputs = self.temporal_attention(
self.temporal_layernorm(temporal_embedding),
)
attention_output = temporal_attention_outputs[0]
residual_temporal = self.drop_path(attention_output)
residual_temporal = residual_temporal.reshape(
batch_size, num_patch_height, num_patch_width, num_frames, residual_temporal.shape[2]
).reshape(batch_size, num_patch_height * num_patch_width * num_frames, residual_temporal.shape[2])
residual_temporal = self.temporal_dense(residual_temporal)
temporal_embedding = hidden_states[:, 1:, :] + residual_temporal
# Spatial
init_cls_token = hidden_states[:, 0, :].unsqueeze(1)
cls_token = init_cls_token.repeat(1, num_frames, 1)
cls_token = cls_token.reshape(batch_size * num_frames, 1, cls_token.shape[2])
spatial_embedding = temporal_embedding
spatial_embedding = (
spatial_embedding.reshape(
batch_size, num_patch_height, num_patch_width, num_frames, spatial_embedding.shape[2]
)
.permute(0, 3, 1, 2, 4)
.reshape(batch_size * num_frames, num_patch_height * num_patch_width, spatial_embedding.shape[2])
)
spatial_embedding = torch.cat((cls_token, spatial_embedding), 1)
spatial_attention_outputs = self.attention(
self.layernorm_before(spatial_embedding), output_attentions=output_attentions
)
attention_output = spatial_attention_outputs[0]
outputs = spatial_attention_outputs[1:] # add self attentions if we output attention weights
residual_spatial = self.drop_path(attention_output)
# Taking care of CLS token
cls_token = residual_spatial[:, 0, :]
cls_token = cls_token.reshape(batch_size, num_frames, cls_token.shape[1])
cls_token = torch.mean(cls_token, 1, True) # averaging for every frame
residual_spatial = residual_spatial[:, 1:, :]
residual_spatial = (
residual_spatial.reshape(
batch_size, num_frames, num_patch_height, num_patch_width, residual_spatial.shape[2]
)
.permute(0, 2, 3, 1, 4)
.reshape(batch_size, num_patch_height * num_patch_width * num_frames, residual_spatial.shape[2])
)
residual = residual_spatial
hidden_states = temporal_embedding
# Mlp
hidden_states = torch.cat((init_cls_token, hidden_states), 1) + torch.cat((cls_token, residual), 1)
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
layer_output = self.output(layer_output)
layer_output = hidden_states + self.drop_path(layer_output)
outputs = (layer_output,) + outputs
return outputs
| TimesformerLayer |
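A hedged sketch of this layer in context through the public TimeSformer classes (the small config values are illustrative; divided space-time attention is the default attention_type):
import torch
from transformers import TimesformerConfig, TimesformerModel
config = TimesformerConfig(
    image_size=32, patch_size=16, num_frames=2,
    hidden_size=64, num_hidden_layers=2, num_attention_heads=4,
    intermediate_size=128, attention_type="divided_space_time",
)
model = TimesformerModel(config)
video = torch.randn(1, 2, 3, 32, 32)  # (batch, num_frames, channels, height, width)
outputs = model(pixel_values=video)
# expected sequence length: 1 CLS token + num_frames * (image_size // patch_size) ** 2 = 9
print(outputs.last_hidden_state.shape)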
python | sympy__sympy | sympy/functions/elementary/miscellaneous.py | {
"start": 1233,
"end": 9694
} | class ____(Lambda, metaclass=Singleton):
"""
The identity function
Examples
========
>>> from sympy import Id, Symbol
>>> x = Symbol('x')
>>> Id(x)
x
"""
_symbol = Dummy('x')
@property
def signature(self):
return Tuple(self._symbol)
@property
def expr(self):
return self._symbol
Id = S.IdentityFunction
###############################################################################
############################# ROOT and SQUARE ROOT FUNCTION ###################
###############################################################################
def sqrt(arg, evaluate=None):
"""Returns the principal square root.
Parameters
==========
evaluate : bool, optional
The parameter determines if the expression should be evaluated.
If ``None``, its value is taken from
``global_parameters.evaluate``.
Examples
========
>>> from sympy import sqrt, Symbol, S
>>> x = Symbol('x')
>>> sqrt(x)
sqrt(x)
>>> sqrt(x)**2
x
Note that sqrt(x**2) does not simplify to x.
>>> sqrt(x**2)
sqrt(x**2)
This is because the two are not equal to each other in general.
For example, consider x == -1:
>>> from sympy import Eq
>>> Eq(sqrt(x**2), x).subs(x, -1)
False
This is because sqrt computes the principal square root, so the square may
put the argument in a different branch. This identity does hold if x is
positive:
>>> y = Symbol('y', positive=True)
>>> sqrt(y**2)
y
You can force this simplification by using the powdenest() function with
the force option set to True:
>>> from sympy import powdenest
>>> sqrt(x**2)
sqrt(x**2)
>>> powdenest(sqrt(x**2), force=True)
x
To get both branches of the square root you can use the rootof function:
>>> from sympy import rootof
>>> [rootof(x**2-3,i) for i in (0,1)]
[-sqrt(3), sqrt(3)]
Although ``sqrt`` is printed, there is no ``sqrt`` function so looking for
``sqrt`` in an expression will fail:
>>> from sympy.utilities.misc import func_name
>>> func_name(sqrt(x))
'Pow'
>>> sqrt(x).has(sqrt)
False
To find ``sqrt`` look for ``Pow`` with an exponent of ``1/2``:
>>> (x + 1/sqrt(x)).find(lambda i: i.is_Pow and abs(i.exp) is S.Half)
{1/sqrt(x)}
See Also
========
sympy.polys.rootoftools.rootof, root, real_root
References
==========
.. [1] https://en.wikipedia.org/wiki/Square_root
.. [2] https://en.wikipedia.org/wiki/Principal_value
"""
# arg = sympify(arg) is handled by Pow
return Pow(arg, S.Half, evaluate=evaluate)
def cbrt(arg, evaluate=None):
"""Returns the principal cube root.
Parameters
==========
evaluate : bool, optional
The parameter determines if the expression should be evaluated.
If ``None``, its value is taken from
``global_parameters.evaluate``.
Examples
========
>>> from sympy import cbrt, Symbol
>>> x = Symbol('x')
>>> cbrt(x)
x**(1/3)
>>> cbrt(x)**3
x
Note that cbrt(x**3) does not simplify to x.
>>> cbrt(x**3)
(x**3)**(1/3)
This is because the two are not equal to each other in general.
For example, consider `x == -1`:
>>> from sympy import Eq
>>> Eq(cbrt(x**3), x).subs(x, -1)
False
This is because cbrt computes the principal cube root, this
identity does hold if `x` is positive:
>>> y = Symbol('y', positive=True)
>>> cbrt(y**3)
y
See Also
========
sympy.polys.rootoftools.rootof, root, real_root
References
==========
.. [1] https://en.wikipedia.org/wiki/Cube_root
.. [2] https://en.wikipedia.org/wiki/Principal_value
"""
return Pow(arg, Rational(1, 3), evaluate=evaluate)
def root(arg, n, k=0, evaluate=None):
r"""Returns the *k*-th *n*-th root of ``arg``.
Parameters
==========
k : int, optional
Should be an integer in $\{0, 1, ..., n-1\}$.
Defaults to the principal root if $0$.
evaluate : bool, optional
The parameter determines if the expression should be evaluated.
If ``None``, its value is taken from
``global_parameters.evaluate``.
Examples
========
>>> from sympy import root, Rational
>>> from sympy.abc import x, n
>>> root(x, 2)
sqrt(x)
>>> root(x, 3)
x**(1/3)
>>> root(x, n)
x**(1/n)
>>> root(x, -Rational(2, 3))
x**(-3/2)
To get the k-th n-th root, specify k:
>>> root(-2, 3, 2)
-(-1)**(2/3)*2**(1/3)
To get all n n-th roots you can use the rootof function.
The following examples show the roots of unity for n
equal 2, 3 and 4:
>>> from sympy import rootof
>>> [rootof(x**2 - 1, i) for i in range(2)]
[-1, 1]
>>> [rootof(x**3 - 1,i) for i in range(3)]
[1, -1/2 - sqrt(3)*I/2, -1/2 + sqrt(3)*I/2]
>>> [rootof(x**4 - 1,i) for i in range(4)]
[-1, 1, -I, I]
SymPy, like other symbolic algebra systems, returns the
complex root of negative numbers. This is the principal
root and differs from the text-book result that one might
be expecting. For example, the cube root of -8 does not
come back as -2:
>>> root(-8, 3)
2*(-1)**(1/3)
The real_root function can be used to either make the principal
result real (or simply to return the real root directly):
>>> from sympy import real_root
>>> real_root(_)
-2
>>> real_root(-32, 5)
-2
Alternatively, the n//2-th n-th root of a negative number can be
computed with root:
>>> root(-32, 5, 5//2)
-2
See Also
========
sympy.polys.rootoftools.rootof
sympy.core.intfunc.integer_nthroot
sqrt, real_root
References
==========
.. [1] https://en.wikipedia.org/wiki/Square_root
.. [2] https://en.wikipedia.org/wiki/Real_root
.. [3] https://en.wikipedia.org/wiki/Root_of_unity
.. [4] https://en.wikipedia.org/wiki/Principal_value
.. [5] https://mathworld.wolfram.com/CubeRoot.html
"""
n = sympify(n)
if k:
return Mul(Pow(arg, S.One/n, evaluate=evaluate), S.NegativeOne**(2*k/n), evaluate=evaluate)
return Pow(arg, 1/n, evaluate=evaluate)
def real_root(arg, n=None, evaluate=None):
r"""Return the real *n*'th-root of *arg* if possible.
Parameters
==========
n : int or None, optional
If *n* is ``None``, then all instances of
$(-n)^{1/\text{odd}}$ will be changed to $-n^{1/\text{odd}}$.
This will only create a real root of a principal root.
The presence of other factors may cause the result to not be
real.
evaluate : bool, optional
The parameter determines if the expression should be evaluated.
If ``None``, its value is taken from
``global_parameters.evaluate``.
Examples
========
>>> from sympy import root, real_root
>>> real_root(-8, 3)
-2
>>> root(-8, 3)
2*(-1)**(1/3)
>>> real_root(_)
-2
If one creates a non-principal root and applies real_root, the
result will not be real (so use with caution):
>>> root(-8, 3, 2)
-2*(-1)**(2/3)
>>> real_root(_)
-2*(-1)**(2/3)
See Also
========
sympy.polys.rootoftools.rootof
sympy.core.intfunc.integer_nthroot
root, sqrt
"""
from sympy.functions.elementary.complexes import Abs, im, sign
from sympy.functions.elementary.piecewise import Piecewise
if n is not None:
return Piecewise(
(root(arg, n, evaluate=evaluate), Or(Eq(n, S.One), Eq(n, S.NegativeOne))),
(Mul(sign(arg), root(Abs(arg), n, evaluate=evaluate), evaluate=evaluate),
And(Eq(im(arg), S.Zero), Eq(Mod(n, 2), S.One))),
(root(arg, n, evaluate=evaluate), True))
rv = sympify(arg)
n1pow = Transform(lambda x: -(-x.base)**x.exp,
lambda x:
x.is_Pow and
x.base.is_negative and
x.exp.is_Rational and
x.exp.p == 1 and x.exp.q % 2)
return rv.xreplace(n1pow)
###############################################################################
############################# MINIMUM and MAXIMUM #############################
###############################################################################
| IdentityFunction |
python | arrow-py__arrow | tests/test_factory.py | {
"start": 12610,
"end": 12828
} | class ____:
def test_utcnow(self):
assert_datetime_equality(
self.factory.utcnow()._datetime,
datetime.now(timezone.utc),
)
@pytest.mark.usefixtures("arrow_factory")
| TestUtcNow |
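The factory method under test mirrors the module-level arrow API; a one-line sketch (assuming the arrow package is installed):
import arrow
now = arrow.utcnow()    # Arrow instance carrying a UTC timezone
print(now.isoformat())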
python | great-expectations__great_expectations | tests/datasource/fluent/test_metadatasource.py | {
"start": 10243,
"end": 24276
} | class ____:
def test_ds_type_field_not_set(self, empty_sources: DataSourceManager):
with pytest.raises(
TypeRegistrationError,
match=r"`MissingTypeDatasource` is missing a `type` attribute",
):
class MissingTypeDatasource(Datasource):
@property
@override
def execution_engine_type(self) -> Type[ExecutionEngine]:
return DummyExecutionEngine
@override
def test_connection(self) -> None: ... # type: ignore[override] # FIXME CoP
# check that no types were registered
assert len(empty_sources.type_lookup) < 1
def test_ds_execution_engine_type_not_defined(self, empty_sources: DataSourceManager):
class MissingExecEngineTypeDatasource(Datasource):
type: str = "valid"
@override
def test_connection(self) -> None: ... # type: ignore[override] # FIXME CoP
with pytest.raises(NotImplementedError):
MissingExecEngineTypeDatasource(name="name").get_execution_engine()
def test_ds_assets_type_field_not_set(self, empty_sources: DataSourceManager):
with pytest.raises(
TypeRegistrationError,
match=r"No `type` field found for `BadAssetDatasource.asset_types` -> `MissingTypeAsset` unable to register asset type", # noqa: E501 # FIXME CoP
):
class MissingTypeAsset(DataAsset):
@override
def get_batch_identifiers_list(self, batch_request: BatchRequest) -> List[dict]:
raise NotImplementedError
@override
def get_batch(self, batch_request: BatchRequest) -> Batch:
raise NotImplementedError
class BadAssetDatasource(Datasource):
type: str = "valid"
asset_types: ClassVar[List[Type[DataAsset]]] = [MissingTypeAsset]
@property
@override
def execution_engine_type(self) -> Type[ExecutionEngine]:
return DummyExecutionEngine
@override
def test_connection(self) -> None: ... # type: ignore[override] # FIXME CoP
# check that no types were registered
assert len(empty_sources.type_lookup) < 1
def test_ds_test_connection_not_defined(self, empty_sources: DataSourceManager):
class MissingTestConnectionDatasource(Datasource):
type: str = "valid"
@property
@override
def execution_engine_type(self) -> Type[ExecutionEngine]:
return DummyExecutionEngine
with pytest.raises(NotImplementedError):
MissingTestConnectionDatasource(name="name").test_connection()
@pytest.mark.big
def test_minimal_ds_to_asset_flow(context_sources_cleanup):
# 1. Define Datasource & Assets
class SampleAsset(DataAsset):
@override
def get_batch_identifiers_list(self, batch_request: BatchRequest) -> List[dict]:
raise NotImplementedError
@override
def get_batch(self, batch_request: BatchRequest) -> Batch:
raise NotImplementedError
class RedAsset(SampleAsset):
type = "red"
def test_connection(self): ... # type: ignore[explicit-override] # FIXME
class BlueAsset(SampleAsset):
type = "blue"
@override
def test_connection(self): ...
class PurpleDatasource(Datasource):
asset_types = [RedAsset, BlueAsset]
type: str = "purple"
@property
@override
def execution_engine_type(self) -> Type[ExecutionEngine]:
return DummyExecutionEngine
def test_connection(self): ... # type: ignore[explicit-override] # FIXME
def add_red_asset(self, asset_name: str) -> RedAsset:
asset = RedAsset(name=asset_name) # type: ignore[call-arg] # ?
self._add_asset(asset=asset)
return asset
# 2. Get context
context = get_context()
# 3. Add a datasource
purple_ds: Datasource = context.data_sources.add_purple("my_ds_name")
# 4. Add a DataAsset
red_asset: DataAsset = purple_ds.add_red_asset("my_asset_name")
assert isinstance(red_asset, RedAsset)
# 5. Get an asset by name - (method defined in parent `Datasource`)
assert red_asset is purple_ds.get_asset("my_asset_name")
# Testing crud methods
DEFAULT_CRUD_DATASOURCE_NAME = "pandas_datasource"
@pytest.fixture
def context_config_data(
file_dc_config_dir_init: pathlib.Path,
) -> Tuple[AbstractDataContext, pathlib.Path, pathlib.Path]:
config_file_path = file_dc_config_dir_init / FileDataContext.GX_YML
assert config_file_path.exists() is True
context = get_gx_context(context_root_dir=file_dc_config_dir_init)
data_dir = file_dc_config_dir_init.parent / "data"
data_dir.mkdir()
return context, config_file_path, data_dir
def assert_fluent_datasource_content(
config_file_path: pathlib.Path, fluent_datasource_config: dict
):
config = yaml.load(config_file_path.read_text())
assert _FLUENT_DATASOURCES_KEY in config
config_from_gx_yaml = config[_FLUENT_DATASOURCES_KEY]
assert isinstance(config_from_gx_yaml, dict)
config_from_gx_yaml_without_ids = _remove_ids(config_from_gx_yaml)
assert config_from_gx_yaml_without_ids == fluent_datasource_config
def _remove_ids(config: dict) -> dict:
for data_source in config.values():
data_source.pop("id")
for asset in data_source.get("assets", {}).values():
asset.pop("id")
for batch_definition in asset.get("batch_definitions", []):
batch_definition.pop("id")
return config
@pytest.fixture
def context_with_fluent_datasource(
context_config_data: Tuple[AbstractDataContext, pathlib.Path, pathlib.Path],
) -> Tuple[AbstractDataContext, pathlib.Path, pathlib.Path]:
context, config_file_path, data_dir = context_config_data
assert len(context.data_sources.all()) == 0
context.data_sources.add_pandas_filesystem(
name=DEFAULT_CRUD_DATASOURCE_NAME,
base_directory=data_dir,
data_context_root_directory=config_file_path.parent,
)
assert len(context.data_sources.all()) == 1
assert_fluent_datasource_content(
config_file_path=config_file_path,
fluent_datasource_config={
DEFAULT_CRUD_DATASOURCE_NAME: {
"base_directory": str(data_dir),
"data_context_root_directory": str(config_file_path.parent),
"type": "pandas_filesystem",
},
},
)
return context, config_file_path, data_dir
@pytest.mark.unit
def test_add_datasource(context_with_fluent_datasource):
pass
@pytest.mark.unit
@pytest.mark.parametrize("use_positional_arg", [True, False])
def test_add_datasource_with_datasource_object(context_with_fluent_datasource, use_positional_arg):
context, config_file_path, data_dir = context_with_fluent_datasource
new_datasource = copy.deepcopy(context.data_sources.get(DEFAULT_CRUD_DATASOURCE_NAME))
new_datasource.name = "new_datasource"
if use_positional_arg:
context.data_sources.add_pandas_filesystem(new_datasource)
else:
context.data_sources.add_pandas_filesystem(datasource=new_datasource)
assert len(context.data_sources.all()) == 2
assert_fluent_datasource_content(
config_file_path=config_file_path,
fluent_datasource_config={
"pandas_datasource": {
"base_directory": str(data_dir),
"data_context_root_directory": str(config_file_path.parent),
"type": "pandas_filesystem",
},
"new_datasource": {
"base_directory": str(data_dir),
"data_context_root_directory": str(config_file_path.parent),
"type": "pandas_filesystem",
},
},
)
@pytest.mark.unit
@pytest.mark.parametrize("use_positional_arg", [True, False])
def test_update_datasource(context_with_fluent_datasource, use_positional_arg):
context, config_file_path, data_dir = context_with_fluent_datasource
data_dir_2 = data_dir.parent / "data2"
data_dir_2.mkdir()
if use_positional_arg:
context.data_sources.update_pandas_filesystem(
DEFAULT_CRUD_DATASOURCE_NAME,
base_directory=data_dir_2,
data_context_root_directory=config_file_path.parent,
)
else:
context.data_sources.update_pandas_filesystem(
name=DEFAULT_CRUD_DATASOURCE_NAME,
base_directory=data_dir_2,
data_context_root_directory=config_file_path.parent,
)
assert_fluent_datasource_content(
config_file_path=config_file_path,
fluent_datasource_config={
DEFAULT_CRUD_DATASOURCE_NAME: {
"base_directory": str(data_dir_2),
"data_context_root_directory": str(config_file_path.parent),
"type": "pandas_filesystem",
},
},
)
@pytest.mark.unit
@pytest.mark.parametrize("use_positional_arg", [True, False])
def test_update_datasource_with_datasource_object(
context_with_fluent_datasource, use_positional_arg
):
context, config_file_path, data_dir = context_with_fluent_datasource
datasource = context.data_sources.get(DEFAULT_CRUD_DATASOURCE_NAME)
assert_fluent_datasource_content(
config_file_path=config_file_path,
fluent_datasource_config={
DEFAULT_CRUD_DATASOURCE_NAME: {
"base_directory": str(data_dir),
"data_context_root_directory": str(config_file_path.parent),
"type": "pandas_filesystem",
},
},
)
# Add an asset and update datasource
(data_dir / "1.csv").touch()
if use_positional_arg:
datasource.add_csv_asset("csv_asset")
else:
datasource.add_csv_asset(name="csv_asset")
context.data_sources.update_pandas_filesystem(datasource=datasource)
assert_fluent_datasource_content(
config_file_path=config_file_path,
fluent_datasource_config={
DEFAULT_CRUD_DATASOURCE_NAME: {
"base_directory": str(data_dir),
"data_context_root_directory": str(config_file_path.parent),
"type": "pandas_filesystem",
"assets": {
"csv_asset": {
"type": "csv",
},
},
},
},
)
@pytest.mark.unit
@pytest.mark.parametrize("use_positional_arg", [True, False])
def test_add_or_update_datasource_using_add(context_with_fluent_datasource, use_positional_arg):
context, config_file_path, data_dir = context_with_fluent_datasource
data_dir_2 = data_dir.parent / "data2"
data_dir_2.mkdir()
if use_positional_arg:
context.data_sources.add_or_update_pandas_filesystem(
f"{DEFAULT_CRUD_DATASOURCE_NAME}_2",
base_directory=data_dir_2,
data_context_root_directory=config_file_path.parent,
)
else:
context.data_sources.add_or_update_pandas_filesystem(
name=f"{DEFAULT_CRUD_DATASOURCE_NAME}_2",
base_directory=data_dir_2,
data_context_root_directory=config_file_path.parent,
)
assert_fluent_datasource_content(
config_file_path=config_file_path,
fluent_datasource_config={
f"{DEFAULT_CRUD_DATASOURCE_NAME}_2": {
"base_directory": str(data_dir_2),
"data_context_root_directory": str(config_file_path.parent),
"type": "pandas_filesystem",
},
DEFAULT_CRUD_DATASOURCE_NAME: {
"base_directory": str(data_dir),
"data_context_root_directory": str(config_file_path.parent),
"type": "pandas_filesystem",
},
},
)
@pytest.mark.unit
@pytest.mark.parametrize("use_positional_arg", [True, False])
def test_add_or_update_datasource_using_update(context_with_fluent_datasource, use_positional_arg):
context, config_file_path, data_dir = context_with_fluent_datasource
data_dir_2 = data_dir.parent / "data2"
data_dir_2.mkdir()
if use_positional_arg:
context.data_sources.add_or_update_pandas_filesystem(
DEFAULT_CRUD_DATASOURCE_NAME,
base_directory=data_dir_2,
data_context_root_directory=config_file_path.parent,
)
else:
context.data_sources.add_or_update_pandas_filesystem(
name=DEFAULT_CRUD_DATASOURCE_NAME,
base_directory=data_dir_2,
data_context_root_directory=config_file_path.parent,
)
assert_fluent_datasource_content(
config_file_path=config_file_path,
fluent_datasource_config={
DEFAULT_CRUD_DATASOURCE_NAME: {
"base_directory": str(data_dir_2),
"data_context_root_directory": str(config_file_path.parent),
"type": "pandas_filesystem",
},
},
)
@pytest.mark.xfail(
reason="There is a bug in context._save_config where deletions don't get persisted",
run=True,
strict=True,
)
@pytest.mark.unit
def test_delete_datasource(context_with_fluent_datasource):
context, config_file_path, _data_dir = context_with_fluent_datasource
context.data_sources.delete(name=DEFAULT_CRUD_DATASOURCE_NAME)
assert_fluent_datasource_content(config_file_path, {})
@pytest.mark.unit
def test_legacy_delete_datasource_raises_deprecation_warning(
context_with_fluent_datasource,
):
context, _, _ = context_with_fluent_datasource
with pytest.deprecated_call():
context.data_sources.delete_pandas_filesystem(name=DEFAULT_CRUD_DATASOURCE_NAME)
if __name__ == "__main__":
pytest.main([__file__, "-vv", "--log-level=DEBUG"])
| TestMisconfiguredMetaDatasource |
python | doocs__leetcode | solution/1000-1099/1057.Campus Bikes/Solution.py | {
"start": 0,
"end": 604
} | class ____:
def assignBikes(
self, workers: List[List[int]], bikes: List[List[int]]
) -> List[int]:
n, m = len(workers), len(bikes)
arr = []
for i, j in product(range(n), range(m)):
dist = abs(workers[i][0] - bikes[j][0]) + abs(workers[i][1] - bikes[j][1])
arr.append((dist, i, j))
arr.sort()
vis1 = [False] * n
vis2 = [False] * m
ans = [0] * n
for _, i, j in arr:
if not vis1[i] and not vis2[j]:
vis1[i] = vis2[j] = True
ans[i] = j
return ans
| Solution |
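A worked example for the greedy assignment above, using the LeetCode 1057 input format (distances are Manhattan):
workers = [[0, 0], [2, 1]]
bikes = [[1, 2], [3, 3]]
# Sorted (distance, worker, bike) triples: (2, 1, 0), (3, 0, 0), (3, 1, 1), (6, 0, 1).
# Worker 1 claims bike 0 first (distance 2), so worker 0 is left with bike 1 (distance 6).
print(Solution().assignBikes(workers, bikes))  # [1, 0]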
python | huggingface__transformers | tests/models/bamba/test_modeling_bamba.py | {
"start": 1570,
"end": 9734
} | class ____:
config_class = BambaConfig
if is_torch_available():
model_class = BambaModel
for_causal_lm_class = BambaForCausalLM
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
num_key_value_heads=2,
intermediate_size=64,
hidden_act="silu",
attention_dropout=0.0,
attn_layer_indices=None,
attn_rotary_emb=8,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02,
num_labels=3,
pad_token_id=0,
mamba_n_groups=1,
mamba_n_heads=16,
mamba_d_state=16,
mamba_d_conv=4,
mamba_expand=2,
mamba_chunk_size=16,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.attention_dropout = attention_dropout
self.attn_layer_indices = attn_layer_indices
self.attn_rotary_emb = attn_rotary_emb
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.pad_token_id = pad_token_id
self.scope = scope
self.mamba_n_groups = mamba_n_groups
self.mamba_n_heads = mamba_n_heads
self.mamba_d_state = mamba_d_state
self.mamba_d_conv = mamba_d_conv
self.mamba_expand = mamba_expand
self.mamba_chunk_size = mamba_chunk_size
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = torch.tril(torch.ones_like(input_ids).to(torch_device))
token_labels = None
if self.use_labels:
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
self._update_layer_configs()
config = self.get_config()
return config, input_ids, input_mask, token_labels
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
token_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
def _update_layer_configs(self):
"""Configures hidden layers and attn layer indices if they are not set."""
# Fix for SDPA tests, force at least 4 layers
if self.num_hidden_layers < 4:
self.num_hidden_layers = 4
if self.attn_layer_indices is None:
d = [x for x in range(2, self.num_hidden_layers) if self.num_hidden_layers % x == 0]
if len(d) == 0:
raise ValueError("num_hidden_layers is prime, cannot automatically set attn_layer_indices.")
d = d[-1] # get the largest divisor
self.attn_layer_indices = [x + 1 for x in range(0, self.num_hidden_layers, d)]
def get_config(self, **kwargs):
return self.config_class(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
num_key_value_heads=self.num_key_value_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
attention_dropout=self.attention_dropout,
attn_layer_indices=self.attn_layer_indices,
attn_rotary_emb=self.attn_rotary_emb,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
pad_token_id=self.pad_token_id,
mamba_n_groups=self.mamba_n_groups,
mamba_n_heads=self.mamba_n_heads,
mamba_d_state=self.mamba_d_state,
mamba_d_conv=self.mamba_d_conv,
mamba_expand=self.mamba_expand,
mamba_chunk_size=self.mamba_chunk_size,
**kwargs,
)
def create_and_check_model(
self,
config,
input_ids,
input_mask,
token_labels,
):
model = self.model_class(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_causal_lm(
self,
config,
input_ids,
input_mask,
token_labels,
):
model = self.for_causal_lm_class(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, labels=token_labels)
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids, labels=token_labels)
result = model(input_ids)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_decoder_model_past_large_inputs(
self,
config,
input_ids,
input_mask,
token_labels,
):
# config.is_decoder = True
# config.add_cross_attention = True
model = self.for_causal_lm_class(config=config)
model.to(torch_device)
model.eval()
# first forward pass
# Attention: Jamba needs the cache to be initialized to return a cache!
past_key_values = HybridMambaAttentionDynamicCache(
config, input_ids.shape[0], model.dtype, device=model.device
)
outputs = model(
input_ids,
attention_mask=input_mask,
past_key_values=past_key_values,
use_cache=True,
)
past_key_values = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
output_from_no_past = model(
next_input_ids,
attention_mask=next_attention_mask,
output_hidden_states=True,
)["hidden_states"][0]
output_from_past = model(
next_tokens,
attention_mask=next_attention_mask,
past_key_values=past_key_values,
output_hidden_states=True,
cache_position=torch.arange(
input_ids.shape[1], input_ids.shape[1] + next_tokens.shape[1], device=model.device
),
)["hidden_states"][0]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
@require_torch
| BambaModelTester |
python | numba__llvmlite | llvmlite/tests/test_refprune.py | {
"start": 12096,
"end": 15660
} | class ____(BaseTestByIR):
refprune_bitmask = llvm.RefPruneSubpasses.FANOUT_RAISE
fanout_raise_1 = r"""
define i32 @main(i8* %ptr, i1 %cond, i8** %excinfo) {
bb_A:
call void @NRT_incref(i8* %ptr)
br i1 %cond, label %bb_B, label %bb_C
bb_B:
call void @NRT_decref(i8* %ptr)
ret i32 0
bb_C:
store i8* null, i8** %excinfo, !numba_exception_output !0
ret i32 1
}
!0 = !{i1 true}
"""
def test_fanout_raise_1(self):
mod, stats = self.check(self.fanout_raise_1)
self.assertEqual(stats.fanout_raise, 2)
fanout_raise_2 = r"""
define i32 @main(i8* %ptr, i1 %cond, i8** %excinfo) {
bb_A:
call void @NRT_incref(i8* %ptr)
br i1 %cond, label %bb_B, label %bb_C
bb_B:
call void @NRT_decref(i8* %ptr)
ret i32 0
bb_C:
store i8* null, i8** %excinfo, !numba_exception_typo !0 ; bad metadata
ret i32 1
}
!0 = !{i1 true}
"""
def test_fanout_raise_2(self):
# This is ensuring that fanout_raise is not pruning when the metadata
# is incorrectly named.
mod, stats = self.check(self.fanout_raise_2)
self.assertEqual(stats.fanout_raise, 0)
fanout_raise_3 = r"""
define i32 @main(i8* %ptr, i1 %cond, i8** %excinfo) {
bb_A:
call void @NRT_incref(i8* %ptr)
br i1 %cond, label %bb_B, label %bb_C
bb_B:
call void @NRT_decref(i8* %ptr)
ret i32 0
bb_C:
store i8* null, i8** %excinfo, !numba_exception_output !0
ret i32 1
}
!0 = !{i32 1} ; ok; use i32
"""
def test_fanout_raise_3(self):
mod, stats = self.check(self.fanout_raise_3)
self.assertEqual(stats.fanout_raise, 2)
fanout_raise_4 = r"""
define i32 @main(i8* %ptr, i1 %cond, i8** %excinfo) {
bb_A:
call void @NRT_incref(i8* %ptr)
br i1 %cond, label %bb_B, label %bb_C
bb_B:
ret i32 1 ; BAD; all tails are raising without decref
bb_C:
ret i32 1 ; BAD; all tails are raising without decref
}
!0 = !{i1 1}
"""
def test_fanout_raise_4(self):
mod, stats = self.check(self.fanout_raise_4)
self.assertEqual(stats.fanout_raise, 0)
fanout_raise_5 = r"""
define i32 @main(i8* %ptr, i1 %cond, i8** %excinfo) {
bb_A:
call void @NRT_incref(i8* %ptr)
br i1 %cond, label %bb_B, label %bb_C
bb_B:
call void @NRT_decref(i8* %ptr)
br label %common.ret
bb_C:
store i8* null, i8** %excinfo, !numba_exception_output !0
br label %common.ret
common.ret:
%common.ret.op = phi i32 [ 0, %bb_B ], [ 1, %bb_C ]
ret i32 %common.ret.op
}
!0 = !{i1 1}
"""
def test_fanout_raise_5(self):
mod, stats = self.check(self.fanout_raise_5)
self.assertEqual(stats.fanout_raise, 2)
# test case 6 is from https://github.com/numba/llvmlite/issues/1023
fanout_raise_6 = r"""
define i32 @main(i8* %ptr, i1 %cond1, i1 %cond2, i1 %cond3, i8** %excinfo) {
bb_A:
call void @NRT_incref(i8* %ptr)
call void @NRT_incref(i8* %ptr)
br i1 %cond1, label %bb_B, label %bb_C
bb_B:
call void @NRT_decref(i8* %ptr)
br i1 %cond2, label %bb_D, label %bb_E
bb_C:
store i8* null, i8** %excinfo, !numba_exception_output !0
ret i32 1
bb_D:
call void @NRT_decref(i8* %ptr)
ret i32 0
bb_E:
call void @NRT_incref(i8* %ptr)
br i1 %cond3, label %bb_F, label %bb_C
bb_F:
call void @NRT_decref(i8* %ptr)
call void @NRT_decref(i8* %ptr)
ret i32 0
}
!0 = !{i1 1}
"""
def test_fanout_raise_6(self):
mod, stats = self.check(self.fanout_raise_6)
self.assertEqual(stats.fanout_raise, 7)
if __name__ == '__main__':
unittest.main()
| TestFanoutRaise |
python | astropy__astropy | astropy/visualization/wcsaxes/frame.py | {
"start": 4750,
"end": 9378
} | class ____(OrderedDict, metaclass=abc.ABCMeta):
"""
Base class for frames, which are collections of
:class:`~astropy.visualization.wcsaxes.frame.Spine` instances.
"""
spine_class = Spine
def __init__(self, parent_axes, transform, path=None):
super().__init__()
self.parent_axes = parent_axes
self._transform = transform
self._linewidth = rcParams["axes.linewidth"]
self._color = rcParams["axes.edgecolor"]
self._path = path
for axis in self.spine_names:
self[axis] = self.spine_class(parent_axes, transform)
@property
def origin(self):
ymin, ymax = self.parent_axes.get_ylim()
return "lower" if ymin < ymax else "upper"
@property
def transform(self):
return self._transform
@transform.setter
def transform(self, value):
self._transform = value
for axis in self:
self[axis].transform = value
def _update_patch_path(self):
self.update_spines()
x, y = [], []
for axis in self.spine_names:
x.append(self[axis].data[:, 0])
y.append(self[axis].data[:, 1])
vertices = np.vstack([np.hstack(x), np.hstack(y)]).transpose()
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
@property
def patch(self):
self._update_patch_path()
return PathPatch(
self._path,
transform=self.parent_axes.transData,
facecolor=rcParams["axes.facecolor"],
edgecolor="white",
)
def draw(self, renderer):
for axis in self:
pixel = self[axis]._get_pixel()
x, y = pixel[:, 0], pixel[:, 1]
line = Line2D(
x, y, linewidth=self._linewidth, color=self._color, zorder=1000
)
line.draw(renderer)
def sample(self, n_samples):
self.update_spines()
spines = OrderedDict()
for axis in self:
data = self[axis].data
spines[axis] = self.spine_class(self.parent_axes, self.transform)
if data.size > 0:
p = np.linspace(0.0, 1.0, data.shape[0])
p_new = np.linspace(0.0, 1.0, n_samples)
spines[axis].data = np.array(
[np.interp(p_new, p, d) for d in data.T]
).transpose()
else:
spines[axis].data = data
return spines
def set_color(self, color):
"""
Sets the color of the frame.
Parameters
----------
color : str
The color of the frame.
"""
self._color = color
def get_color(self):
return self._color
def set_linewidth(self, linewidth):
"""
Sets the linewidth of the frame.
Parameters
----------
linewidth : float
The linewidth of the frame in points.
"""
self._linewidth = linewidth
def get_linewidth(self):
return self._linewidth
def update_spines(self):
for spine in self.values():
if spine.data_func:
spine.data = spine.data_func(spine)
def _validate_positions(self, positions):
"""
Given a string with single character positions or an iterable with
single or multi-character positions, emit a warning for any
unrecognized positions and return a list of valid positions.
"""
if positions == "all":
return positions
valid_positions = []
invalid_positions = []
for position in positions:
if position in self or position == "#":
valid_positions.append(position)
else:
invalid_positions.append(position)
if invalid_positions:
if isinstance(positions, str) and positions in self:
hint = (
f"It looks like '{positions}' matches the name of a single "
f"axis. If you are trying to specify a multi-character axis "
f"name, use a list or a tuple, e.g. ('{positions}',)."
)
else:
hint = ""
warnings.warn(
f"Ignoring unrecognized position(s): {invalid_positions}, should "
f"be one of {'/'.join(self.keys())}. In future this will "
f"raise an error. {hint}",
AstropyDeprecationWarning,
)
return valid_positions
| BaseFrame |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/dataclass_taint.py | {
"start": 611,
"end": 842
} | class ____:
bad: int
def __init__(self, bad: int) -> None:
self.bad = bad
_test_sink(bad)
def issue_in_dataclass_constructor() -> None:
DataClassWIthInit(bad=_test_source())
@dataclass
| DataClassWIthInit |
python | sympy__sympy | sympy/physics/quantum/spin.py | {
"start": 36499,
"end": 40294
} | class ____(SpinState, Ket):
"""Eigenket of Jz.
Spin state which is an eigenstate of the Jz operator. Uncoupled states,
that is states representing the interaction of multiple separate spin
states, are defined as a tensor product of states.
Parameters
==========
j : Number, Symbol
Total spin angular momentum
m : Number, Symbol
Eigenvalue of the Jz spin operator
Examples
========
*Normal States:*
Defining simple spin states, both numerical and symbolic:
>>> from sympy.physics.quantum.spin import JzKet, JxKet
>>> from sympy import symbols
>>> JzKet(1, 0)
|1,0>
>>> j, m = symbols('j m')
>>> JzKet(j, m)
|j,m>
Rewriting the JzKet in terms of eigenkets of the Jx operator:
Note: that the resulting eigenstates are JxKet's
>>> JzKet(1,1).rewrite("Jx")
|1,-1>/2 - sqrt(2)*|1,0>/2 + |1,1>/2
Get the vector representation of a state in terms of the basis elements
of the Jx operator:
>>> from sympy.physics.quantum.represent import represent
>>> from sympy.physics.quantum.spin import Jx, Jz
>>> represent(JzKet(1,-1), basis=Jx)
Matrix([
[ 1/2],
[sqrt(2)/2],
[ 1/2]])
Apply innerproducts between states:
>>> from sympy.physics.quantum.innerproduct import InnerProduct
>>> from sympy.physics.quantum.spin import JxBra
>>> i = InnerProduct(JxBra(1,1), JzKet(1,1))
>>> i
<1,1|1,1>
>>> i.doit()
1/2
*Uncoupled States:*
Define an uncoupled state as a TensorProduct between two Jz eigenkets:
>>> from sympy.physics.quantum.tensorproduct import TensorProduct
>>> j1,m1,j2,m2 = symbols('j1 m1 j2 m2')
>>> TensorProduct(JzKet(1,0), JzKet(1,1))
|1,0>|1,1>
>>> TensorProduct(JzKet(j1,m1), JzKet(j2,m2))
|j1,m1>|j2,m2>
A TensorProduct can be rewritten, in which case the eigenstates that make
up the tensor product is rewritten to the new basis:
>>> TensorProduct(JzKet(1,1),JxKet(1,1)).rewrite('Jz')
|1,1>|1,-1>/2 + sqrt(2)*|1,1>|1,0>/2 + |1,1>|1,1>/2
The represent method for TensorProduct's gives the vector representation of
the state. Note that the state in the product basis is the equivalent of the
tensor product of the vector representation of the component eigenstates:
>>> represent(TensorProduct(JzKet(1,0),JzKet(1,1)))
Matrix([
[0],
[0],
[0],
[1],
[0],
[0],
[0],
[0],
[0]])
>>> represent(TensorProduct(JzKet(1,1),JxKet(1,1)), basis=Jz)
Matrix([
[ 1/2],
[sqrt(2)/2],
[ 1/2],
[ 0],
[ 0],
[ 0],
[ 0],
[ 0],
[ 0]])
See Also
========
JzKetCoupled: Coupled eigenstates
sympy.physics.quantum.tensorproduct.TensorProduct: Used to specify uncoupled states
uncouple: Uncouples states given coupling parameters
couple: Couples uncoupled states
"""
@classmethod
def dual_class(self):
return JzBra
@classmethod
def coupled_class(self):
return JzKetCoupled
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JxOp(self, basis, **options):
return self._represent_base(beta=pi*Rational(3, 2), **options)
def _represent_JyOp(self, basis, **options):
return self._represent_base(alpha=pi*Rational(3, 2), beta=pi/2, gamma=pi/2, **options)
def _represent_JzOp(self, basis, **options):
return self._represent_base(**options)
| JzKet |
python | facebook__pyre-check | tools/generate_taint_models/tests/get_undecorated_sources_test.py | {
"start": 631,
"end": 3881
} | class ____(unittest.TestCase):
@patch.object(RESTApiSourceGenerator, "generate_models")
# pyre-fixme[56]: Argument
# `"{}.AnnotatedFreeFunctionWithDecoratorGenerator".format(tools.pyre.tools.generate_taint_models.get_undecorated_sources.__name__)`
# to decorator factory `unittest.mock.patch` could not be resolved in a global
# scope.
@patch(
"{}.AnnotatedFreeFunctionWithDecoratorGenerator".format(undecorated_source_name)
)
def test_compute_models(
self,
mock_annotated_decorator: MagicMock,
mock_RESTapi_decorator_generate_models: MagicMock,
) -> None:
mock_RESTapi_decorator_generate_models.return_value = {
CallableModel(
testA,
parameter_annotation=AllParametersAnnotation(
arg="TaintSource[UserControlled]",
vararg="TaintSource[UserControlled]",
kwarg="TaintSource[UserControlled]",
),
),
CallableModel(
testB,
parameter_annotation=AllParametersAnnotation(
arg="TaintSource[UserControlled]",
vararg="TaintSource[UserControlled]",
kwarg="TaintSource[UserControlled]",
),
),
CallableModel(
TestClass().methodA,
parameter_annotation=AllParametersAnnotation(
arg="TaintSource[UserControlled]",
vararg="TaintSource[UserControlled]",
kwarg="TaintSource[UserControlled]",
),
),
}
generator_instance = MagicMock()
generator_instance.generate_models.return_value = {
FunctionDefinitionModel(
# pyre-ignore: Incompatible parameter type [6]
ast.parse("def testA(): pass").body[0],
parameter_annotation=AllParametersAnnotation(
arg="TaintSource[UserControlled]",
vararg="TaintSource[UserControlled]",
kwarg="TaintSource[UserControlled]",
),
qualifier="tools.pyre.tools.generate_taint_models.tests.test_functions",
)
}
mock_annotated_decorator.side_effect = [generator_instance]
self.maxDiff = None
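# testA is also produced by the decorator-based generator, so the undecorated-source
# generator is expected to drop it and emit models only for testB and TestClass.methodA.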
self.assertEqual(
{
*map(
str,
UndecoratedSourceGenerator(
source_generator=RESTApiSourceGenerator(
django_urls=MagicMock()
),
root="/root",
decorators_to_filter=[],
).compute_models(all_functions),
)
},
{
"def tools.pyre.tools.generate_taint_models.tests.test_functions."
"TestClass.methodA(self: TaintSource[UserControlled], x: "
"TaintSource[UserControlled]): ...",
"def tools.pyre.tools.generate_taint_models.tests.test_functions."
"testB(x: TaintSource[UserControlled]): ...",
},
)
| GetUndecoratedSourcesTest |
python | pypa__pip | src/pip/_vendor/pygments/lexers/python.py | {
"start": 30360,
"end": 31955
} | class ____(DelegatingLexer):
"""
For Python console output or doctests, such as:
.. sourcecode:: pycon
>>> a = 'foo'
>>> print(a)
foo
>>> 1 / 0
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ZeroDivisionError: integer division or modulo by zero
Additional options:
`python3`
Use Python 3 lexer for code. Default is ``True``.
.. versionadded:: 1.0
.. versionchanged:: 2.5
Now defaults to ``True``.
"""
name = 'Python console session'
aliases = ['pycon', 'python-console']
mimetypes = ['text/x-python-doctest']
url = 'https://python.org'
version_added = ''
def __init__(self, **options):
python3 = get_bool_opt(options, 'python3', True)
if python3:
pylexer = PythonLexer
tblexer = PythonTracebackLexer
else:
pylexer = Python2Lexer
tblexer = Python2TracebackLexer
# We have two auxiliary lexers. Use DelegatingLexer twice with
# different tokens. TODO: DelegatingLexer should support this
# directly, by accepting a tuple of auxiliary lexers and a tuple of
# distinguishing tokens. Then we wouldn't need this intermediary
# class.
class _ReplaceInnerCode(DelegatingLexer):
def __init__(self, **options):
super().__init__(pylexer, _PythonConsoleLexerBase, Other.Code, **options)
super().__init__(tblexer, _ReplaceInnerCode, Other.Traceback, **options)
| PythonConsoleLexer |
python | sympy__sympy | sympy/physics/quantum/cg.py | {
"start": 1164,
"end": 4480
} | class ____(Expr):
"""Class for the Wigner-3j symbols.
Explanation
===========
Wigner 3j-symbols are coefficients determined by the coupling of
two angular momenta. When created, they are expressed as symbolic
quantities that, for numerical parameters, can be evaluated using the
``.doit()`` method [1]_.
Parameters
==========
j1, m1, j2, m2, j3, m3 : Number, Symbol
Terms determining the angular momentum of coupled angular momentum
systems.
Examples
========
Declare a Wigner-3j coefficient and calculate its value
>>> from sympy.physics.quantum.cg import Wigner3j
>>> w3j = Wigner3j(6,0,4,0,2,0)
>>> w3j
Wigner3j(6, 0, 4, 0, 2, 0)
>>> w3j.doit()
sqrt(715)/143
See Also
========
CG: Clebsch-Gordan coefficients
References
==========
.. [1] Varshalovich, D A, Quantum Theory of Angular Momentum. 1988.
"""
is_commutative = True
def __new__(cls, j1, m1, j2, m2, j3, m3):
args = map(sympify, (j1, m1, j2, m2, j3, m3))
return Expr.__new__(cls, *args)
@property
def j1(self):
return self.args[0]
@property
def m1(self):
return self.args[1]
@property
def j2(self):
return self.args[2]
@property
def m2(self):
return self.args[3]
@property
def j3(self):
return self.args[4]
@property
def m3(self):
return self.args[5]
@property
def is_symbolic(self):
return not all(arg.is_number for arg in self.args)
# This is modified from the _print_Matrix method
def _pretty(self, printer, *args):
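# Arrange the six labels as a 2x3 grid (top row: j1 j2 j3, bottom row: m1 m2 m3)
# and wrap the whole block in parentheses, the conventional 3j-symbol layout.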
m = ((printer._print(self.j1), printer._print(self.m1)),
(printer._print(self.j2), printer._print(self.m2)),
(printer._print(self.j3), printer._print(self.m3)))
hsep = 2
vsep = 1
maxw = [-1]*3
for j in range(3):
maxw[j] = max(m[j][i].width() for i in range(2))
D = None
for i in range(2):
D_row = None
for j in range(3):
s = m[j][i]
wdelta = maxw[j] - s.width()
wleft = wdelta //2
wright = wdelta - wleft
s = prettyForm(*s.right(' '*wright))
s = prettyForm(*s.left(' '*wleft))
if D_row is None:
D_row = s
continue
D_row = prettyForm(*D_row.right(' '*hsep))
D_row = prettyForm(*D_row.right(s))
if D is None:
D = D_row
continue
for _ in range(vsep):
D = prettyForm(*D.below(' '))
D = prettyForm(*D.below(D_row))
D = prettyForm(*D.parens())
return D
def _latex(self, printer, *args):
label = map(printer._print, (self.j1, self.j2, self.j3,
self.m1, self.m2, self.m3))
return r'\left(\begin{array}{ccc} %s & %s & %s \\ %s & %s & %s \end{array}\right)' % \
tuple(label)
def doit(self, **hints):
if self.is_symbolic:
raise ValueError("Coefficients must be numerical")
return wigner_3j(self.j1, self.j2, self.j3, self.m1, self.m2, self.m3)
| Wigner3j |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 793283,
"end": 795005
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"body",
"conditions",
"description",
"featured",
"hidden",
"implementation",
"key",
"limitations",
"name",
"nickname",
"permissions",
"pseudo_license",
"spdx_id",
"url",
)
body = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="body")
conditions = sgqlc.types.Field(
sgqlc.types.non_null(sgqlc.types.list_of(LicenseRule)),
graphql_name="conditions",
)
description = sgqlc.types.Field(String, graphql_name="description")
featured = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="featured")
hidden = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="hidden")
implementation = sgqlc.types.Field(String, graphql_name="implementation")
key = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="key")
limitations = sgqlc.types.Field(
sgqlc.types.non_null(sgqlc.types.list_of(LicenseRule)),
graphql_name="limitations",
)
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
nickname = sgqlc.types.Field(String, graphql_name="nickname")
permissions = sgqlc.types.Field(
sgqlc.types.non_null(sgqlc.types.list_of(LicenseRule)),
graphql_name="permissions",
)
pseudo_license = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="pseudoLicense"
)
spdx_id = sgqlc.types.Field(String, graphql_name="spdxId")
url = sgqlc.types.Field(URI, graphql_name="url")
| License |
python | google__jax | tests/pmap_test.py | {
"start": 97791,
"end": 107074
} | class ____(jtu.JaxTestCase):
def testAllDevices(self):
f = pmap(lambda x: x - lax.psum(x, 'i'), axis_name='i',
devices=jax.devices())
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
expected = x - np.sum(x, 0)
ans = f(x)
self.assertAllClose(ans, expected)
def testOneDevice(self):
if jax.device_count() == 1:
raise SkipTest("this test requires multiple devices")
d0 = jax.devices()[0]
d1 = jax.devices()[1]
f = lambda x: jnp.dot(x, x.T)
f0 = pmap(f, devices=[d0])
f1 = pmap(f, devices=[d1])
x = self.rng().rand(1, 500, 500)
r0 = f0(x)
r1 = f1(x)
expected = np.expand_dims(np.dot(x.squeeze(), x.squeeze().T), 0)
self.assertAllClose(r0, expected, atol=1e-6, rtol=1e-3)
self.assertAllClose(r1, expected, atol=1e-6, rtol=1e-3)
def testNoDevicesError(self):
f = pmap(lambda x: x - lax.psum(x, 'i'), axis_name='i', devices=[])
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
with self.assertRaisesRegex(
ValueError, "'devices' argument to pmap must be non-empty, or None."):
f(x)
def testBadAxisSizeError(self):
if jax.device_count() == 1:
raise SkipTest("this test requires multiple devices")
if config.pmap_shmap_merge.value:
raise SkipTest("jit(shmap) does not raise error.")
f = pmap(lambda x: lax.psum(x, 'i'), axis_name='i',
devices=jax.devices())
with self.assertRaisesRegex(
ValueError, r"Leading axis size of input to pmapped function must "
r"equal the number of local devices passed to pmap. Got axis_size=1, "
r"num_local_devices=\d."):
f(jnp.ones(1))
with self.assertRaisesRegex(
ValueError, r"Leading axis size of input to pmapped function must "
r"equal the number of local devices passed to pmap. Got axis_size=\d, "
r"num_local_devices=\d."):
f(jnp.ones(jax.device_count() + 1))
def testBadAxisSizeErrorNested(self):
if config.disable_jit.value:
raise SkipTest("error doesn't apply when jit is disabled")
if config.pmap_shmap_merge.value:
raise SkipTest("Ignore nested pmap when `pmap_shmap_merge=True`.")
f = pmap(pmap(lambda x: lax.psum(x, ('i', 'j')),
axis_name='j'),
axis_name='i',
devices=[jax.local_devices()[0]])
with self.assertRaisesRegex(
ValueError,
r"pmapped function requires 4 local devices to run due to nested "
r"pmapped or other parallel functions, but only 1 are available."):
f(jnp.ones((1, 4)))
def testNestedPmaps(self):
if jax.device_count() % 2 != 0:
raise SkipTest
if config.disable_jit.value:
raise SkipTest("disable_jit requires num devices to equal axis size")
if config.pmap_shmap_merge.value:
raise SkipTest("Ignore nested pmap when `pmap_shmap_merge=True`.")
# Devices specified in outer pmap are OK
@partial(pmap, axis_name='i', devices=jax.devices())
def foo(x):
@partial(pmap, axis_name='j')
def bar(y):
return lax.psum(y, 'j')
return bar(x)
x = jnp.ones((jax.device_count() // 2, 2))
ans = foo(x)
expected = x * 2
self.assertAllClose(ans, expected)
def testNestedPmapsBools(self):
if jax.device_count() % 2 != 0:
raise SkipTest
if config.disable_jit.value:
raise SkipTest("disable_jit requires num devices to equal axis size")
if config.pmap_shmap_merge.value:
raise SkipTest("Ignore nested pmap when `pmap_shmap_merge=True`.")
# Devices specified in outer pmap are OK
@partial(pmap, axis_name='i', devices=jax.devices())
def foo(x):
@partial(pmap, axis_name='j')
def bar(y):
return jnp.logical_not(y)
return bar(x)
x = jnp.ones((jax.device_count() // 2, 2), jnp.bool_)
ans = foo(x)
expected = jnp.zeros((jax.device_count() // 2, 2), jnp.bool_)
self.assertAllClose(ans, expected)
def testNestedPmapsError(self):
if config.pmap_shmap_merge.value:
raise SkipTest('Ignore nested pmap when `pmap_shmap_merge=True`.')
# Devices specified in inner pmap not OK
@partial(pmap, axis_name='i')
def foo(x):
@partial(pmap, axis_name='j', devices=jax.devices())
def bar(y):
return lax.psum(y, 'j')
return bar(x)
with self.assertRaisesRegex(
ValueError,
"Nested pmap with explicit devices argument."):
foo(jnp.ones((jax.device_count(), 1)))
def testJitInPmap(self):
@partial(pmap, axis_name='i', devices=jax.devices())
def foo(x):
@jit
def bar(y):
return y + 1
return lax.psum(bar(x), 'i')
ndevices = jax.device_count()
ans = foo(jnp.ones((ndevices, 1)))
expected = np.ones((ndevices, 1), dtype=jnp.float_) * ndevices * 2
self.assertAllClose(ans, expected)
@ignore_jit_of_pmap_warning()
def testPmapInJit(self):
@jit
def foo(x):
@partial(pmap, axis_name='i', devices=jax.devices())
def bar(y):
return lax.psum(y, 'i')
return bar(x)
ndevices = jax.device_count()
ans = foo(jnp.ones((ndevices, 1)))
expected = np.ones((ndevices, 1), dtype=jnp.float_) * ndevices
self.assertAllClose(ans, expected)
def testGradBasic(self):
@partial(pmap, axis_name='i', devices=jax.devices())
def f(x):
return jnp.sin(x)
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
ans = grad(lambda x: jnp.sum(jnp.sin(x)))(x)
expected = grad(lambda x: jnp.sum(f(x)))(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testPmapStaticArgnums(self):
@partial(pmap, axis_name='i', static_broadcasted_argnums=1)
def f(x, y):
return jnp.sin(x + y())
shape = (jax.device_count(), 4)
x = np.arange(math.prod(shape), dtype=np.float32).reshape(shape)
y = lambda: 3.
ans = f(x, y)
expected = np.sin(x + 3.)
self.assertAllClose(ans, expected, check_dtypes=False)
def testPmapInAxesBasic(self):
@partial(pmap, in_axes=(1, 2))
def f(x, y):
return jnp.sin(x + y)
xshape = (2, jax.device_count(), 4)
x = np.arange(math.prod(xshape)).reshape(xshape)
yshape = (2, 4, jax.device_count())
y = np.arange(math.prod(yshape)).reshape(yshape)
self.assertAllClose(f(x, y),
jnp.sin(x.transpose((1, 0, 2)) + y.transpose((2, 0, 1))))
def testPmapInAxesGrad(self):
def f(x, y, z):
return jnp.sin(x + y + z)
fp = pmap(f, in_axes=(1, 2, None))
fv = vmap(f, in_axes=(1, 2, None))
xshape = (5, jax.device_count(), 7)
x = np.arange(math.prod(xshape), dtype=np.float32).reshape(xshape)
yshape = (5, 7, jax.device_count())
y = np.arange(math.prod(yshape), dtype=np.float32).reshape(yshape)
zshape = (5, 7)
z = np.arange(math.prod(zshape), dtype=np.float32).reshape(zshape)
dx, dy, dz = jax.grad(lambda args: fp(*args).sum())((x, y, z))
assert dx.shape == xshape
assert dy.shape == yshape
assert dz.shape == zshape
self.assertAllClose(jax.grad(lambda args: fp(*args).sum())((x, y, z)),
jax.grad(lambda args: fv(*args).sum())((x, y, z)))
def testPmapOutAxesBasic(self):
@partial(pmap, in_axes=(1, None), out_axes=(2, None))
def f(x, y):
return jnp.sin(x + y), y * 2
xshape = (2, jax.device_count(), 4)
x = np.arange(math.prod(xshape)).reshape(xshape)
yshape = (2, 4)
y = np.arange(math.prod(yshape)).reshape(yshape)
self.assertAllClose(f(x, y),
(jnp.sin(x.transpose((1, 0, 2)) + y).transpose((1, 2, 0)), y * 2))
def testPmapDictOutAxes(self):
# see issue #6410
@partial(pmap, out_axes={'a': 0})
def f(x):
return {'a': x}
device_count = jax.device_count()
x = jnp.arange(device_count)
jax.tree.map(self.assertAllClose, f(x), {'a': x})
@jtu.sample_product(
in_axes=all_bdims((3, 4), (3, 1), (1, 4), pmap=True),
out_axes=out_bdims((3, 4), True),
)
def testPmapAllAxesGrad(self, in_axes, out_axes):
def f(x, y, z):
return jnp.sin(x + y) * z
pmapped_size = jax.device_count()
mapped_shapes = [(3, 4), (3, 1), (1, 4)]
arg_shapes = map(partial(add_bdim, pmapped_size), in_axes, mapped_shapes)
rng = jtu.rand_default(self.rng())
args = [rng(shape, jnp.float64) for shape in arg_shapes]
jtu.check_grads(pmap(f, in_axes=in_axes, out_axes=out_axes), args,
order=2, atol=2e-2, rtol=2e-2, eps=1e-3)
def testPmapPostProcess(self):
def mk_case(map_fun):
def f(x, y):
# NOTE: Map doesn't have any arguments we differentiate wrt
@partial(map_fun, in_axes=1, out_axes=2)
def h(y):
return jnp.sin(x + y)
return h(y).sum()
return f
xshape = (5, 7)
x = np.arange(math.prod(xshape), dtype=np.float32).reshape(xshape)
yshape = (5, jax.device_count(), 7)
y = np.arange(math.prod(yshape), dtype=np.float32).reshape(yshape)
self.assertAllClose(jax.grad(mk_case(pmap))(x, y),
jax.grad(mk_case(vmap))(x, y))
@jtu.pytest_mark_if_available('multiaccelerator')
| PmapWithDevicesTest |
python | kamyu104__LeetCode-Solutions | Python/find-minimum-operations-to-make-all-elements-divisible-by-three.py | {
"start": 36,
"end": 253
} | class ____(object):
def minimumOperations(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return sum(x%3 != 0 for x in nums)
# Time: O(n)
# Space: O(1)
# math
| Solution |
python | ray-project__ray | python/ray/tune/progress_reporter.py | {
"start": 1522,
"end": 3323
} | class ____:
"""Abstract class for experiment progress reporting.
`should_report()` is called to determine whether or not `report()` should
be called. Tune will call these functions after trial state transitions,
receiving training results, and so on.
"""
def setup(
self,
start_time: Optional[float] = None,
total_samples: Optional[int] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
**kwargs,
):
"""Setup progress reporter for a new Ray Tune run.
This function is used to initialize parameters that are set on runtime.
It will be called before any of the other methods.
Defaults to no-op.
Args:
start_time: Timestamp when the Ray Tune run is started.
total_samples: Number of samples the Ray Tune run will run.
metric: Metric to optimize.
mode: Must be one of [min, max]. Determines whether objective is
minimizing or maximizing the metric attribute.
**kwargs: Keyword arguments for forward-compatibility.
"""
pass
def should_report(self, trials: List[Trial], done: bool = False):
"""Returns whether or not progress should be reported.
Args:
trials: Trials to report on.
done: Whether this is the last progress report attempt.
"""
raise NotImplementedError
def report(self, trials: List[Trial], done: bool, *sys_info: Dict):
"""Reports progress across trials.
Args:
trials: Trials to report on.
done: Whether this is the last progress report attempt.
sys_info: System info.
"""
raise NotImplementedError
@DeveloperAPI
| ProgressReporter |
python | getsentry__sentry | tests/sentry/runner/commands/test_cleanup.py | {
"start": 478,
"end": 958
} | class ____:
"""Mock task queue that partially implements the _WorkQueue protocol but executes tasks synchronously."""
def __init__(self) -> None:
# You can use this to inspect the calls to the queue.
self.put_calls: list[tuple[str, tuple[int, ...]]] = []
def put(self, item: tuple[str, tuple[int, ...]]) -> None:
self.put_calls.append(item)
task_execution(item[0], item[1])
def join(self) -> None:
pass
| SynchronousTaskQueue |
python | pytorch__pytorch | torch/optim/swa_utils.py | {
"start": 3723,
"end": 15467
} | class ____(Module):
r"""Implements averaged model for Stochastic Weight Averaging (SWA) and Exponential Moving Average (EMA).
Stochastic Weight Averaging was proposed in `Averaging Weights Leads to
Wider Optima and Better Generalization`_ by Pavel Izmailov, Dmitrii
Podoprikhin, Timur Garipov, Dmitry Vetrov and Andrew Gordon Wilson
(UAI 2018).
Exponential Moving Average is a variation of `Polyak averaging`_,
but using exponential weights instead of equal weights across iterations.
AveragedModel class creates a copy of the provided module :attr:`model`
on the device :attr:`device` and allows to compute running averages of the
parameters of the :attr:`model`.
Args:
model (torch.nn.Module): model to use with SWA/EMA
device (torch.device, optional): if provided, the averaged model will be
stored on the :attr:`device`
avg_fn (function, optional): the averaging function used to update
parameters; the function must take in the current value of the
:class:`AveragedModel` parameter, the current value of :attr:`model`
parameter, and the number of models already averaged; if None,
an equally weighted average is used (default: None)
multi_avg_fn (function, optional): the averaging function used to update
parameters inplace; the function must take in the current values of the
:class:`AveragedModel` parameters as a list, the current values of :attr:`model`
parameters as a list, and the number of models already averaged; if None,
an equally weighted average is used (default: None)
use_buffers (bool): if ``True``, it will compute running averages for
both the parameters and the buffers of the model. (default: ``False``)
Example:
>>> # xdoctest: +SKIP("undefined variables")
>>> loader, optimizer, model, loss_fn = ...
>>> swa_model = torch.optim.swa_utils.AveragedModel(model)
>>> scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
>>> T_max=300)
>>> swa_start = 160
>>> swa_scheduler = SWALR(optimizer, swa_lr=0.05)
>>> for i in range(300):
>>> for input, target in loader:
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
>>> if i > swa_start:
>>> swa_model.update_parameters(model)
>>> swa_scheduler.step()
>>> else:
>>> scheduler.step()
>>>
>>> # Update bn statistics for the swa_model at the end
>>> torch.optim.swa_utils.update_bn(loader, swa_model)
You can also use custom averaging functions with the `avg_fn` or `multi_avg_fn` parameters.
If no averaging function is provided, the default is to compute
equally-weighted average of the weights (SWA).
Example:
>>> # xdoctest: +SKIP("undefined variables")
>>> # Compute exponential moving averages of the weights and buffers
>>> ema_model = torch.optim.swa_utils.AveragedModel(model,
>>> torch.optim.swa_utils.get_ema_multi_avg_fn(0.9), use_buffers=True)
.. note::
When using SWA/EMA with models containing Batch Normalization you may
need to update the activation statistics for Batch Normalization.
This can be done either by using the :meth:`torch.optim.swa_utils.update_bn`
or by setting :attr:`use_buffers` to `True`. The first approach updates the
statistics in a post-training step by passing data through the model. The
second does it during the parameter update phase by averaging all buffers.
Empirical evidence has shown that updating the statistics in normalization
layers increases accuracy, but you may wish to empirically test which
approach yields the best results in your problem.
.. note::
:attr:`avg_fn` and `multi_avg_fn` are not saved in the :meth:`state_dict` of the model.
.. note::
When :meth:`update_parameters` is called for the first time (i.e.
:attr:`n_averaged` is `0`) the parameters of `model` are copied
to the parameters of :class:`AveragedModel`. For every subsequent
call of :meth:`update_parameters` the function `avg_fn` is used
to update the parameters.
.. _Averaging Weights Leads to Wider Optima and Better Generalization:
https://arxiv.org/abs/1803.05407
.. _There Are Many Consistent Explanations of Unlabeled Data: Why You Should
Average:
https://arxiv.org/abs/1806.05594
.. _SWALP: Stochastic Weight Averaging in Low-Precision Training:
https://arxiv.org/abs/1904.11943
.. _Stochastic Weight Averaging in Parallel: Large-Batch Training That
Generalizes Well:
https://arxiv.org/abs/2001.02312
.. _Polyak averaging:
https://paperswithcode.com/method/polyak-averaging
"""
n_averaged: Tensor
def __init__(
self,
model: Module,
device: Optional[Union[int, torch.device]] = None,
avg_fn: Optional[Callable[[Tensor, Tensor, Union[Tensor, int]], Tensor]] = None,
multi_avg_fn: Optional[
Callable[[PARAM_LIST, PARAM_LIST, Union[Tensor, int]], None]
] = None,
use_buffers=False,
) -> None: # noqa: D107
super().__init__()
if avg_fn is not None and multi_avg_fn is not None:
raise AssertionError(
"Only one of avg_fn and multi_avg_fn should be provided"
)
self.module = deepcopy(model)
if device is not None:
self.module = self.module.to(device)
self.register_buffer(
"n_averaged", torch.tensor(0, dtype=torch.long, device=device)
)
self.avg_fn = avg_fn
self.multi_avg_fn = multi_avg_fn
self.use_buffers = use_buffers
def forward(self, *args, **kwargs):
"""Forward pass."""
return self.module(*args, **kwargs)
def update_parameters(self, model: Module) -> None:
"""Update model parameters."""
self_param = (
# pyrefly: ignore [bad-argument-type]
itertools.chain(self.module.parameters(), self.module.buffers())
if self.use_buffers
else self.parameters()
)
model_param = (
# pyrefly: ignore [bad-argument-type]
itertools.chain(model.parameters(), model.buffers())
if self.use_buffers
else model.parameters()
)
self_param_detached: list[Optional[Tensor]] = []
model_param_detached: list[Optional[Tensor]] = []
copy_param = bool(self.n_averaged == 0)
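# On the very first update the averaged parameters are a plain copy of the model;
# the averaging functions only apply on later calls (the n_averaged > 0 branch below).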
for p_averaged, p_model in zip(self_param, model_param, strict=False):
p_model_ = p_model.detach().to(p_averaged.device)
self_param_detached.append(p_averaged.detach())
model_param_detached.append(p_model_)
if copy_param:
p_averaged.detach().copy_(p_model_)
if self.n_averaged > 0:
if self.multi_avg_fn is not None or self.avg_fn is None:
grouped_tensors = _group_tensors_by_device_and_dtype(
[self_param_detached, model_param_detached]
)
for (device, _), (
[self_params, model_params],
_,
) in grouped_tensors.items():
if self.multi_avg_fn:
self.multi_avg_fn(
self_params, # type: ignore[arg-type]
model_params, # type: ignore[arg-type]
self.n_averaged.to(device),
)
elif (
device is not None
and device.type in _get_foreach_kernels_supported_devices()
):
multi_avg_fn = get_swa_multi_avg_fn()
multi_avg_fn(
self_params, model_params, self.n_averaged.to(device)
)
else:
avg_fn = get_swa_avg_fn()
n_averaged = self.n_averaged.to(device)
for p_averaged, p_model in zip( # type: ignore[assignment]
self_params, model_params, strict=True
):
# pyrefly: ignore [missing-attribute]
p_averaged.copy_(avg_fn(p_averaged, p_model, n_averaged))
else:
for p_averaged, p_model in zip( # type: ignore[assignment]
self_param_detached, model_param_detached, strict=True
):
# pyrefly: ignore [missing-attribute]
n_averaged = self.n_averaged.to(p_averaged.device)
# pyrefly: ignore [missing-attribute]
p_averaged.detach().copy_(
# pyrefly: ignore [missing-attribute, bad-argument-type]
self.avg_fn(p_averaged.detach(), p_model, n_averaged)
)
if not self.use_buffers:
# If not apply running averages to the buffers,
# keep the buffers in sync with the source model.
for b_swa, b_model in zip(
self.module.buffers(), model.buffers(), strict=True
):
b_swa.detach().copy_(b_model.detach().to(b_swa.device))
self.n_averaged += 1
@torch.no_grad()
def update_bn(
loader: Iterable[Any],
model: Module,
device: Optional[Union[int, torch.device]] = None,
) -> None:
r"""Update BatchNorm running_mean, running_var buffers in the model.
It performs one pass over data in `loader` to estimate the activation
statistics for BatchNorm layers in the model.
Args:
loader (torch.utils.data.DataLoader): dataset loader to compute the
activation statistics on. Each data batch should be either a
tensor, or a list/tuple whose first element is a tensor
containing data.
model (torch.nn.Module): model for which we seek to update BatchNorm
statistics.
device (torch.device, optional): If set, data will be transferred to
:attr:`device` before being passed into :attr:`model`.
Example:
>>> # xdoctest: +SKIP("Undefined variables")
>>> loader, model = ...
>>> torch.optim.swa_utils.update_bn(loader, model)
.. note::
The `update_bn` utility assumes that each data batch in :attr:`loader`
is either a tensor or a list or tuple of tensors; in the latter case it
is assumed that :meth:`model.forward()` should be called on the first
element of the list or tuple corresponding to the data batch.
"""
momenta = {}
for module in model.modules():
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
module.reset_running_stats()
momenta[module] = module.momentum
if not momenta:
return
was_training = model.training
model.train()
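# momentum=None makes each BatchNorm layer accumulate running statistics as a
# cumulative average over the whole pass instead of an exponential moving average.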
for module in momenta:
module.momentum = None
for input in loader:
if isinstance(input, (list, tuple)):
input = input[0]
if device is not None:
input = input.to(device)
model(input)
for bn_module in momenta:
bn_module.momentum = momenta[bn_module]
model.train(was_training)
| AveragedModel |
python | apache__airflow | airflow-core/tests/unit/serialization/test_serialized_objects.py | {
"start": 9048,
"end": 18033
} | class ____(LazySelectSequence):
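# Test double for LazySelectSequence: iteration and length come from a fixed
# in-memory list, and the session argument is just a placeholder string, so the
# serialization tests below need no database session.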
_data = ["a", "b", "c"]
def __init__(self):
super().__init__(None, None, session="MockSession")
def __iter__(self) -> Iterator[str]:
return iter(self._data)
def __len__(self) -> int:
return len(self._data)
@pytest.mark.parametrize(
("input", "encoded_type", "cmp_func"),
[
("test_str", None, equals),
(1, None, equals),
(math.nan, None, lambda a, b: b == "nan"),
(math.inf, None, lambda a, b: b == "inf"),
(-math.inf, None, lambda a, b: b == "-inf"),
(timezone.utcnow(), DAT.DATETIME, equal_time),
(timedelta(minutes=2), DAT.TIMEDELTA, equals),
(Timezone("UTC"), DAT.TIMEZONE, lambda a, b: a.name == b.name),
(
relativedelta.relativedelta(hours=+1),
DAT.RELATIVEDELTA,
lambda a, b: a.hours == b.hours,
),
({"test": "dict", "test-1": 1}, None, equals),
(["array_item", 2], None, equals),
(("tuple_item", 3), DAT.TUPLE, equals),
(set(["set_item", 3]), DAT.SET, equals),
(
k8s.V1Pod(
metadata=k8s.V1ObjectMeta(
name="test",
annotations={"test": "annotation"},
creation_timestamp=timezone.utcnow(),
)
),
DAT.POD,
equals,
),
(
DAG(
"fake-dag",
schedule="*/10 * * * *",
default_args={"depends_on_past": True},
start_date=timezone.utcnow(),
catchup=False,
),
DAT.DAG,
lambda a, b: a.dag_id == b.dag_id and equal_time(a.start_date, b.start_date),
),
(Resources(cpus=0.1, ram=2048), None, None),
(EmptyOperator(task_id="test-task"), None, None),
(
TaskGroup(
group_id="test-group",
dag=DAG(dag_id="test_dag", start_date=datetime.now()),
),
None,
None,
),
(
Param("test", "desc"),
DAT.PARAM,
lambda a, b: a.value == b.value and a.description == b.description,
),
(
XComArg(
operator=PythonOperator(
python_callable=int,
task_id="test_xcom_op",
do_xcom_push=True,
)
),
DAT.XCOM_REF,
None,
),
(
MockLazySelectSequence(),
None,
lambda a, b: len(a) == len(b) and isinstance(b, list),
),
(Asset(uri="test://asset1", name="test"), DAT.ASSET, equals),
(
Asset(
uri="test://asset1",
name="test",
watchers=[AssetWatcher(name="test", trigger=FileDeleteTrigger(filepath="/tmp"))],
),
DAT.ASSET,
equals,
),
(
Connection(conn_id="TEST_ID", uri="mysql://"),
DAT.CONNECTION,
lambda a, b: a.get_uri() == b.get_uri(),
),
(
TaskCallbackRequest(
filepath="filepath",
ti=TI,
bundle_name="testing",
bundle_version=None,
),
DAT.TASK_CALLBACK_REQUEST,
lambda a, b: a.ti == b.ti,
),
(
DagCallbackRequest(
filepath="filepath",
dag_id="fake_dag",
run_id="fake_run",
bundle_name="testing",
bundle_version=None,
),
DAT.DAG_CALLBACK_REQUEST,
lambda a, b: a.dag_id == b.dag_id,
),
(Asset.ref(name="test"), DAT.ASSET_REF, lambda a, b: a.name == b.name),
(
DeadlineAlert(
reference=DeadlineReference.DAGRUN_LOGICAL_DATE,
interval=timedelta(),
callback=AsyncCallback("fake_callable"),
),
None,
None,
),
(
create_outlet_event_accessors(
Asset(uri="test", name="test", group="test-group"), {"key": "value"}, []
),
DAT.ASSET_EVENT_ACCESSORS,
equal_outlet_event_accessors,
),
(
create_outlet_event_accessors(
AssetAlias(name="test_alias", group="test-alias-group"),
{"key": "value"},
[
AssetAliasEvent(
source_alias_name="test_alias",
dest_asset_key=AssetUniqueKey(name="test_name", uri="test://asset-uri"),
dest_asset_extra={"extra": "from asset itself"},
extra={"extra": "from event"},
)
],
),
DAT.ASSET_EVENT_ACCESSORS,
equal_outlet_event_accessors,
),
(
AirflowException("test123 wohoo!"),
DAT.AIRFLOW_EXC_SER,
equal_exception,
),
(
AirflowFailException("uuups, failed :-("),
DAT.AIRFLOW_EXC_SER,
equal_exception,
),
(
DAG_WITH_TASKS,
DAT.DAG,
lambda _, b: list(b.task_group.children.keys()) == sorted(b.task_group.children.keys()),
),
(
DeadlineAlert(
reference=DeadlineReference.DAGRUN_QUEUED_AT,
interval=timedelta(hours=1),
callback=AsyncCallback("valid.callback.path", kwargs={"arg1": "value1"}),
),
DAT.DEADLINE_ALERT,
equals,
),
],
)
def test_serialize_deserialize(input, encoded_type, cmp_func):
from airflow.serialization.serialized_objects import BaseSerialization
serialized = BaseSerialization.serialize(input) # does not raise
json.dumps(serialized) # does not raise
if encoded_type is not None:
assert serialized[Encoding.TYPE] == encoded_type
assert serialized[Encoding.VAR] is not None
if cmp_func is not None:
deserialized = BaseSerialization.deserialize(serialized)
assert cmp_func(input, deserialized)
# Verify recursive behavior
obj = [[input]]
serialized = BaseSerialization.serialize(obj) # does not raise
# Verify the result is JSON-serializable
json.dumps(serialized) # does not raise
@pytest.mark.parametrize("reference", REFERENCE_TYPES)
def test_serialize_deserialize_deadline_alert(reference):
public_deadline_alert_fields = {
field.lower() for field in vars(DeadlineAlertFields) if not field.startswith("_")
}
original = DeadlineAlert(
reference=reference,
interval=timedelta(hours=1),
callback=AsyncCallback(empty_callback_for_deadline, kwargs=TEST_CALLBACK_KWARGS),
)
serialized = original.serialize_deadline_alert()
assert serialized[Encoding.TYPE] == DAT.DEADLINE_ALERT
assert set(serialized[Encoding.VAR].keys()) == public_deadline_alert_fields
deserialized = DeadlineAlert.deserialize_deadline_alert(serialized)
assert deserialized.reference.serialize_reference() == reference.serialize_reference()
assert deserialized.interval == original.interval
assert deserialized.callback == original.callback
@pytest.mark.parametrize(
"conn_uri",
[
pytest.param("aws://", id="only-conn-type"),
pytest.param(
"postgres://username:password@ec2.compute.com:5432/the_database",
id="all-non-extra",
),
pytest.param(
"///?__extra__=%7B%22foo%22%3A+%22bar%22%2C+%22answer%22%3A+42%2C+%22"
"nullable%22%3A+null%2C+%22empty%22%3A+%22%22%2C+%22zero%22%3A+0%7D",
id="extra",
),
],
)
def test_backcompat_deserialize_connection(conn_uri):
"""Test deserialize connection which serialised by previous serializer implementation."""
from airflow.serialization.serialized_objects import BaseSerialization
conn_obj = {
Encoding.TYPE: DAT.CONNECTION,
Encoding.VAR: {"conn_id": "TEST_ID", "uri": conn_uri},
}
deserialized = BaseSerialization.deserialize(conn_obj)
assert deserialized.get_uri() == conn_uri
def test_ser_of_asset_event_accessor():
# todo: (Airflow 3.0) we should force reserialization on upgrade
d = OutletEventAccessors()
d[Asset("hi")].extra = "blah1"  # todo: this should maybe be forbidden? i.e. can extra be any json or just dict?
d[Asset(name="yo", uri="test://yo")].extra = {"this": "that", "the": "other"}
ser = BaseSerialization.serialize(var=d)
deser = BaseSerialization.deserialize(ser)
assert deser[Asset(uri="hi", name="hi")].extra == "blah1"
assert d[Asset(name="yo", uri="test://yo")].extra == {"this": "that", "the": "other"}
| MockLazySelectSequence |
python | django__django | tests/generic_inline_admin/admin.py | {
"start": 370,
"end": 443
} | class ____(GenericTabularInline):
model = PhoneNumber
| PhoneNumberInline |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/events.py | {
"start": 117609,
"end": 125363
} | class ____(event.Events[Query[Any]]):
"""Represent events within the construction of a :class:`_query.Query`
object.
.. legacy:: The :class:`_orm.QueryEvents` event methods are legacy
as of SQLAlchemy 2.0, and only apply to direct use of the
:class:`_orm.Query` object. They are not used for :term:`2.0 style`
statements. For events to intercept and modify 2.0 style ORM use,
use the :meth:`_orm.SessionEvents.do_orm_execute` hook.
The :class:`_orm.QueryEvents` hooks are now superseded by the
:meth:`_orm.SessionEvents.do_orm_execute` event hook.
"""
_target_class_doc = "SomeQuery"
_dispatch_target = Query
def before_compile(self, query: Query[Any]) -> None:
"""Receive the :class:`_query.Query`
object before it is composed into a
core :class:`_expression.Select` object.
.. deprecated:: 1.4 The :meth:`_orm.QueryEvents.before_compile` event
is superseded by the much more capable
:meth:`_orm.SessionEvents.do_orm_execute` hook. In version 1.4,
the :meth:`_orm.QueryEvents.before_compile` event is **no longer
used** for ORM-level attribute loads, such as loads of deferred
or expired attributes as well as relationship loaders. See the
new examples in :ref:`examples_session_orm_events` which
illustrate new ways of intercepting and modifying ORM queries
for the most common purpose of adding arbitrary filter criteria.
This event is intended to allow changes to the query given::
@event.listens_for(Query, "before_compile", retval=True)
def no_deleted(query):
for desc in query.column_descriptions:
if desc["type"] is User:
entity = desc["entity"]
query = query.filter(entity.deleted == False)
return query
The event should normally be listened to with the ``retval=True``
parameter set, so that the modified query may be returned.
The :meth:`.QueryEvents.before_compile` event by default
will disallow "baked" queries from caching a query, if the event
hook returns a new :class:`_query.Query` object.
This affects both direct
use of the baked query extension as well as its operation within
lazy loaders and eager loaders for relationships. In order to
re-establish the query being cached, apply the event adding the
``bake_ok`` flag::
@event.listens_for(Query, "before_compile", retval=True, bake_ok=True)
def my_event(query):
for desc in query.column_descriptions:
if desc["type"] is User:
entity = desc["entity"]
query = query.filter(entity.deleted == False)
return query
When ``bake_ok`` is set to True, the event hook will only be invoked
once, and not called for subsequent invocations of a particular query
that is being cached.
.. seealso::
:meth:`.QueryEvents.before_compile_update`
:meth:`.QueryEvents.before_compile_delete`
:ref:`baked_with_before_compile`
""" # noqa: E501
def before_compile_update(
self, query: Query[Any], update_context: BulkUpdate
) -> None:
"""Allow modifications to the :class:`_query.Query` object within
:meth:`_query.Query.update`.
.. deprecated:: 1.4 The :meth:`_orm.QueryEvents.before_compile_update`
event is superseded by the much more capable
:meth:`_orm.SessionEvents.do_orm_execute` hook.
Like the :meth:`.QueryEvents.before_compile` event, if the event
is to be used to alter the :class:`_query.Query` object, it should
be configured with ``retval=True``, and the modified
:class:`_query.Query` object returned, as in ::
@event.listens_for(Query, "before_compile_update", retval=True)
def no_deleted(query, update_context):
for desc in query.column_descriptions:
if desc["type"] is User:
entity = desc["entity"]
query = query.filter(entity.deleted == False)
update_context.values["timestamp"] = datetime.datetime.now(
datetime.UTC
)
return query
The ``.values`` dictionary of the "update context" object can also
be modified in place as illustrated above.
:param query: a :class:`_query.Query` instance; this is also
the ``.query`` attribute of the given "update context"
object.
:param update_context: an "update context" object which is
the same kind of object as described in
:paramref:`.QueryEvents.after_bulk_update.update_context`.
The object has a ``.values`` attribute in an UPDATE context which is
the dictionary of parameters passed to :meth:`_query.Query.update`.
This
dictionary can be modified to alter the VALUES clause of the
resulting UPDATE statement.
.. seealso::
:meth:`.QueryEvents.before_compile`
:meth:`.QueryEvents.before_compile_delete`
""" # noqa: E501
def before_compile_delete(
self, query: Query[Any], delete_context: BulkDelete
) -> None:
"""Allow modifications to the :class:`_query.Query` object within
:meth:`_query.Query.delete`.
.. deprecated:: 1.4 The :meth:`_orm.QueryEvents.before_compile_delete`
event is superseded by the much more capable
:meth:`_orm.SessionEvents.do_orm_execute` hook.
Like the :meth:`.QueryEvents.before_compile` event, this event
should be configured with ``retval=True``, and the modified
:class:`_query.Query` object returned, as in ::
@event.listens_for(Query, "before_compile_delete", retval=True)
def no_deleted(query, delete_context):
for desc in query.column_descriptions:
if desc["type"] is User:
entity = desc["entity"]
query = query.filter(entity.deleted == False)
return query
:param query: a :class:`_query.Query` instance; this is also
the ``.query`` attribute of the given "delete context"
object.
:param delete_context: a "delete context" object which is
the same kind of object as described in
:paramref:`.QueryEvents.after_bulk_delete.delete_context`.
.. seealso::
:meth:`.QueryEvents.before_compile`
:meth:`.QueryEvents.before_compile_update`
"""
@classmethod
def _listen(
cls,
event_key: _EventKey[_ET],
retval: bool = False,
bake_ok: bool = False,
**kw: Any,
) -> None:
fn = event_key._listen_fn
if not retval:
def wrap(*arg: Any, **kw: Any) -> Any:
if not retval:
query = arg[0]
fn(*arg, **kw)
return query
else:
return fn(*arg, **kw)
event_key = event_key.with_wrapper(wrap)
else:
# don't assume we can apply an attribute to the callable
def wrap(*arg: Any, **kw: Any) -> Any:
return fn(*arg, **kw)
event_key = event_key.with_wrapper(wrap)
wrap._bake_ok = bake_ok # type: ignore [attr-defined]
event_key.base_listen(**kw)
| QueryEvents |
python | dateutil__dateutil | src/dateutil/tz/_common.py | {
"start": 8750,
"end": 12977
} | class ____(_tzinfo):
"""
This is an abstract base class for time zones represented by an annual
transition into and out of DST. Child classes should implement the following
methods:
* ``__init__(self, *args, **kwargs)``
* ``transitions(self, year)`` - this is expected to return a tuple of
datetimes representing the DST on and off transitions in standard
time.
A fully initialized ``tzrangebase`` subclass should also provide the
following attributes:
* ``hasdst``: Boolean whether or not the zone uses DST.
* ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects
representing the respective UTC offsets.
* ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short
abbreviations in DST and STD, respectively.
* ``_hasdst``: Whether or not the zone has DST.
.. versionadded:: 2.6.0
"""
def __init__(self):
raise NotImplementedError('tzrangebase is an abstract base class')
def utcoffset(self, dt):
isdst = self._isdst(dt)
if isdst is None:
return None
elif isdst:
return self._dst_offset
else:
return self._std_offset
def dst(self, dt):
isdst = self._isdst(dt)
if isdst is None:
return None
elif isdst:
return self._dst_base_offset
else:
return ZERO
@tzname_in_python2
def tzname(self, dt):
if self._isdst(dt):
return self._dst_abbr
else:
return self._std_abbr
def fromutc(self, dt):
""" Given a datetime in UTC, return local time """
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
# Get transitions - if there are none, fixed offset
transitions = self.transitions(dt.year)
if transitions is None:
return dt + self.utcoffset(dt)
# Get the transition times in UTC
dston, dstoff = transitions
dston -= self._std_offset
dstoff -= self._std_offset
utc_transitions = (dston, dstoff)
dt_utc = dt.replace(tzinfo=None)
isdst = self._naive_isdst(dt_utc, utc_transitions)
if isdst:
dt_wall = dt + self._dst_offset
else:
dt_wall = dt + self._std_offset
_fold = int(not isdst and self.is_ambiguous(dt_wall))
return enfold(dt_wall, fold=_fold)
def is_ambiguous(self, dt):
"""
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
"""
if not self.hasdst:
return False
start, end = self.transitions(dt.year)
dt = dt.replace(tzinfo=None)
return (end <= dt < end + self._dst_base_offset)
def _isdst(self, dt):
if not self.hasdst:
return False
elif dt is None:
return None
transitions = self.transitions(dt.year)
if transitions is None:
return False
dt = dt.replace(tzinfo=None)
isdst = self._naive_isdst(dt, transitions)
# Handle ambiguous dates
if not isdst and self.is_ambiguous(dt):
return not self._fold(dt)
else:
return isdst
def _naive_isdst(self, dt, transitions):
dston, dstoff = transitions
dt = dt.replace(tzinfo=None)
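# Northern-hemisphere style (DST starts before it ends within the year): DST is the
# window between the transitions. Otherwise (DST spans the new year) it is everything
# outside the standard-time window.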
if dston < dstoff:
isdst = dston <= dt < dstoff
else:
isdst = not dstoff <= dt < dston
return isdst
@property
def _dst_base_offset(self):
return self._dst_offset - self._std_offset
__hash__ = None
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "%s(...)" % self.__class__.__name__
__reduce__ = object.__reduce__
| tzrangebase |
python | sympy__sympy | sympy/codegen/ast.py | {
"start": 16799,
"end": 18450
} | class ____(AugmentedAssignment):
binop = '%'
# Mapping from binary op strings to AugmentedAssignment subclasses
augassign_classes = {
cls.binop: cls for cls in [
AddAugmentedAssignment, SubAugmentedAssignment, MulAugmentedAssignment,
DivAugmentedAssignment, ModAugmentedAssignment
]
}
def aug_assign(lhs, op, rhs):
"""
Create 'lhs op= rhs'.
Explanation
===========
Represents augmented variable assignment for code generation. This is a
convenience function. You can also use the AugmentedAssignment classes
directly, like AddAugmentedAssignment(x, y).
Parameters
==========
lhs : Expr
SymPy object representing the lhs of the expression. These should be
singular objects, such as one would use in writing code. Notable types
include Symbol, MatrixSymbol, MatrixElement, and Indexed. Types that
subclass these types are also supported.
op : str
Operator (+, -, /, \\*, %).
rhs : Expr
SymPy object representing the rhs of the expression. This can be any
type, provided its shape corresponds to that of the lhs. For example,
a Matrix type can be assigned to MatrixSymbol, but not to Symbol, as
the dimensions will not align.
Examples
========
>>> from sympy import symbols
>>> from sympy.codegen.ast import aug_assign
>>> x, y = symbols('x, y')
>>> aug_assign(x, '+', y)
AddAugmentedAssignment(x, y)
"""
if op not in augassign_classes:
raise ValueError("Unrecognized operator %s" % op)
return augassign_classes[op](lhs, rhs)
| ModAugmentedAssignment |
python | MorvanZhou__Reinforcement-learning-with-tensorflow | contents/9_Deep_Deterministic_Policy_Gradient_DDPG/DDPG_update.py | {
"start": 719,
"end": 5712
} | class ____(object):
def __init__(self, a_dim, s_dim, a_bound,):
self.memory = np.zeros((MEMORY_CAPACITY, s_dim * 2 + a_dim + 1), dtype=np.float32)
self.pointer = 0
self.sess = tf.Session()
self.a_dim, self.s_dim, self.a_bound = a_dim, s_dim, a_bound,
self.S = tf.placeholder(tf.float32, [None, s_dim], 's')
self.S_ = tf.placeholder(tf.float32, [None, s_dim], 's_')
self.R = tf.placeholder(tf.float32, [None, 1], 'r')
with tf.variable_scope('Actor'):
self.a = self._build_a(self.S, scope='eval', trainable=True)
a_ = self._build_a(self.S_, scope='target', trainable=False)
with tf.variable_scope('Critic'):
# assign self.a = a in memory when calculating q for td_error,
# otherwise the self.a is from Actor when updating Actor
q = self._build_c(self.S, self.a, scope='eval', trainable=True)
q_ = self._build_c(self.S_, a_, scope='target', trainable=False)
# networks parameters
self.ae_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval')
self.at_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target')
self.ce_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval')
self.ct_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target')
# target net replacement
self.soft_replace = [tf.assign(t, (1 - TAU) * t + TAU * e)
for t, e in zip(self.at_params + self.ct_params, self.ae_params + self.ce_params)]
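# Bellman target for the critic: reward plus the discounted Q-value that the target
# critic assigns to the target actor's action in the next state.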
q_target = self.R + GAMMA * q_
# in the feed_dic for the td_error, the self.a should change to actions in memory
td_error = tf.losses.mean_squared_error(labels=q_target, predictions=q)
self.ctrain = tf.train.AdamOptimizer(LR_C).minimize(td_error, var_list=self.ce_params)
a_loss = - tf.reduce_mean(q) # maximize the q
self.atrain = tf.train.AdamOptimizer(LR_A).minimize(a_loss, var_list=self.ae_params)
self.sess.run(tf.global_variables_initializer())
def choose_action(self, s):
return self.sess.run(self.a, {self.S: s[np.newaxis, :]})[0]
def learn(self):
# soft target replacement
self.sess.run(self.soft_replace)
indices = np.random.choice(MEMORY_CAPACITY, size=BATCH_SIZE)
bt = self.memory[indices, :]
bs = bt[:, :self.s_dim]
ba = bt[:, self.s_dim: self.s_dim + self.a_dim]
br = bt[:, -self.s_dim - 1: -self.s_dim]
bs_ = bt[:, -self.s_dim:]
self.sess.run(self.atrain, {self.S: bs})
self.sess.run(self.ctrain, {self.S: bs, self.a: ba, self.R: br, self.S_: bs_})
def store_transition(self, s, a, r, s_):
transition = np.hstack((s, a, [r], s_))
index = self.pointer % MEMORY_CAPACITY # replace the old memory with new memory
self.memory[index, :] = transition
self.pointer += 1
def _build_a(self, s, scope, trainable):
with tf.variable_scope(scope):
net = tf.layers.dense(s, 30, activation=tf.nn.relu, name='l1', trainable=trainable)
a = tf.layers.dense(net, self.a_dim, activation=tf.nn.tanh, name='a', trainable=trainable)
return tf.multiply(a, self.a_bound, name='scaled_a')
def _build_c(self, s, a, scope, trainable):
with tf.variable_scope(scope):
n_l1 = 30
w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], trainable=trainable)
w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], trainable=trainable)
b1 = tf.get_variable('b1', [1, n_l1], trainable=trainable)
net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
return tf.layers.dense(net, 1, trainable=trainable) # Q(s,a)
############################### training ####################################
env = gym.make(ENV_NAME)
env = env.unwrapped
env.seed(1)
s_dim = env.observation_space.shape[0]
a_dim = env.action_space.shape[0]
a_bound = env.action_space.high
ddpg = DDPG(a_dim, s_dim, a_bound)
var = 3 # control exploration
t1 = time.time()
for i in range(MAX_EPISODES):
s = env.reset()
ep_reward = 0
for j in range(MAX_EP_STEPS):
if RENDER:
env.render()
# Add exploration noise
a = ddpg.choose_action(s)
a = np.clip(np.random.normal(a, var), -2, 2) # add randomness to action selection for exploration
s_, r, done, info = env.step(a)
ddpg.store_transition(s, a, r / 10, s_)
if ddpg.pointer > MEMORY_CAPACITY:
var *= .9995 # decay the action randomness
ddpg.learn()
s = s_
ep_reward += r
if j == MAX_EP_STEPS-1:
print('Episode:', i, ' Reward: %i' % int(ep_reward), 'Explore: %.2f' % var, )
# if ep_reward > -300:RENDER = True
break
print('Running time: ', time.time() - t1) | DDPG |
python | apache__airflow | providers/edge3/src/airflow/providers/edge3/worker_api/datamodels.py | {
"start": 3296,
"end": 3712
} | class ____(EdgeJobBase):
"""Job that is to be executed on the edge worker."""
command: Annotated[
ExecuteTask,
Field(
title="Command",
description="Command line to use to execute the job in Airflow 2. Task definition in Airflow 3",
),
]
concurrency_slots: Annotated[int, Field(description="Number of concurrency slots the job requires.")]
| EdgeJobFetched |
python | huggingface__transformers | src/transformers/models/luke/modeling_luke.py | {
"start": 3617,
"end": 5554
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
The sum of masked language modeling (MLM) loss and entity prediction loss.
mlm_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Masked language modeling (MLM) loss.
mep_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Masked entity prediction (MEP) loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
entity_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the entity prediction head (scores for each entity vocabulary token before SoftMax).
entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
layer plus the initial entity embedding outputs.
"""
loss: Optional[torch.FloatTensor] = None
mlm_loss: Optional[torch.FloatTensor] = None
mep_loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
entity_logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
entity_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
@auto_docstring(
custom_intro="""
Outputs of entity classification models.
"""
)
| LukeMaskedLMOutput |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/cachecontrol/heuristics.py | {
"start": 1850,
"end": 2431
} | class ____(BaseHeuristic):
"""
Cache the response by providing an expires 1 day in the
future.
"""
def update_headers(self, response: HTTPResponse) -> dict[str, str]:
headers = {}
if "expires" not in response.headers:
date = parsedate(response.headers["date"])
expires = expire_after(timedelta(days=1), date=datetime(*date[:6], tzinfo=timezone.utc)) # type: ignore[index,misc]
headers["expires"] = datetime_to_header(expires)
headers["cache-control"] = "public"
return headers
| OneDayCache |
python | coleifer__peewee | tests/sqlite.py | {
"start": 2976,
"end": 3048
} | class ____(TestModel):
key = TextField()
data = JSONField()
| KeyData |
python | redis__redis-py | redis/commands/timeseries/__init__.py | {
"start": 3356,
"end": 3450
} | class ____(TimeSeriesCommands, redis.client.Pipeline):
"""Pipeline for the module."""
| Pipeline |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/input/posix_utils.py | {
"start": 142,
"end": 3973
} | class ____:
"""
Wrapper around stdin which reads (nonblocking) the next available 1024
bytes and decodes it.
Note that you can't be sure that the input file is closed if the ``read``
function returns an empty string. When ``errors=ignore`` is passed,
``read`` can return an empty string if all malformed input was replaced by
an empty string. (We can't block here and wait for more input.) So, because
of that, check the ``closed`` attribute, to be sure that the file has been
closed.
:param stdin_fd: File descriptor from which we read.
:param errors: Can be 'ignore', 'strict' or 'replace'.
On Python3, this can be 'surrogateescape', which is the default.
'surrogateescape' is preferred, because this allows us to transfer
unrecognized bytes to the key bindings. Some terminals, like lxterminal
and Guake, use the 'Mxx' notation to send mouse events, where each 'x'
can be any possible byte.
"""
# By default, we want to 'ignore' errors here. The input stream can be full
# of junk. One occurrence of this that I had was when using iTerm2 on OS X,
# with "Option as Meta" checked (You should choose "Option as +Esc".)
def __init__(
self, stdin_fd: int, errors: str = "surrogateescape", encoding: str = "utf-8"
) -> None:
self.stdin_fd = stdin_fd
self.errors = errors
# Create incremental decoder for decoding stdin.
# We can not just do `os.read(stdin.fileno(), 1024).decode('utf-8')`, because
# it could be that we are in the middle of a utf-8 byte sequence.
self._stdin_decoder_cls = getincrementaldecoder(encoding)
self._stdin_decoder = self._stdin_decoder_cls(errors=errors)
#: True when there is nothing anymore to read.
self.closed = False
def read(self, count: int = 1024) -> str:
# By default we choose a rather small chunk size, because reading
# big amounts of input at once, causes the event loop to process
# all these key bindings also at once without going back to the
# loop. This will make the application feel unresponsive.
"""
Read the input and return it as a string.
Return the text. Note that this can return an empty string, even when
the input stream was not yet closed. This means that something went
wrong during the decoding.
"""
if self.closed:
return ""
# Check whether there is some input to read. `os.read` would block
# otherwise.
# (Actually, the event loop is responsible to make sure that this
# function is only called when there is something to read, but for some
# reason this happens in certain situations.)
try:
if not select.select([self.stdin_fd], [], [], 0)[0]:
return ""
except OSError:
# Happens for instance when the file descriptor was closed.
# (We had this in ptterm, where the FD became ready, a callback was
# scheduled, but in the meantime another callback closed it already.)
self.closed = True
# Note: the following works better than wrapping `self.stdin` like
# `codecs.getreader('utf-8')(stdin)` and doing `read(1)`.
# Somehow that causes some latency when the escape
# character is pressed. (Especially on combination with the `select`.)
try:
data = os.read(self.stdin_fd, count)
# Nothing more to read, stream is closed.
if data == b"":
self.closed = True
return ""
except OSError:
# In case of SIGWINCH
data = b""
return self._stdin_decoder.decode(data)
| PosixStdinReader |
python | allegroai__clearml | clearml/backend_api/services/v2_23/datasets.py | {
"start": 215721,
"end": 226589
} | class ____(Request):
"""
Publishes the specified version and creates a draft child version for it
:param dataset: Dataset ID
:type dataset: str
:param version: Draft version ID
:type version: str
:param publish_name: New name for the published version. The default value is
'snapshot <date-time>'
:type publish_name: str
:param publish_comment: New comment for the published version. The default
value is 'published at <date-time> by <user>'
:type publish_comment: str
:param publish_metadata: User-specified metadata object for the published
version. Keys must not include '$' and '.'.
:type publish_metadata: dict
:param child_name: Name for the child version. If not provided then the name of
the parent version is taken
:type child_name: str
:param child_comment: Comment for the child version
:type child_comment: str
:param child_metadata: User-specified metadata object for the child version.
Keys must not include '$' and '.'.
:type child_metadata: dict
:param publish_tags: The new user-defined tags for the published version. If
not passed then the parent version tags are used
:type publish_tags: Sequence[str]
:param publish_system_tags: The new system tags for the published version. If
not passed then the parent version system tags are used
:type publish_system_tags: Sequence[str]
:param child_tags: The new user tags for the child version. If not passed then
the parent version tags are used
:type child_tags: Sequence[str]
:param child_system_tags: The new system tags for the child version. If not
passed then the parent version system tags are used
:type child_system_tags: Sequence[str]
"""
_service = "datasets"
_action = "publish_and_create_child_version"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"child_comment": {
"description": "Comment for the child version",
"type": "string",
},
"child_metadata": {
"additionalProperties": True,
"description": (
"User-specified metadata object for the child version. Keys must not include '$' and '.'."
),
"type": "object",
},
"child_name": {
"description": (
"Name for the child version. If not provided then the name of the parent version is taken"
),
"type": "string",
},
"child_system_tags": {
"description": (
"The new system tags for the child version. If not passed then the parent version system "
"tags are used"
),
"items": {"type": "string"},
"type": "array",
},
"child_tags": {
"description": (
"The new user tags for the child version. If not passed then the parent version tags are used"
),
"items": {"type": "string"},
"type": "array",
},
"dataset": {"description": "Dataset ID", "type": "string"},
"publish_comment": {
"description": (
"New comment for the published version. The default value is 'published at <date-time> by <user>'"
),
"type": "string",
},
"publish_metadata": {
"additionalProperties": True,
"description": (
"User-specified metadata object for the published version. Keys must not include '$' and '.'."
),
"type": "object",
},
"publish_name": {
"description": "New name for the published version. The default value is 'snapshot <date-time>'",
"type": "string",
},
"publish_system_tags": {
"description": (
"The new system tags for the published version. If not passed then the parent version system tags "
"are used"
),
"items": {"type": "string"},
"type": "array",
},
"publish_tags": {
"description": (
"The new user-defined tags for the published version. If not passed then the parent "
"version tags are used"
),
"items": {"type": "string"},
"type": "array",
},
"version": {"description": "Draft version ID", "type": "string"},
},
"required": ["dataset", "version"],
"type": "object",
}
def __init__(
self,
dataset,
version,
publish_name=None,
publish_comment=None,
publish_metadata=None,
child_name=None,
child_comment=None,
child_metadata=None,
publish_tags=None,
publish_system_tags=None,
child_tags=None,
child_system_tags=None,
**kwargs
):
super(PublishAndCreateChildVersionRequest, self).__init__(**kwargs)
self.dataset = dataset
self.version = version
self.publish_name = publish_name
self.publish_comment = publish_comment
self.publish_metadata = publish_metadata
self.child_name = child_name
self.child_comment = child_comment
self.child_metadata = child_metadata
self.publish_tags = publish_tags
self.publish_system_tags = publish_system_tags
self.child_tags = child_tags
self.child_system_tags = child_system_tags
@schema_property("dataset")
def dataset(self):
return self._property_dataset
@dataset.setter
def dataset(self, value):
if value is None:
self._property_dataset = None
return
self.assert_isinstance(value, "dataset", six.string_types)
self._property_dataset = value
@schema_property("version")
def version(self):
return self._property_version
@version.setter
def version(self, value):
if value is None:
self._property_version = None
return
self.assert_isinstance(value, "version", six.string_types)
self._property_version = value
@schema_property("publish_name")
def publish_name(self):
return self._property_publish_name
@publish_name.setter
def publish_name(self, value):
if value is None:
self._property_publish_name = None
return
self.assert_isinstance(value, "publish_name", six.string_types)
self._property_publish_name = value
@schema_property("publish_comment")
def publish_comment(self):
return self._property_publish_comment
@publish_comment.setter
def publish_comment(self, value):
if value is None:
self._property_publish_comment = None
return
self.assert_isinstance(value, "publish_comment", six.string_types)
self._property_publish_comment = value
@schema_property("publish_metadata")
def publish_metadata(self):
return self._property_publish_metadata
@publish_metadata.setter
def publish_metadata(self, value):
if value is None:
self._property_publish_metadata = None
return
self.assert_isinstance(value, "publish_metadata", (dict,))
self._property_publish_metadata = value
@schema_property("child_name")
def child_name(self):
return self._property_child_name
@child_name.setter
def child_name(self, value):
if value is None:
self._property_child_name = None
return
self.assert_isinstance(value, "child_name", six.string_types)
self._property_child_name = value
@schema_property("child_comment")
def child_comment(self):
return self._property_child_comment
@child_comment.setter
def child_comment(self, value):
if value is None:
self._property_child_comment = None
return
self.assert_isinstance(value, "child_comment", six.string_types)
self._property_child_comment = value
@schema_property("child_metadata")
def child_metadata(self):
return self._property_child_metadata
@child_metadata.setter
def child_metadata(self, value):
if value is None:
self._property_child_metadata = None
return
self.assert_isinstance(value, "child_metadata", (dict,))
self._property_child_metadata = value
@schema_property("publish_tags")
def publish_tags(self):
return self._property_publish_tags
@publish_tags.setter
def publish_tags(self, value):
if value is None:
self._property_publish_tags = None
return
self.assert_isinstance(value, "publish_tags", (list, tuple))
self.assert_isinstance(value, "publish_tags", six.string_types, is_array=True)
self._property_publish_tags = value
@schema_property("publish_system_tags")
def publish_system_tags(self):
return self._property_publish_system_tags
@publish_system_tags.setter
def publish_system_tags(self, value):
if value is None:
self._property_publish_system_tags = None
return
self.assert_isinstance(value, "publish_system_tags", (list, tuple))
self.assert_isinstance(
value, "publish_system_tags", six.string_types, is_array=True
)
self._property_publish_system_tags = value
@schema_property("child_tags")
def child_tags(self):
return self._property_child_tags
@child_tags.setter
def child_tags(self, value):
if value is None:
self._property_child_tags = None
return
self.assert_isinstance(value, "child_tags", (list, tuple))
self.assert_isinstance(value, "child_tags", six.string_types, is_array=True)
self._property_child_tags = value
@schema_property("child_system_tags")
def child_system_tags(self):
return self._property_child_system_tags
@child_system_tags.setter
def child_system_tags(self, value):
if value is None:
self._property_child_system_tags = None
return
self.assert_isinstance(value, "child_system_tags", (list, tuple))
self.assert_isinstance(
value, "child_system_tags", six.string_types, is_array=True
)
self._property_child_system_tags = value
| PublishAndCreateChildVersionRequest |
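The request class above only constructs and validates; dispatching it requires an authenticated ClearML API session, which is outside this record. A hedged sketch of building the request is below — the import path is taken from the record header, and the IDs and names are placeholders.

```python
from clearml.backend_api.services.v2_23.datasets import (
    PublishAndCreateChildVersionRequest,
)

# Placeholder values throughout; only fields defined in the schema above are used.
req = PublishAndCreateChildVersionRequest(
    dataset="<dataset-id>",
    version="<draft-version-id>",
    publish_name="snapshot 2024-01-01",
    child_name="next-draft",
    publish_tags=["released"],
)
# The property setters enforce the schema types, e.g. publish_tags must be a
# list or tuple of strings, mirroring the assert_isinstance checks above.
```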
python | wandb__wandb | wandb/apis/public/artifacts.py | {
"start": 5394,
"end": 8630
} | class ____:
"""An artifact object that satisfies query based on the specified type.
Args:
client: The client instance to use for querying W&B.
entity: The entity (user or team) that owns the project.
project: The name of the project to query for artifact types.
type_name: The name of the artifact type.
attrs: Optional attributes to initialize the ArtifactType.
If omitted, the object will load its attributes from W&B upon
initialization.
<!-- lazydoc-ignore-init: internal -->
"""
_attrs: ArtifactTypeFragment
def __init__(
self,
client: Client,
entity: str,
project: str,
type_name: str,
attrs: ArtifactTypeFragment | None = None,
):
from wandb.sdk.artifacts._generated import ArtifactTypeFragment
self.client = client
self.entity = entity
self.project = project
self.type = type_name
# FIXME: Make this lazy, so we don't (re-)fetch the attributes until they are needed
self._attrs = ArtifactTypeFragment.model_validate(attrs or self.load())
def load(self) -> ArtifactTypeFragment:
"""Load the artifact type attributes from W&B.
<!-- lazydoc-ignore: internal -->
"""
from wandb.sdk.artifacts._generated import (
PROJECT_ARTIFACT_TYPE_GQL,
ArtifactTypeFragment,
ProjectArtifactType,
)
gql_op = gql(PROJECT_ARTIFACT_TYPE_GQL)
gql_vars = {
"entity": self.entity,
"project": self.project,
"artifactType": self.type,
}
data = self.client.execute(gql_op, variable_values=gql_vars)
result = ProjectArtifactType.model_validate(data)
if not ((proj := result.project) and (artifact_type := proj.artifact_type)):
raise ValueError(f"Could not find artifact type {self.type!r}")
return ArtifactTypeFragment.model_validate(artifact_type)
@property
def id(self) -> str:
"""The unique identifier of the artifact type."""
return self._attrs.id
@property
def name(self) -> str:
"""The name of the artifact type."""
return self._attrs.name
@normalize_exceptions
def collections(self, per_page: int = 50) -> ArtifactCollections:
"""Get all artifact collections associated with this artifact type.
Args:
per_page (int): The number of artifact collections to fetch per page.
Default is 50.
"""
return ArtifactCollections(
self.client,
entity=self.entity,
project=self.project,
type_name=self.type,
)
def collection(self, name: str) -> ArtifactCollection:
"""Get a specific artifact collection by name.
Args:
name (str): The name of the artifact collection to retrieve.
"""
return ArtifactCollection(
self.client,
entity=self.entity,
project=self.project,
name=name,
type=self.type,
)
def __repr__(self) -> str:
return f"<ArtifactType {self.type}>"
| ArtifactType |
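ArtifactType above is normally reached through wandb's public Api object rather than constructed directly; the sketch below sticks to the constructor and methods shown in the record and treats the client, entity, and project values as placeholders supplied by the caller.

```python
from wandb.apis.public.artifacts import ArtifactType

def collections_for(client, entity: str, project: str, type_name: str = "dataset"):
    # Constructing directly triggers load() and fetches id/name from W&B.
    art_type = ArtifactType(client, entity, project, type_name)
    print(art_type.id, art_type.name)
    # collections() returns an ArtifactCollections pager for this type.
    return art_type.collections(per_page=50)
```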
python | tensorflow__tensorflow | tensorflow/python/profiler/profiler_v2.py | {
"start": 1889,
"end": 6608
} | class ____(
collections.namedtuple('ProfilerOptions', [
'host_tracer_level', 'python_tracer_level', 'device_tracer_level',
'delay_ms'
])):
"""Options for finer control over the profiler.
Use `tf.profiler.experimental.ProfilerOptions` to control `tf.profiler`
behavior.
Fields:
host_tracer_level: Adjust CPU tracing level. Values are: `1` - critical info
only, `2` - info, `3` - verbose. [default value is `2`]
python_tracer_level: Toggle tracing of Python function calls. Values are:
`1` - enabled, `0` - disabled [default value is `0`]
device_tracer_level: Adjust device (TPU/GPU) tracing level. Values are:
`1` - enabled, `0` - disabled [default value is `1`]
delay_ms: Requests for all hosts to start profiling at a timestamp that is
`delay_ms` away from the current time. `delay_ms` is in milliseconds. If
zero, each host will start profiling immediately upon receiving the
request. Default value is `None`, allowing the profiler guess the best
value.
"""
def __new__(cls,
host_tracer_level=2,
python_tracer_level=0,
device_tracer_level=1,
delay_ms=None):
return super(ProfilerOptions,
cls).__new__(cls, host_tracer_level, python_tracer_level,
device_tracer_level, delay_ms)
@tf_export('profiler.experimental.start', v1=[])
def start(logdir, options=None):
"""Start profiling TensorFlow performance.
Args:
logdir: Profiling results log directory.
options: `ProfilerOptions` namedtuple to specify miscellaneous profiler
options. See example usage below.
Raises:
AlreadyExistsError: If a profiling session is already running.
Example usage:
```python
options = tf.profiler.experimental.ProfilerOptions(host_tracer_level = 3,
python_tracer_level = 1,
device_tracer_level = 1)
tf.profiler.experimental.start('logdir_path', options = options)
# Training code here
tf.profiler.experimental.stop()
```
To view the profiling results, launch TensorBoard and point it to `logdir`.
Open your browser and go to `localhost:6006/#profile` to view profiling
results.
"""
global _profiler
with _profiler_lock:
if _profiler is not None:
raise errors.AlreadyExistsError(None, None,
'Another profiler is running.')
_profiler = _pywrap_profiler.ProfilerSession()
try:
# support for namedtuple in pybind11 is missing, we change it to
# dict type first.
opts = dict(options._asdict()) if options is not None else {}
_profiler.start(logdir, opts)
except errors.AlreadyExistsError:
logging.warning('Another profiler session is running which is probably '
'created by profiler server. Please avoid using profiler '
'server and profiler APIs at the same time.')
raise errors.AlreadyExistsError(None, None,
'Another profiler is running.')
except Exception:
_profiler = None
raise
@tf_export('profiler.experimental.stop', v1=[])
def stop(save=True):
"""Stops the current profiling session.
The profiler session will be stopped and profile results can be saved.
Args:
save: An optional variable to save the results to TensorBoard. Default True.
Raises:
UnavailableError: If there is no active profiling session.
"""
global _profiler
with _profiler_lock:
if _profiler is None:
raise errors.UnavailableError(
None, None,
'Cannot export profiling results. No profiler is running.')
if save:
try:
_profiler.export_to_tb()
except Exception:
_profiler = None
raise
_profiler = None
def warmup():
"""Warm-up the profiler session.
The profiler session will set up profiling context, including loading CUPTI
library for GPU profiling. This is used for improving the accuracy of
the profiling results.
"""
start('')
stop(save=False)
@tf_export('profiler.experimental.server.start', v1=[])
def start_server(port):
"""Start a profiler grpc server that listens to given port.
The profiler server will exit when the process finishes. The service is
defined in tensorflow/core/profiler/profiler_service.proto.
Args:
port: port profiler server listens to.
Example usage:
```python
tf.profiler.experimental.server.start(6009)
# do your training here.
```
"""
_pywrap_profiler.start_server(port)
@tf_export('profiler.experimental.Profile', v1=[])
| ProfilerOptions |
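The start()/stop() docstrings above already show the basic call sequence; a small variation worth noting is wrapping the profiled region in try/finally, since start() raises AlreadyExistsError while a previous session is still open. The log directory and option values below are illustrative.

```python
import tensorflow as tf

options = tf.profiler.experimental.ProfilerOptions(
    host_tracer_level=2, python_tracer_level=0, device_tracer_level=1
)
tf.profiler.experimental.start("/tmp/tf_profile", options=options)
try:
    pass  # a few training steps would run here
finally:
    # Always stop: otherwise the module-level session stays active and any
    # later start() fails with AlreadyExistsError.
    tf.profiler.experimental.stop()
```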
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 300028,
"end": 302110
} | class ____(StatNode):
# del statement
#
# args [ExprNode]
child_attrs = ["args"]
ignore_nonexisting = False
def analyse_declarations(self, env):
for arg in self.args:
arg.analyse_target_declaration(env)
def analyse_expressions(self, env):
for i, arg in enumerate(self.args):
arg = self.args[i] = arg.analyse_target_expression(env, None)
if arg.type.is_pyobject or (arg.is_name and arg.type.is_memoryviewslice):
if arg.is_name and arg.entry.is_cglobal:
error(arg.pos, "Deletion of global C variable")
elif arg.type.is_ptr and arg.type.base_type.is_cpp_class:
self.cpp_check(env)
elif arg.type.is_cpp_class:
error(arg.pos, "Deletion of non-heap C++ object")
elif arg.is_subscript and arg.base.type is Builtin.bytearray_type:
pass # del ba[i]
else:
error(arg.pos, "Deletion of non-Python, non-C++ object")
#arg.release_target_temp(env)
return self
def nogil_check(self, env):
for arg in self.args:
if arg.type.is_pyobject:
self.gil_error()
gil_message = "Deleting Python object"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
for arg in self.args:
if (arg.type.is_pyobject or
arg.type.is_memoryviewslice or
arg.is_subscript and arg.base.type is Builtin.bytearray_type):
arg.generate_deletion_code(
code, ignore_nonexisting=self.ignore_nonexisting)
elif arg.type.is_ptr and arg.type.base_type.is_cpp_class:
arg.generate_evaluation_code(code)
code.putln("delete %s;" % arg.result())
arg.generate_disposal_code(code)
arg.free_temps(code)
# else error reported earlier
def annotate(self, code):
for arg in self.args:
arg.annotate(code)
| DelStatNode |
python | kubernetes-client__python | kubernetes/client/models/v1beta1_resource_claim.py | {
"start": 383,
"end": 7590
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1beta1ResourceClaimSpec',
'status': 'V1beta1ResourceClaimStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
"""V1beta1ResourceClaim - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""Gets the api_version of this V1beta1ResourceClaim. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1beta1ResourceClaim. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1beta1ResourceClaim.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1beta1ResourceClaim. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1beta1ResourceClaim. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1beta1ResourceClaim. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1beta1ResourceClaim.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1beta1ResourceClaim. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1beta1ResourceClaim. # noqa: E501
:return: The metadata of this V1beta1ResourceClaim. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1beta1ResourceClaim.
:param metadata: The metadata of this V1beta1ResourceClaim. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1beta1ResourceClaim. # noqa: E501
:return: The spec of this V1beta1ResourceClaim. # noqa: E501
:rtype: V1beta1ResourceClaimSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1beta1ResourceClaim.
:param spec: The spec of this V1beta1ResourceClaim. # noqa: E501
:type: V1beta1ResourceClaimSpec
"""
if self.local_vars_configuration.client_side_validation and spec is None: # noqa: E501
raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
self._spec = spec
@property
def status(self):
"""Gets the status of this V1beta1ResourceClaim. # noqa: E501
:return: The status of this V1beta1ResourceClaim. # noqa: E501
:rtype: V1beta1ResourceClaimStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1beta1ResourceClaim.
:param status: The status of this V1beta1ResourceClaim. # noqa: E501
:type: V1beta1ResourceClaimStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1ResourceClaim):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1ResourceClaim):
return True
return self.to_dict() != other.to_dict()
| V1beta1ResourceClaim |
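A short sketch of using the generated model above, relying only on the constructor and to_dict() shown in the record. The api_version/kind strings are illustrative, and the spec object (a V1beta1ResourceClaimSpec) is assumed to be built elsewhere.

```python
from kubernetes.client import V1beta1ResourceClaim, V1ObjectMeta

def claim_as_dict(spec):
    # spec is required: with client-side validation enabled, the setter above
    # raises ValueError if it is None.
    claim = V1beta1ResourceClaim(
        api_version="resource.k8s.io/v1beta1",
        kind="ResourceClaim",
        metadata=V1ObjectMeta(name="example-claim"),
        spec=spec,
    )
    return claim.to_dict()  # recursively serializes nested models
```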
python | keon__algorithms | tests/test_strings.py | {
"start": 18402,
"end": 18867
} | class ____(unittest.TestCase):
"""[summary]
Test for the file atbash_cipher.py
Arguments:
unittest {[type]} -- [description]
"""
def test_atbash_cipher(self):
self.assertEqual("zyxwvutsrqponml", atbash("abcdefghijklmno"))
self.assertEqual("KbgslM", atbash("PythoN"))
self.assertEqual("AttaCK at DawN", atbash("ZggzXP zg WzdM"))
self.assertEqual("ZggzXP zg WzdM", atbash("AttaCK at DawN"))
| TestAtbashCipher |
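The test above imports `atbash` from the repository's atbash_cipher module, which is not part of this record. A minimal implementation consistent with the asserted behaviour — letters mirrored within their own case, all other characters left untouched — would look roughly like this:

```python
import string

def atbash(text: str) -> str:
    lower = string.ascii_lowercase
    upper = string.ascii_uppercase
    # Map a<->z, b<->y, ... and A<->Z, B<->Y, ...; everything else passes through.
    table = str.maketrans(lower + upper, lower[::-1] + upper[::-1])
    return text.translate(table)

assert atbash("PythoN") == "KbgslM"
assert atbash("AttaCK at DawN") == "ZggzXP zg WzdM"
```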
python | sqlalchemy__sqlalchemy | test/orm/test_dataclasses.py | {
"start": 21159,
"end": 25452
} | class ____(FieldEmbeddedMixinWLambdaTest):
@classmethod
def setup_classes(cls):
declarative = cls.DeclarativeBasic.registry.mapped
@dataclasses.dataclass
class WidgetDC:
__sa_dataclass_metadata_key__ = "sa"
widget_id: int = dataclasses.field(
init=False,
metadata={"sa": Column(Integer, primary_key=True)},
)
# fk on mixin
account_id: int = dataclasses.field(
init=False,
metadata={
"sa": declared_attr(
lambda: Column(
Integer,
ForeignKey("accounts.account_id"),
nullable=False,
)
)
},
)
has_a_default: str = dataclasses.field(
default="some default",
metadata={"sa": declared_attr(lambda: Column(String(50)))},
)
@declarative
@dataclasses.dataclass
class Widget(WidgetDC):
__tablename__ = "widgets"
__sa_dataclass_metadata_key__ = "sa"
type = Column(String(30), nullable=False)
name: Optional[str] = dataclasses.field(
default=None,
metadata={"sa": Column(String(30), nullable=False)},
)
__mapper_args__ = dict(
polymorphic_on="type",
polymorphic_identity="normal",
)
@declarative
@dataclasses.dataclass
class SpecialWidget(Widget):
__tablename__ = "special_widgets"
__sa_dataclass_metadata_key__ = "sa"
special_widget_id: int = dataclasses.field(
init=False,
metadata={
"sa": Column(
ForeignKey("widgets.widget_id"), primary_key=True
)
},
)
magic: bool = dataclasses.field(
default=False, metadata={"sa": Column(Boolean)}
)
__mapper_args__ = dict(
polymorphic_identity="special",
)
@dataclasses.dataclass
class AccountDC:
__sa_dataclass_metadata_key__ = "sa"
# relationship on mixin
widgets: List[Widget] = dataclasses.field(
default_factory=list,
metadata={"sa": declared_attr(lambda: relationship("Widget"))},
)
account_id: int = dataclasses.field(
init=False,
metadata={"sa": Column(Integer, primary_key=True)},
)
widget_count: int = dataclasses.field(
init=False,
metadata={
"sa": Column("widget_count", Integer, nullable=False)
},
)
@declarative
class Account(AccountDC):
__tablename__ = "accounts"
__sa_dataclass_metadata_key__ = "sa"
def __post_init__(self):
self.widget_count = len(self.widgets)
def add_widget(self, widget: Widget):
self.widgets.append(widget)
self.widget_count += 1
@declarative
@dataclasses.dataclass
class User:
__tablename__ = "user"
__sa_dataclass_metadata_key__ = "sa"
user_id: int = dataclasses.field(
init=False,
metadata={"sa": Column(Integer, primary_key=True)},
)
# fk w declared attr on mapped class
account_id: int = dataclasses.field(
init=False,
metadata={
"sa": declared_attr(
lambda: Column(
Integer,
ForeignKey("accounts.account_id"),
nullable=False,
)
)
},
)
cls.classes["Account"] = Account
cls.classes["Widget"] = Widget
cls.classes["User"] = User
cls.classes["SpecialWidget"] = SpecialWidget
| FieldEmbeddedMixinWDeclaredAttrTest |
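The setup above is long because it covers inheritance, polymorphism, and relationships; the core pattern is compact: each dataclass names a metadata key via `__sa_dataclass_metadata_key__`, every mapped field carries its Column (or relationship) under that key, and mixin-level columns are wrapped in `declared_attr(lambda: ...)` so they are created per mapped subclass. A stripped-down sketch of that pre-2.0 field-embedded style follows; table and column names are illustrative.

```python
import dataclasses
from typing import Optional

from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import declared_attr, registry

mapper_registry = registry()

@dataclasses.dataclass
class AccountRefMixin:
    __sa_dataclass_metadata_key__ = "sa"

    # declared_attr defers creation of the FK column until each subclass is mapped.
    account_id: int = dataclasses.field(
        init=False,
        metadata={
            "sa": declared_attr(
                lambda: Column(Integer, ForeignKey("accounts.account_id"))
            )
        },
    )

@mapper_registry.mapped
@dataclasses.dataclass
class Account:
    __tablename__ = "accounts"
    __sa_dataclass_metadata_key__ = "sa"

    account_id: int = dataclasses.field(
        init=False, metadata={"sa": Column(Integer, primary_key=True)}
    )

@mapper_registry.mapped
@dataclasses.dataclass
class Widget(AccountRefMixin):
    __tablename__ = "widgets"
    __sa_dataclass_metadata_key__ = "sa"

    widget_id: int = dataclasses.field(
        init=False, metadata={"sa": Column(Integer, primary_key=True)}
    )
    name: Optional[str] = dataclasses.field(
        default=None, metadata={"sa": Column(String(30))}
    )
```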
python | docker__docker-py | tests/integration/models_containers_test.py | {
"start": 188,
"end": 11572
} | class ____(BaseIntegrationTest):
def test_run(self):
client = docker.from_env(version=TEST_API_VERSION)
assert client.containers.run(
"alpine", "echo hello world", remove=True
) == b'hello world\n'
def test_run_detach(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 300", detach=True)
self.tmp_containers.append(container.id)
assert container.attrs['Config']['Image'] == "alpine"
assert container.attrs['Config']['Cmd'] == ['sleep', '300']
def test_run_with_error(self):
client = docker.from_env(version=TEST_API_VERSION)
with pytest.raises(docker.errors.ContainerError) as cm:
client.containers.run("alpine", "cat /test", remove=True)
assert cm.value.exit_status == 1
assert "cat /test" in cm.exconly()
assert "alpine" in cm.exconly()
assert "No such file or directory" in cm.exconly()
def test_run_with_image_that_does_not_exist(self):
client = docker.from_env(version=TEST_API_VERSION)
with pytest.raises(docker.errors.ImageNotFound):
client.containers.run("dockerpytest_does_not_exist")
@pytest.mark.skipif(
docker.constants.IS_WINDOWS_PLATFORM, reason="host mounts on Windows"
)
def test_run_with_volume(self):
client = docker.from_env(version=TEST_API_VERSION)
path = tempfile.mkdtemp()
container = client.containers.run(
"alpine", "sh -c 'echo \"hello\" > /insidecontainer/test'",
volumes=[f"{path}:/insidecontainer"],
detach=True
)
self.tmp_containers.append(container.id)
container.wait()
name = "container_volume_test"
out = client.containers.run(
"alpine", "cat /insidecontainer/test",
volumes=[f"{path}:/insidecontainer"],
name=name
)
self.tmp_containers.append(name)
assert out == b'hello\n'
def test_run_with_named_volume(self):
client = docker.from_env(version=TEST_API_VERSION)
volume = client.volumes.create(name="somevolume")
self.tmp_volumes.append(volume.id)
container = client.containers.run(
"alpine", "sh -c 'echo \"hello\" > /insidecontainer/test'",
volumes=["somevolume:/insidecontainer"],
detach=True
)
self.tmp_containers.append(container.id)
container.wait()
name = "container_volume_test"
out = client.containers.run(
"alpine", "cat /insidecontainer/test",
volumes=["somevolume:/insidecontainer"],
name=name
)
self.tmp_containers.append(name)
assert out == b'hello\n'
def test_run_with_network(self):
net_name = random_name()
client = docker.from_env(version=TEST_API_VERSION)
client.networks.create(net_name)
self.tmp_networks.append(net_name)
container = client.containers.run(
'alpine', 'echo hello world', network=net_name,
detach=True
)
self.tmp_containers.append(container.id)
attrs = container.attrs
assert 'NetworkSettings' in attrs
assert 'Networks' in attrs['NetworkSettings']
assert list(attrs['NetworkSettings']['Networks'].keys()) == [net_name]
def test_run_with_networking_config(self):
net_name = random_name()
client = docker.from_env(version=TEST_API_VERSION)
client.networks.create(net_name)
self.tmp_networks.append(net_name)
test_alias = 'hello'
test_driver_opt = {'key1': 'a'}
networking_config = {
net_name: client.api.create_endpoint_config(
aliases=[test_alias],
driver_opt=test_driver_opt
)
}
container = client.containers.run(
'alpine', 'echo hello world', network=net_name,
networking_config=networking_config,
detach=True
)
self.tmp_containers.append(container.id)
attrs = container.attrs
assert 'NetworkSettings' in attrs
assert 'Networks' in attrs['NetworkSettings']
assert list(attrs['NetworkSettings']['Networks'].keys()) == [net_name]
# Aliases no longer include the container's short-id in API v1.45.
assert attrs['NetworkSettings']['Networks'][net_name]['Aliases'] \
== [test_alias]
assert attrs['NetworkSettings']['Networks'][net_name]['DriverOpts'] \
== test_driver_opt
def test_run_with_networking_config_with_undeclared_network(self):
net_name = random_name()
client = docker.from_env(version=TEST_API_VERSION)
client.networks.create(net_name)
self.tmp_networks.append(net_name)
test_aliases = ['hello']
test_driver_opt = {'key1': 'a'}
networking_config = {
net_name: client.api.create_endpoint_config(
aliases=test_aliases,
driver_opt=test_driver_opt
),
'bar': client.api.create_endpoint_config(
aliases=['test'],
driver_opt={'key2': 'b'}
),
}
with pytest.raises(docker.errors.APIError):
container = client.containers.run(
'alpine', 'echo hello world', network=net_name,
networking_config=networking_config,
detach=True
)
self.tmp_containers.append(container.id)
def test_run_with_networking_config_only_undeclared_network(self):
net_name = random_name()
client = docker.from_env(version=TEST_API_VERSION)
client.networks.create(net_name)
self.tmp_networks.append(net_name)
networking_config = {
'bar': client.api.create_endpoint_config(
aliases=['hello'],
driver_opt={'key1': 'a'}
),
}
container = client.containers.run(
'alpine', 'echo hello world', network=net_name,
networking_config=networking_config,
detach=True
)
self.tmp_containers.append(container.id)
attrs = container.attrs
assert 'NetworkSettings' in attrs
assert 'Networks' in attrs['NetworkSettings']
assert list(attrs['NetworkSettings']['Networks'].keys()) == [net_name]
# Aliases no longer include the container's short-id in API v1.45.
assert (attrs['NetworkSettings']['Networks'][net_name]['Aliases']
is None)
assert (attrs['NetworkSettings']['Networks'][net_name]['DriverOpts']
is None)
def test_run_with_none_driver(self):
client = docker.from_env(version=TEST_API_VERSION)
out = client.containers.run(
"alpine", "echo hello",
log_config={"type": 'none'}
)
assert out is None
def test_run_with_json_file_driver(self):
client = docker.from_env(version=TEST_API_VERSION)
out = client.containers.run(
"alpine", "echo hello",
log_config={"type": 'json-file'}
)
assert out == b'hello\n'
@requires_api_version('1.25')
def test_run_with_auto_remove(self):
client = docker.from_env(version=TEST_API_VERSION)
out = client.containers.run(
# sleep(2) to allow any communication with the container
# before it gets removed by the host.
'alpine', 'sh -c "echo hello && sleep 2"', auto_remove=True
)
assert out == b'hello\n'
@requires_api_version('1.25')
def test_run_with_auto_remove_error(self):
client = docker.from_env(version=TEST_API_VERSION)
with pytest.raises(docker.errors.ContainerError) as e:
client.containers.run(
# sleep(2) to allow any communication with the container
# before it gets removed by the host.
'alpine', 'sh -c ">&2 echo error && sleep 2 && exit 1"',
auto_remove=True
)
assert e.value.exit_status == 1
assert e.value.stderr is None
def test_run_with_streamed_logs(self):
client = docker.from_env(version=TEST_API_VERSION)
out = client.containers.run(
'alpine', 'sh -c "echo hello && echo world"', stream=True
)
logs = list(out)
assert logs[0] == b'hello\n'
assert logs[1] == b'world\n'
@pytest.mark.timeout(5)
@pytest.mark.skipif(os.environ.get('DOCKER_HOST', '').startswith('ssh://'),
reason='No cancellable streams over SSH')
def test_run_with_streamed_logs_and_cancel(self):
client = docker.from_env(version=TEST_API_VERSION)
out = client.containers.run(
'alpine', 'sh -c "echo hello && echo world"', stream=True
)
threading.Timer(1, out.close).start()
logs = list(out)
assert len(logs) == 2
assert logs[0] == b'hello\n'
assert logs[1] == b'world\n'
def test_run_with_proxy_config(self):
client = docker.from_env(version=TEST_API_VERSION)
client.api._proxy_configs = docker.utils.proxy.ProxyConfig(
ftp='sakuya.jp:4967'
)
out = client.containers.run('alpine', 'sh -c "env"')
assert b'FTP_PROXY=sakuya.jp:4967\n' in out
assert b'ftp_proxy=sakuya.jp:4967\n' in out
def test_get(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 300", detach=True)
self.tmp_containers.append(container.id)
assert client.containers.get(container.id).attrs[
'Config']['Image'] == "alpine"
def test_list(self):
client = docker.from_env(version=TEST_API_VERSION)
container_id = client.containers.run(
"alpine", "sleep 300", detach=True).id
self.tmp_containers.append(container_id)
containers = [c for c in client.containers.list() if c.id ==
container_id]
assert len(containers) == 1
container = containers[0]
assert container.attrs['Config']['Image'] == 'alpine'
assert container.status == 'running'
assert container.image == client.images.get('alpine')
container.kill()
container.remove()
assert container_id not in [c.id for c in client.containers.list()]
def test_list_sparse(self):
client = docker.from_env(version=TEST_API_VERSION)
container_id = client.containers.run(
"alpine", "sleep 300", detach=True).id
self.tmp_containers.append(container_id)
containers = [c for c in client.containers.list(sparse=True) if c.id ==
container_id]
assert len(containers) == 1
container = containers[0]
assert container.attrs['Image'] == 'alpine'
assert container.status == 'running'
assert container.image == client.images.get('alpine')
with pytest.raises(docker.errors.DockerException):
_ = container.labels
container.kill()
container.remove()
assert container_id not in [c.id for c in client.containers.list()]
| ContainerCollectionTest |
python | tensorflow__tensorflow | tensorflow/python/distribute/coordinator/fault_tolerance_test.py | {
"start": 2379,
"end": 5670
} | class ____(test.TestCase):
"""Test preemptions during strategy init."""
def setUp(self):
super().setUp()
self.num_workers = 2
self.num_ps = 2
self._cluster = multi_worker_test_base.create_multi_process_cluster(
num_workers=self.num_workers,
num_ps=self.num_ps,
rpc_layer="grpc",
stream_output=True,
)
self._cluster_def = self._cluster.cluster_resolver.cluster_spec().as_dict()
self._cluster_def["chief"] = [
"localhost:%d" % multi_worker_test_base.pick_unused_port()
]
self._cluster_resolver = SimpleClusterResolver(
server_lib.ClusterSpec(self._cluster_def), rpc_layer="grpc"
)
self.thread_coord = thread_coordinator.Coordinator(
clean_stop_exception_types=[]
)
def tearDown(self):
super().tearDown()
self._cluster.stop()
self._cluster = None
def testWorkerPreemptionDuringInit(self):
worker_down = threading.Condition()
def _restart_in_thread(downtime_secs, restart_job):
# During initial connection in SetOrUpdateServerDef, there is a step that
# waits to receive a `GetStatus` response from each worker, in an attempt
# to ensure workers are available before sending `CreateContext` to them.
# In high-preemption environments, the time between these two requests can
# be enough for a worker to be preempted, at which point the
# `CreateContext` request will fail and training cannot start. b/298187302
# We solve this by enabling retries on `CreateContext` calls when used
# with PSS.
# This test reproduces this behavior by bringing down a worker during
# startup, then waiting long enough for strategy startup to reach the
# point where it is just waiting for a `GetStatus` response from that
# worker. Then, we restart the first downed worker and bring down another
# worker at the same time, so the strategy startup progresses to the
# `CreateContext` requests, but not before another worker is down.
def _restart_fn():
with self.thread_coord.stop_on_exception():
self._cluster.kill_task(restart_job, 0)
with worker_down:
worker_down.notify_all()
time.sleep(downtime_secs)
self._cluster.start_task(restart_job, 0)
self._cluster.kill_task(restart_job, 1)
time.sleep(downtime_secs)
self._cluster.start_task(restart_job, 1)
restart_thread = threading.Thread(target=_restart_fn)
restart_thread.start()
return restart_thread
_restart_in_thread(downtime_secs=2, restart_job="worker")
with worker_down:
worker_down.wait()
# The strategy's constructor would connect to the cluster.
self.strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self._cluster_resolver
)
self.cluster_coord = cluster_coordinator.ClusterCoordinator(self.strategy)
# Now do a simple training as a sanity check
model = fault_tolerance_test_base.Model(self.cluster_coord)
model.schedule_training_functions(2)
model.join_training_functions()
self.assertEqual(model.iterations.numpy(), 2)
if __name__ == "__main__":
v2_compat.enable_v2_behavior()
multi_process_runner.test_main()
| InitFaultToleranceTest |
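The sanity check at the end of the test drives training through a Model helper defined in fault_tolerance_test_base, which is not shown here. A hedged sketch of the public-API flow it exercises — build the strategy from a cluster resolver, wrap it in a ClusterCoordinator, schedule tf.function steps, then join — is below; the step function and variable are illustrative.

```python
import tensorflow as tf

def train(cluster_resolver, num_steps: int = 2) -> int:
    strategy = tf.distribute.experimental.ParameterServerStrategy(cluster_resolver)
    coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(strategy)

    with strategy.scope():
        iterations = tf.Variable(0, dtype=tf.int64)

    @tf.function
    def step_fn():
        iterations.assign_add(1)

    for _ in range(num_steps):
        coordinator.schedule(step_fn)   # dispatched asynchronously to a worker
    coordinator.join()                  # block until all scheduled steps finish
    return int(iterations.numpy())
```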
python | sympy__sympy | sympy/polys/polyclasses.py | {
"start": 3641,
"end": 41552
} | class ____(CantSympify, Generic[Er]):
"""Dense Multivariate Polynomials over `K`. """
__slots__ = ()
lev: int
dom: Domain[Er]
def __new__(cls, rep: dmp[Er], dom: Domain[Er], lev: int | None = None):
if lev is None:
rep, lev = dmp_validate(rep, dom)
elif not isinstance(rep, list):
raise CoercionFailed("expected list, got %s" % type(rep))
return cls.new(rep, dom, lev)
@classmethod
def new(cls, rep: dmp[Er], dom: Domain[Er], lev: int) -> DMP_Python[Er] | DUP_Flint[Er]:
# It would be too slow to call _validate_args always at runtime.
# Ideally this checking would be handled by a static type checker.
#
#cls._validate_args(rep, dom, lev)
if flint is not None:
if lev == 0 and _supported_flint_domain(dom):
return DUP_Flint._new(rep, dom, lev)
return DMP_Python._new(rep, dom, lev)
@property
def rep(f) -> dmp[Er]:
"""Get the representation of ``f``. """
sympy_deprecation_warning("""
Accessing the ``DMP.rep`` attribute is deprecated. The internal
representation of ``DMP`` instances can now be ``DUP_Flint`` when the
ground types are ``flint``. In this case the ``DMP`` instance does not
have a ``rep`` attribute. Use ``DMP.to_list()`` instead. Using
``DMP.to_list()`` also works in previous versions of SymPy.
""",
deprecated_since_version="1.13",
active_deprecations_target="dmp-rep",
)
return f.to_list()
def to_best(f) -> DMP[Er]:
"""Convert to DUP_Flint if possible.
This method should be used when the domain or level is changed and it
potentially becomes possible to convert from DMP_Python to DUP_Flint.
"""
if flint is not None:
if isinstance(f, DMP_Python) and f.lev == 0 and _supported_flint_domain(f.dom):
return DUP_Flint.new(f._rep, f.dom, f.lev)
return f
@classmethod
def _validate_args(cls, rep: dmp[Er], dom: Domain[Er], lev: int):
assert isinstance(dom, Domain)
assert isinstance(lev, int) and lev >= 0
def validate_rep(rep: dmp[Er], lev: int):
assert isinstance(rep, list)
if lev == 0:
assert all(dom.of_type(c) for c in rep)
else:
for r in rep:
validate_rep(r, lev - 1)
validate_rep(rep, lev)
@classmethod
def from_dict(cls, rep: dict[monom, Er], lev: int, dom: Domain[Er]) -> DMP[Er]:
rep_dmp = dmp_from_dict(rep, lev, dom)
return cls.new(rep_dmp, dom, lev)
@classmethod
def from_raw_dict(cls, rep: dict[int, Er], dom: Domain[Er]) -> DMP[Er]:
rep_dup = dup_from_raw_dict(rep, dom)
return cls.new(_dmp(rep_dup), dom, 0)
@classmethod
def from_list(cls, rep: dmp[Any], lev: int, dom: Domain[Er]) -> DMP[Er]:
"""Create an instance of ``cls`` given a list of native coefficients. """
return cls.new(dmp_convert(rep, lev, None, dom), dom, lev)
@classmethod
def from_sympy_list(cls, rep: dmp[Expr], lev: int, dom: Domain[Er]) -> DMP[Er]:
"""Create an instance of ``cls`` given a list of SymPy coefficients. """
return cls.new(dmp_from_sympy(rep, lev, dom), dom, lev)
def convert(f, dom: Domain[Es]) -> DMP[Es]:
"""Convert ``f`` to a ``DMP`` over the new domain. """
if f.dom == dom:
return f # type: ignore
elif f.lev or flint is None:
return f._convert(dom)
elif isinstance(f, DUP_Flint):
if _supported_flint_domain(dom):
return f._convert(dom)
else:
return f.to_DMP_Python()._convert(dom)
elif isinstance(f, DMP_Python):
if _supported_flint_domain(dom):
return f._convert(dom).to_DUP_Flint()
else:
return f._convert(dom)
else:
raise RuntimeError("unreachable code")
def _convert(f, dom: Domain[Es]) -> DMP[Es]:
raise NotImplementedError
@classmethod
def zero(cls, lev: int, dom: Domain[Er]) -> DMP[Er]:
return DMP(dmp_zero(lev, dom), dom, lev)
@classmethod
def one(cls, lev: int, dom: Domain[Er]) -> DMP[Er]:
one: dmp[Er] = dmp_one(lev, dom) # type: ignore
return DMP(one, dom, lev)
def _one(f) -> Self:
raise NotImplementedError
def __repr__(f) -> str:
return "%s(%s, %s)" % (f.__class__.__name__, f.to_list(), f.dom)
def __hash__(f) -> int:
return hash((f.__class__.__name__, f.to_tuple(), f.lev, f.dom))
def __getnewargs__(self) -> tuple[dmp[Er], Domain[Er], int]:
return self.to_list(), self.dom, self.lev
def ground_new(f, coeff: Er) -> DMP[Er]:
"""Construct a new ground instance of ``f``. """
raise NotImplementedError
@overload
def unify_DMP(f, g: Self) -> tuple[Self, Self]:
...
@overload
def unify_DMP(f, g: DMP[Es]) -> tuple[DMP[Et], DMP[Et]]:
...
def unify_DMP(f, g: DMP[Es]) -> tuple[DMP[Et], DMP[Et]]:
"""Unify and return ``DMP`` instances of ``f`` and ``g``. """
if not isinstance(g, DMP) or f.lev != g.lev:
raise UnificationFailed("Cannot unify %s with %s" % (f, g))
if f.dom == g.dom:
return f, g # type: ignore
else:
dom: Domain[Et] = f.dom.unify(g.dom)
return f.convert(dom), g.convert(dom)
def to_dict(f, zero: bool = False) -> dict[monom, Er]:
"""Convert ``f`` to a dict representation with native coefficients. """
if zero and not f:
return {(0,)*(f.lev + 1): f.dom.zero}
else:
return dmp_to_dict(f.to_list(), f.lev, f.dom)
def to_sympy_dict(f, zero: bool = False) -> dict[monom, Expr]:
"""Convert ``f`` to a dict representation with SymPy coefficients. """
to_sympy = f.dom.to_sympy
return {k: to_sympy(v) for k, v in f.to_dict(zero=zero).items()}
def to_sympy_list(f) -> dmp[Expr]:
"""Convert ``f`` to a list representation with SymPy coefficients. """
def sympify_nested_list(rep):
out = []
for val in rep:
if isinstance(val, list):
out.append(sympify_nested_list(val))
else:
out.append(f.dom.to_sympy(val))
return out
return sympify_nested_list(f.to_list())
def to_list(f) -> dmp[Er]:
"""Convert ``f`` to a list representation with native coefficients. """
raise NotImplementedError
def to_tuple(f) -> dmp_tup[Er]:
"""
Convert ``f`` to a tuple representation with native coefficients.
This is needed for hashing.
"""
raise NotImplementedError
def to_ring(f) -> DMP:
"""Make the ground domain a ring. """
return f.convert(f.dom.get_ring())
def to_field(f) -> DMP:
"""Make the ground domain a field. """
return f.convert(f.dom.get_field())
def to_exact(f) -> DMP:
"""Make the ground domain exact. """
return f.convert(f.dom.get_exact())
def slice(f, m: int, n: int, j: int = 0) -> DMP[Er]:
"""Take a continuous subsequence of terms of ``f``. """
if not f.lev and not j:
return f._slice(m, n)
else:
return f._slice_lev(m, n, j)
def _slice(f, m: int, n: int) -> DMP[Er]:
raise NotImplementedError
def _slice_lev(f, m: int, n: int, j: int) -> DMP[Er]:
raise NotImplementedError
def coeffs(f, order=None) -> list[Er]:
"""Returns all non-zero coefficients from ``f`` in lex order. """
return [ c for _, c in f.terms(order=order) ]
def monoms(f, order=None) -> list[monom]:
"""Returns all non-zero monomials from ``f`` in lex order. """
return [ m for m, _ in f.terms(order=order) ]
def terms(f, order: MonomialOrder | None = None) -> list[tuple[monom, Er]]:
"""Returns all non-zero terms from ``f`` in lex order. """
if f.is_zero:
zero_monom = (0,)*(f.lev + 1)
return [(zero_monom, f.dom.zero)]
else:
return f._terms(order=order)
def _terms(f, order: MonomialOrder | None = None) -> list[tuple[monom, Er]]:
raise NotImplementedError
def all_coeffs(f) -> list[Er]:
"""Returns all coefficients from ``f``. """
if f.lev:
raise PolynomialError('multivariate polynomials not supported')
if not f:
return [f.dom.zero]
else:
return list(_dup(f.to_list()))
def all_monoms(f) -> list[monom]:
"""Returns all monomials from ``f``. """
if f.lev:
raise PolynomialError('multivariate polynomials not supported')
n = f.degree()
if n < 0:
return [(0,)]
else:
return [ (n - i,) for i, c in enumerate(f.to_list()) ]
def all_terms(f) -> list[tuple[monom, Er]]:
"""Returns all terms from a ``f``. """
if f.lev:
raise PolynomialError('multivariate polynomials not supported')
n = f.degree()
if n < 0:
return [((0,), f.dom.zero)]
else:
return [ ((n - i,), c) for i, c in enumerate(_dup(f.to_list())) ]
def lift(f) -> DMP:
"""Convert algebraic coefficients to rationals. """
return f._lift().to_best()
def _lift(f) -> DMP:
raise NotImplementedError
def deflate(f) -> tuple[monom, DMP[Er]]:
"""Reduce degree of `f` by mapping `x_i^m` to `y_i`. """
raise NotImplementedError
def inject(f, front: bool = False) -> DMP:
"""Inject ground domain generators into ``f``. """
raise NotImplementedError
def eject(f, dom: PolynomialRing[Er], front: bool = False) -> DMP[PolyElement[Er]]:
"""Eject selected generators into the ground domain. """
raise NotImplementedError
def exclude(f) -> tuple[list[int], DMP[Er]]:
r"""
Remove useless generators from ``f``.
Returns the removed generators and the new excluded ``f``.
Examples
========
>>> from sympy.polys.polyclasses import DMP
>>> from sympy.polys.domains import ZZ
>>> DMP([[[ZZ(1)]], [[ZZ(1)], [ZZ(2)]]], ZZ).exclude()
([2], DMP_Python([[1], [1, 2]], ZZ))
"""
J, F = f._exclude()
return J, F.to_best()
def _exclude(f) -> tuple[list[int], DMP[Er]]:
raise NotImplementedError
def permute(f, P: list[int]) -> Self:
r"""
Returns a polynomial in `K[x_{P(1)}, ..., x_{P(n)}]`.
Examples
========
>>> from sympy.polys.polyclasses import DMP
>>> from sympy.polys.domains import ZZ
>>> DMP([[[ZZ(2)], [ZZ(1), ZZ(0)]], [[]]], ZZ).permute([1, 0, 2])
DMP_Python([[[2], []], [[1, 0], []]], ZZ)
>>> DMP([[[ZZ(2)], [ZZ(1), ZZ(0)]], [[]]], ZZ).permute([1, 2, 0])
DMP_Python([[[1], []], [[2, 0], []]], ZZ)
"""
return f._permute(P)
def _permute(f, P: list[int]) -> Self:
raise NotImplementedError
def terms_gcd(f) -> tuple[monom, Self]:
"""Remove GCD of terms from the polynomial ``f``. """
raise NotImplementedError
def abs(f) -> Self:
"""Make all coefficients in ``f`` positive. """
raise NotImplementedError
def neg(f) -> Self:
"""Negate all coefficients in ``f``. """
raise NotImplementedError
def add_ground(f, c: Er, /) -> Self:
"""Add an element of the ground domain to ``f``. """
return f._add_ground(f.dom.convert(c))
def sub_ground(f, c: Er, /) -> Self:
"""Subtract an element of the ground domain from ``f``. """
return f._sub_ground(f.dom.convert(c))
def mul_ground(f, c: Er, /) -> Self:
"""Multiply ``f`` by a an element of the ground domain. """
return f._mul_ground(f.dom.convert(c))
def quo_ground(f, c: Er, /) -> Self:
"""Quotient of ``f`` by a an element of the ground domain. """
return f._quo_ground(f.dom.convert(c))
def exquo_ground(f, c: Er, /) -> Self:
"""Exact quotient of ``f`` by a an element of the ground domain. """
return f._exquo_ground(f.dom.convert(c))
def add(f, g: Self, /) -> Self:
"""Add two multivariate polynomials ``f`` and ``g``. """
F, G = f.unify_DMP(g)
return F._add(G)
def sub(f, g: Self, /) -> Self:
"""Subtract two multivariate polynomials ``f`` and ``g``. """
F, G = f.unify_DMP(g)
return F._sub(G)
def mul(f, g: Self, /) -> Self:
"""Multiply two multivariate polynomials ``f`` and ``g``. """
F, G = f.unify_DMP(g)
return F._mul(G)
def sqr(f) -> Self:
"""Square a multivariate polynomial ``f``. """
return f._sqr()
def pow(f, n: int, /) -> Self:
"""Raise ``f`` to a non-negative power ``n``. """
if not isinstance(n, int):
raise TypeError("``int`` expected, got %s" % type(n))
return f._pow(n)
def pdiv(f, g: Self, /) -> tuple[Self, Self]:
"""Polynomial pseudo-division of ``f`` and ``g``. """
F, G = f.unify_DMP(g)
return F._pdiv(G)
def prem(f, g: Self, /) -> Self:
"""Polynomial pseudo-remainder of ``f`` and ``g``. """
F, G = f.unify_DMP(g)
return F._prem(G)
def pquo(f, g: Self, /) -> Self:
"""Polynomial pseudo-quotient of ``f`` and ``g``. """
F, G = f.unify_DMP(g)
return F._pquo(G)
def pexquo(f, g: Self, /) -> Self:
"""Polynomial exact pseudo-quotient of ``f`` and ``g``. """
F, G = f.unify_DMP(g)
return F._pexquo(G)
def div(f, g: Self, /) -> tuple[Self, Self]:
"""Polynomial division with remainder of ``f`` and ``g``. """
F, G = f.unify_DMP(g)
return F._div(G)
def rem(f, g: Self, /) -> Self:
"""Computes polynomial remainder of ``f`` and ``g``. """
F, G = f.unify_DMP(g)
return F._rem(G)
def quo(f, g: Self, /) -> Self:
"""Computes polynomial quotient of ``f`` and ``g``. """
F, G = f.unify_DMP(g)
return F._quo(G)
def exquo(f, g: Self, /) -> Self:
"""Computes polynomial exact quotient of ``f`` and ``g``. """
F, G = f.unify_DMP(g)
return F._exquo(G)
def _add_ground(f, c: Er, /) -> Self:
raise NotImplementedError
def _sub_ground(f, c: Er, /) -> Self:
raise NotImplementedError
def _mul_ground(f, c: Er, /) -> Self:
raise NotImplementedError
def _quo_ground(f, c: Er, /) -> Self:
raise NotImplementedError
def _exquo_ground(f, c: Er, /) -> Self:
raise NotImplementedError
def _add(f, g: Self, /) -> Self:
raise NotImplementedError
def _sub(f, g: Self, /) -> Self:
raise NotImplementedError
def _mul(f, g: Self, /) -> Self:
raise NotImplementedError
def _sqr(f) -> Self:
raise NotImplementedError
def _pow(f, n: int, /) -> Self:
raise NotImplementedError
def _pdiv(f, g: Self, /) -> tuple[Self, Self]:
raise NotImplementedError
def _prem(f, g: Self, /) -> Self:
raise NotImplementedError
def _pquo(f, g: Self, /) -> Self:
raise NotImplementedError
def _pexquo(f, g: Self, /) -> Self:
raise NotImplementedError
def _div(f, g: Self, /) -> tuple[Self, Self]:
raise NotImplementedError
def _rem(f, g: Self, /) -> Self:
raise NotImplementedError
def _quo(f, g: Self, /) -> Self:
raise NotImplementedError
def _exquo(f, g: Self, /) -> Self:
raise NotImplementedError
def degree(f, j: int = 0) -> int:
"""Returns the leading degree of ``f`` in ``x_j``. """
if not isinstance(j, int):
raise TypeError("``int`` expected, got %s" % type(j))
return f._degree(j)
def _degree(f, j: int, /) -> int:
raise NotImplementedError
def degree_list(f) -> tuple[int, ...]:
"""Returns a list of degrees of ``f``. """
raise NotImplementedError
def total_degree(f) -> int:
"""Returns the total degree of ``f``. """
raise NotImplementedError
def homogenize(f, s: int) -> DMP[Er]:
"""Return homogeneous polynomial of ``f``"""
# XXX: Handle the zero polynomial case?
td = f.total_degree()
result: dict[monom, Er] = {}
new_symbol = (s == len(f.terms()[0][0]))
for term in f.terms():
d = sum(term[0])
if d < td:
i = td - d
else:
i = 0
if new_symbol:
result[term[0] + (i,)] = term[1]
else:
l = list(term[0])
l[s] += i
result[tuple(l)] = term[1]
return DMP.from_dict(result, f.lev + int(new_symbol), f.dom)
def homogeneous_order(f) -> NegativeInfinity | int | None:
"""Returns the homogeneous order of ``f``. """
if f.is_zero:
return -oo
monoms = f.monoms()
tdeg = sum(monoms[0])
for m in monoms:
_tdeg = sum(m)
if _tdeg != tdeg:
return None
return tdeg
def LC(f) -> Er:
"""Returns the leading coefficient of ``f``. """
raise NotImplementedError
def TC(f) -> Er:
"""Returns the trailing coefficient of ``f``. """
raise NotImplementedError
def nth(f, *N: int) -> Er:
"""Returns the ``n``-th coefficient of ``f``. """
if all(isinstance(n, int) for n in N):
return f._nth(N)
else:
raise TypeError("a sequence of integers expected")
def _nth(f, N: tuple[int, ...]) -> Er:
raise NotImplementedError
def max_norm(f) -> Er:
"""Returns maximum norm of ``f``. """
raise NotImplementedError
def l1_norm(f) -> Er:
"""Returns l1 norm of ``f``. """
raise NotImplementedError
def l2_norm_squared(f) -> Er:
"""Return squared l2 norm of ``f``. """
raise NotImplementedError
def clear_denoms(f) -> tuple[Er, Self]:
"""Clear denominators, but keep the ground domain. """
raise NotImplementedError
def integrate(f, m: int = 1, j: int = 0) -> Self:
"""Computes the ``m``-th order indefinite integral of ``f`` in ``x_j``. """
if not isinstance(m, int):
raise TypeError("``int`` expected, got %s" % type(m))
if not isinstance(j, int):
raise TypeError("``int`` expected, got %s" % type(j))
return f._integrate(m, j)
def _integrate(f, m: int, j: int) -> Self:
raise NotImplementedError
def diff(f, m: int = 1, j: int = 0) -> Self:
"""Computes the ``m``-th order derivative of ``f`` in ``x_j``. """
if not isinstance(m, int):
raise TypeError("``int`` expected, got %s" % type(m))
if not isinstance(j, int):
raise TypeError("``int`` expected, got %s" % type(j))
return f._diff(m, j)
def _diff(f, m: int, j: int) -> Self:
raise NotImplementedError
def eval(f, a: Any, j: int = 0) -> Any:
"""Evaluates ``f`` at the given point ``a`` in ``x_j``. """
if not isinstance(j, int):
raise TypeError("``int`` expected, got %s" % type(j))
elif not (0 <= j <= f.lev):
raise ValueError("invalid variable index %s" % j)
if f.lev:
return f._eval_lev(a, j)
else:
return f._eval(a)
def _eval(f, a: Any, /) -> Any:
raise NotImplementedError
def _eval_lev(f, a: Any, j: int, /) -> Any:
raise NotImplementedError
def half_gcdex(f, g: Self) -> tuple[Self, Self]:
"""Half extended Euclidean algorithm, if univariate. """
F, G = f.unify_DMP(g)
if F.lev:
raise ValueError('univariate polynomial expected')
return F._half_gcdex(G)
def _half_gcdex(f, g: Self) -> tuple[Self, Self]:
raise NotImplementedError
def gcdex(f, g: Self) -> tuple[Self, Self, Self]:
"""Extended Euclidean algorithm, if univariate. """
F, G = f.unify_DMP(g)
if F.lev:
raise ValueError('univariate polynomial expected')
if not F.dom.is_Field:
raise DomainError('ground domain must be a field')
return F._gcdex(G)
def _gcdex(f, g: Self) -> tuple[Self, Self, Self]:
raise NotImplementedError
def invert(f, g: Self) -> Self:
"""Invert ``f`` modulo ``g``, if possible. """
F, G = f.unify_DMP(g)
if F.lev:
raise ValueError('univariate polynomial expected')
return F._invert(G)
def _invert(f, g: Self) -> Self:
raise NotImplementedError
def revert(f, n: int) -> Self:
"""Compute ``f**(-1)`` mod ``x**n``. """
if f.lev:
raise ValueError('univariate polynomial expected')
return f._revert(n)
def _revert(f, n: int) -> Self:
raise NotImplementedError
def subresultants(f, g: Self) -> list[Self]:
"""Computes subresultant PRS sequence of ``f`` and ``g``. """
F, G = f.unify_DMP(g)
return F._subresultants(G)
def _subresultants(f, g: Self) -> list[Self]:
raise NotImplementedError
@overload
def resultant(
f, g: Self, includePRS: Literal[True]
) -> tuple[DMP[Er] | Er, list[DMP[Er]]]: ...
@overload
def resultant(f, g: Self, includePRS: Literal[False] = ...) -> DMP[Er] | Er: ...
def resultant(
f, g: Self, includePRS: bool = False
) -> DMP[Er] | Er | tuple[DMP[Er] | Er, list[DMP[Er]]]:
"""Computes resultant of ``f`` and ``g`` via PRS. """
F, G = f.unify_DMP(g)
if includePRS:
return F._resultant_includePRS(G)
else:
return F._resultant(G)
def _resultant(f, g: Self) -> DMP[Er] | Er:
raise NotImplementedError
def _resultant_includePRS(f, g: Self) -> tuple[DMP[Er] | Er, list[DMP[Er]]]:
raise NotImplementedError
def discriminant(f) -> Self | Er:
"""Computes discriminant of ``f``. """
raise NotImplementedError
def cofactors(f, g: Self) -> tuple[Self, Self, Self]:
"""Returns GCD of ``f`` and ``g`` and their cofactors. """
F, G = f.unify_DMP(g)
return F._cofactors(G)
def _cofactors(f, g: Self) -> tuple[Self, Self, Self]:
raise NotImplementedError
def gcd(f, g: Self) -> Self:
"""Returns polynomial GCD of ``f`` and ``g``. """
F, G = f.unify_DMP(g)
return F._gcd(G)
def _gcd(f, g: Self) -> Self:
raise NotImplementedError
def lcm(f, g: Self) -> Self:
"""Returns polynomial LCM of ``f`` and ``g``. """
F, G = f.unify_DMP(g)
return F._lcm(G)
def _lcm(f, g: Self) -> Self:
raise NotImplementedError
@overload
def cancel(f: Self, g: Self, include: Literal[True] = ...) -> tuple[Self, Self]:
...
@overload
def cancel(f: Self, g: Self, include: Literal[False]) -> tuple[Er, Er, Self, Self]:
...
def cancel(f: Self, g: Self, include: bool = True) -> tuple[Self, Self] | tuple[Er, Er, Self, Self]:
"""Cancel common factors in a rational function ``f/g``. """
F, G = f.unify_DMP(g)
if include:
return F._cancel_include(G)
else:
return F._cancel(G)
def _cancel(f, g: Self) -> tuple[Er, Er, Self, Self]:
raise NotImplementedError
def _cancel_include(f, g: Self) -> tuple[Self, Self]:
raise NotImplementedError
def trunc(f, p: Er) -> Self:
"""Reduce ``f`` modulo a constant ``p``. """
return f._trunc(f.dom.convert(p))
def _trunc(f, p: Er) -> Self:
raise NotImplementedError
def monic(f) -> Self:
"""Divides all coefficients by ``LC(f)``. """
raise NotImplementedError
def content(f) -> Er:
"""Returns GCD of polynomial coefficients. """
raise NotImplementedError
def primitive(f) -> tuple[Er, Self]:
"""Returns content and a primitive form of ``f``. """
raise NotImplementedError
def compose(f, g: Self) -> Self:
"""Computes functional composition of ``f`` and ``g``. """
F, G = f.unify_DMP(g)
return F._compose(G)
def _compose(f, g: Self) -> Self:
raise NotImplementedError
def decompose(f) -> list[Self]:
"""Computes functional decomposition of ``f``. """
if f.lev:
raise ValueError('univariate polynomial expected')
return f._decompose()
def _decompose(f) -> list[Self]:
raise NotImplementedError
def shift(f, a: Er) -> Self:
"""Efficiently compute Taylor shift ``f(x + a)``. """
if f.lev:
raise ValueError('univariate polynomial expected')
return f._shift(f.dom.convert(a))
def _shift(f, a: Er) -> Self:
raise NotImplementedError
def shift_list(f, a: list[Any]) -> Self:
"""Efficiently compute Taylor shift ``f(X + A)``. """
a = [f.dom.convert(ai) for ai in a]
return f._shift_list(a)
def _shift_list(f, a: list[Er]) -> Self:
raise NotImplementedError
def transform(f, p: Self, q: Self) -> Self:
"""Evaluate functional transformation ``q**n * f(p/q)``."""
if f.lev:
raise ValueError('univariate polynomial expected')
P, Q = p.unify_DMP(q)
F, P = f.unify_DMP(P)
F, Q = F.unify_DMP(Q)
return F._transform(P, Q)
def _transform(f, p: Self, q: Self) -> Self:
raise NotImplementedError
def sturm(f) -> list[Self]:
"""Computes the Sturm sequence of ``f``. """
if f.lev:
raise ValueError('univariate polynomial expected')
return f._sturm()
def _sturm(f) -> list[Self]:
raise NotImplementedError
def cauchy_upper_bound(f) -> Er:
"""Computes the Cauchy upper bound on the roots of ``f``. """
if f.lev:
raise ValueError('univariate polynomial expected')
return f._cauchy_upper_bound()
def _cauchy_upper_bound(f) -> Er:
raise NotImplementedError
def cauchy_lower_bound(f) -> Er:
"""Computes the Cauchy lower bound on the nonzero roots of ``f``. """
if f.lev:
raise ValueError('univariate polynomial expected')
return f._cauchy_lower_bound()
def _cauchy_lower_bound(f) -> Er:
raise NotImplementedError
def mignotte_sep_bound_squared(f) -> Er:
"""Computes the squared Mignotte bound on root separations of ``f``. """
if f.lev:
raise ValueError('univariate polynomial expected')
return f._mignotte_sep_bound_squared()
def _mignotte_sep_bound_squared(f) -> Er:
raise NotImplementedError
def gff_list(f) -> list[tuple[Self, int]]:
"""Computes greatest factorial factorization of ``f``. """
if f.lev:
raise ValueError('univariate polynomial expected')
return f._gff_list()
def _gff_list(f) -> list[tuple[Self, int]]:
raise NotImplementedError
def norm(f) -> DMP:
"""Computes ``Norm(f)``."""
raise NotImplementedError
def sqf_norm(f) -> tuple[list[int], Self, DMP]:
"""Computes square-free norm of ``f``. """
raise NotImplementedError
def sqf_part(f) -> Self:
"""Computes square-free part of ``f``. """
raise NotImplementedError
def sqf_list(f, all: bool = False) -> tuple[Er, list[tuple[Self, int]]]:
"""Returns a list of square-free factors of ``f``. """
raise NotImplementedError
def sqf_list_include(f, all: bool = False) -> list[tuple[Self, int]]:
"""Returns a list of square-free factors of ``f``. """
raise NotImplementedError
def factor_list(f) -> tuple[Er, list[tuple[Self, int]]]:
"""Returns a list of irreducible factors of ``f``. """
raise NotImplementedError
def factor_list_include(f) -> list[tuple[Self, int]]:
"""Returns a list of irreducible factors of ``f``. """
raise NotImplementedError
@overload
def intervals(
f,
all: Literal[False] = ...,
eps: MPQ | None = ...,
inf: MPQ | None = ...,
sup: MPQ | None = ...,
fast: bool = ...,
sqf: bool = ...,
) -> list[tuple[tuple[MPQ, MPQ], int]]:
...
@overload
def intervals(
f,
all: Literal[True],
eps: MPQ | None = ...,
inf: MPQ | None = ...,
sup: MPQ | None = ...,
fast: bool = ...,
sqf: bool = ...,
) -> list[tuple[tuple[tuple[MPQ, MPQ], tuple[MPQ, MPQ]], int]]:
...
def intervals(
f,
all: bool = False,
eps: MPQ | None = None,
inf: MPQ | None = None,
sup: MPQ | None = None,
fast: bool = False,
sqf: bool = False,
) -> (
list[tuple[tuple[MPQ, MPQ], int]]
| list[tuple[tuple[tuple[MPQ, MPQ], tuple[MPQ, MPQ]], int]]
):
"""Compute isolating intervals for roots of ``f``. """
if f.lev:
raise PolynomialError("Cannot isolate roots of a multivariate polynomial")
if all and sqf:
return f._isolate_all_roots_sqf(eps=eps, inf=inf, sup=sup, fast=fast)
elif all and not sqf:
return f._isolate_all_roots(eps=eps, inf=inf, sup=sup, fast=fast)
elif not all and sqf:
return f._isolate_real_roots_sqf(eps=eps, inf=inf, sup=sup, fast=fast)
else:
return f._isolate_real_roots(eps=eps, inf=inf, sup=sup, fast=fast)
def _isolate_real_roots(
f, eps: MPQ | None, inf: MPQ | None, sup: MPQ | None, fast: bool
) -> list[tuple[tuple[MPQ, MPQ], int]]:
raise NotImplementedError
def _isolate_real_roots_sqf(
f, eps: MPQ | None, inf: MPQ | None, sup: MPQ | None, fast: bool
) -> list[tuple[tuple[MPQ, MPQ], int]]:
raise NotImplementedError
def _isolate_all_roots(
f, eps: MPQ | None, inf: MPQ | None, sup: MPQ | None, fast: bool
) -> list[tuple[tuple[tuple[MPQ, MPQ], tuple[MPQ, MPQ]], int]]:
raise NotImplementedError
def _isolate_all_roots_sqf(
f, eps: MPQ | None, inf: MPQ | None, sup: MPQ | None, fast: bool
) -> list[tuple[tuple[tuple[MPQ, MPQ], tuple[MPQ, MPQ]], int]]:
raise NotImplementedError
def refine_root(
f, s: MPQ, t: MPQ, eps: MPQ | None, steps: int | None, fast: bool
) -> tuple[MPQ, MPQ]:
"""
Refine an isolating interval to the given precision.
``eps`` should be a rational number.
"""
if f.lev:
raise PolynomialError(
"Cannot refine a root of a multivariate polynomial")
return f._refine_real_root(s, t, eps=eps, steps=steps, fast=fast)
def _refine_real_root(
f, s: MPQ, t: MPQ, eps: MPQ | None, steps: int | None, fast: bool
) -> tuple[MPQ, MPQ]:
raise NotImplementedError
def count_real_roots(f, inf: MPQ | None = None, sup: MPQ | None = None) -> int:
"""Return the number of real roots of ``f`` in ``[inf, sup]``. """
raise NotImplementedError
def count_complex_roots(
f, inf: tuple[MPQ, MPQ] | None = None, sup: tuple[MPQ, MPQ] | None = None
) -> int:
"""Return the number of complex roots of ``f`` in ``[inf, sup]``. """
raise NotImplementedError
def hurwitz_conditions(f) -> list[Er]:
"""Computes the Routh Hurwitz criteria of ``f``. """
raise NotImplementedError
def schur_conditions(f) -> list[Er]:
"""Computes the Schur conditions of ``f``. """
raise NotImplementedError
@property
def is_zero(f) -> bool:
"""Returns ``True`` if ``f`` is a zero polynomial. """
raise NotImplementedError
@property
def is_one(f) -> bool:
"""Returns ``True`` if ``f`` is a unit polynomial. """
raise NotImplementedError
@property
def is_ground(f) -> bool:
"""Returns ``True`` if ``f`` is an element of the ground domain. """
raise NotImplementedError
@property
def is_sqf(f) -> bool:
"""Returns ``True`` if ``f`` is a square-free polynomial. """
raise NotImplementedError
@property
def is_monic(f) -> bool:
"""Returns ``True`` if the leading coefficient of ``f`` is one. """
raise NotImplementedError
@property
def is_primitive(f) -> bool:
"""Returns ``True`` if the GCD of the coefficients of ``f`` is one. """
raise NotImplementedError
@property
def is_linear(f) -> bool:
"""Returns ``True`` if ``f`` is linear in all its variables. """
raise NotImplementedError
@property
def is_quadratic(f) -> bool:
"""Returns ``True`` if ``f`` is quadratic in all its variables. """
raise NotImplementedError
@property
def is_monomial(f) -> bool:
"""Returns ``True`` if ``f`` is zero or has only one term. """
raise NotImplementedError
@property
def is_homogeneous(f) -> bool:
"""Returns ``True`` if ``f`` is a homogeneous polynomial. """
raise NotImplementedError
@property
def is_irreducible(f) -> bool:
"""Returns ``True`` if ``f`` has no factors over its domain. """
raise NotImplementedError
@property
def is_cyclotomic(f) -> bool:
"""Returns ``True`` if ``f`` is a cyclotomic polynomial. """
raise NotImplementedError
def __abs__(f) -> Self:
return f.abs()
# XXX: Maybe remove the dunder methods like __add__? It is not clear that
# they are really needed e.g. Poly does not use them and for internal code
# it is better to use strictly typed methods like add, add_ground etc.
# We need type: ignore below because otherwise there is not any way to
# satisfy both mypy and pyright. The checks in __add__ etc are not really
# sufficient to know that the types are correct but are good enough given
# how DMP is actually used.
def __neg__(f) -> Self:
return f.neg()
def __add__(f, g: Self | Er) -> Self:
if isinstance(g, DMP):
return f.add(g) # type: ignore
else:
try:
return f.add_ground(g)
except CoercionFailed:
return NotImplemented
def __radd__(f, g: Er) -> Self:
return f.__add__(g)
def __sub__(f, g: Self | Er) -> Self:
if isinstance(g, DMP):
return f.sub(g) # type: ignore
else:
try:
return f.sub_ground(g)
except CoercionFailed:
return NotImplemented
def __rsub__(f, g: Er) -> Self:
return (-f).__add__(g)
def __mul__(f, g: Self | Er) -> Self:
if isinstance(g, DMP):
return f.mul(g) # type: ignore
else:
try:
return f.mul_ground(g)
except CoercionFailed:
return NotImplemented
def __rmul__(f, g: Er) -> Self:
return f.__mul__(g)
def __truediv__(f, g: Self | Er) -> Self:
if isinstance(g, DMP):
return f.exquo(g) # type: ignore
else:
try:
                # Divide by the ground element g (mirroring __rtruediv__ below);
                # the earlier code multiplied by mistake, as its XXX note said.
                return f.exquo(f._one().mul_ground(g))
except CoercionFailed:
return NotImplemented
def __rtruediv__(f, g: Self | Er) -> Self:
if isinstance(g, DMP):
return g.exquo(f) # type: ignore
else:
try:
return f._one().mul_ground(g).exquo(f)
except CoercionFailed:
return NotImplemented
def __pow__(f, n: int) -> Self:
return f.pow(n)
def __divmod__(f, g: Self) -> tuple[Self, Self]:
return f.div(g)
def __mod__(f, g: Self) -> Self:
return f.rem(g)
def __floordiv__(f, g: Self | Er) -> Self:
if isinstance(g, DMP):
return f.quo(g) # type: ignore
else:
try:
return f.quo_ground(g)
except TypeError:
return NotImplemented
def __eq__(f, g: object) -> bool:
if f is g:
return True
if not isinstance(g, DMP):
return NotImplemented
try:
F, G = f.unify_DMP(g)
except UnificationFailed:
return False
else:
return F._strict_eq(G)
def _strict_eq(f, g: Self) -> bool:
raise NotImplementedError
def eq(f, g: Self, strict: bool = False) -> bool:
if not strict:
return f == g
else:
return f._strict_eq(g)
def ne(f, g: Self, strict: bool = False) -> bool:
return not f.eq(g, strict=strict)
def __lt__(f, g: Self) -> bool:
F, G = f.unify_DMP(g)
return F.to_list() < G.to_list()
def __le__(f, g: Self) -> bool:
F, G = f.unify_DMP(g)
return F.to_list() <= G.to_list()
def __gt__(f, g: Self) -> bool:
F, G = f.unify_DMP(g)
return F.to_list() > G.to_list()
def __ge__(f, g: Self) -> bool:
F, G = f.unify_DMP(g)
return F.to_list() >= G.to_list()
def __bool__(f) -> bool:
return not f.is_zero
# XXX: mypy complains below that dmp is not valid as a type for some reason.
# Pretty sure this is a bug in mypy but we work around it by defining a new
# type alias for dmp.
_T = TypeVar("_T")
_dmp2: TypeAlias = "list[_dmp2[_T]]"
| DMP |
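The DMP methods in the row above (gcd, resultant, root isolation and refinement) are internal and are normally reached through sympy's public Poly wrapper. A minimal sketch, assuming a standard sympy installation; the polynomials below are made up for illustration:

# Sketch: the operations DMP implements, exercised via the public Poly API.
from sympy import Poly, QQ
from sympy.abc import x

f = Poly(x**3 - 2*x**2 - x + 2, x, domain=QQ)   # (x - 1)(x + 1)(x - 2)
g = Poly(x**2 - 1, x, domain=QQ)                 # (x - 1)(x + 1)

print(f.gcd(g))        # common factor, Poly(x**2 - 1, x, domain='QQ')
print(f.resultant(g))  # 0, since f and g share roots; computed via a PRS as in DMP.resultant
print(f.intervals())   # isolating intervals for the three real roots of f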
python | apache__airflow | providers/google/tests/unit/google/marketing_platform/sensors/test_display_video.py | {
"start": 1202,
"end": 2627
} | class ____:
@mock.patch(f"{MODULE_NAME}.GoogleDisplayVideo360Hook")
@mock.patch(f"{MODULE_NAME}.BaseSensorOperator")
def test_poke(self, mock_base_op, hook_mock):
operation_name = "operation_name"
op = GoogleDisplayVideo360GetSDFDownloadOperationSensor(
operation_name=operation_name,
api_version=API_VERSION,
task_id="test_task",
)
op.poke(context=None)
hook_mock.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
api_version=API_VERSION,
impersonation_chain=None,
)
hook_mock.return_value.get_sdf_download_operation.assert_called_once_with(
operation_name=operation_name
)
@mock.patch(f"{MODULE_NAME}.GoogleDisplayVideo360Hook")
@mock.patch(f"{MODULE_NAME}.BaseSensorOperator")
def test_poke_with_exception(
self,
mock_base_op,
hook_mock,
):
operation_name = "operation_name"
op = GoogleDisplayVideo360GetSDFDownloadOperationSensor(
operation_name=operation_name,
api_version=API_VERSION,
task_id="test_task",
)
hook_mock.return_value.get_sdf_download_operation.return_value = {"error": "error"}
with pytest.raises(AirflowException, match="The operation finished in error with error"):
op.poke(context={})
| TestGoogleDisplayVideo360Sensor |
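The test above mocks the hook behind a poke-style sensor. As a rough sketch of the pattern being exercised, not the actual Google provider code; the get_operation helper below is a hypothetical stand-in for the hook call:

from airflow.exceptions import AirflowException
from airflow.sensors.base import BaseSensorOperator


def get_operation(name: str) -> dict:
    # Hypothetical stand-in for the hook's get_sdf_download_operation call.
    return {"done": True}


class OperationDoneSensor(BaseSensorOperator):
    """Pokes until a named long-running operation reports completion."""

    def __init__(self, *, operation_name: str, **kwargs):
        super().__init__(**kwargs)
        self.operation_name = operation_name

    def poke(self, context) -> bool:
        operation = get_operation(self.operation_name)
        if "error" in operation:
            raise AirflowException(f"The operation finished in error with {operation['error']}")
        return operation.get("done", False)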
python | getsentry__sentry | src/sentry/seer/models.py | {
"start": 1462,
"end": 1595
} | class ____(BaseModel):
trace_ids: list[str]
suggested_investigations: list[PageWebVitalsInsight]
| SummarizePageWebVitalsResponse |
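A short sketch of how a response model like the one above might be validated, assuming pydantic v2; the PageWebVitalsInsight stand-in and the payload are invented for illustration:

from pydantic import BaseModel


class PageWebVitalsInsight(BaseModel):
    # Simplified stand-in; the real model lives elsewhere in the Sentry codebase.
    explanation: str


class SummarizePageWebVitalsResponse(BaseModel):
    trace_ids: list[str]
    suggested_investigations: list[PageWebVitalsInsight]


resp = SummarizePageWebVitalsResponse.model_validate(
    {"trace_ids": ["abc123"], "suggested_investigations": [{"explanation": "slow LCP"}]}
)
print(resp.trace_ids)  # ['abc123']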