language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | Netflix__metaflow | metaflow/plugins/azure/azure_secret_manager_secrets_provider.py | {
"start": 1172,
"end": 11107
} | class ____(SecretsProvider):
TYPE = "az-key-vault"
key_vault_domains = [
".vault.azure.net",
".vault.azure.cn",
".vault.usgovcloudapi.net",
".vault.microsoftazure.de",
]
supported_vault_object_types = ["secrets"]
# https://learn.microsoft.com/en-us/azure/key-vault/general/about-keys-secrets-certificates has details on vault name structure
# Vault name and Managed HSM pool name must be a 3-24 character string, containing only 0-9, a-z, A-Z, and not consecutive -.
def _is_valid_vault_name(self, vault_name):
vault_name_pattern = r"^(?!.*--)[a-zA-Z0-9-]{3,24}$"
return re.match(vault_name_pattern, vault_name) is not None
# The type of the object can be, "keys", "secrets", or "certificates".
# Currently only secrets will be supported
def _is_valid_object_type(self, secret_type):
for type in self.supported_vault_object_types:
if secret_type == type:
return True
return False
# The secret name must be a 1-127 character string, starting with a letter and containing only 0-9, a-z, A-Z, and -.
def _is_valid_secret_name(self, secret_name):
secret_name_pattern = r"^[a-zA-Z][a-zA-Z0-9-]{0,126}$"
return re.match(secret_name_pattern, secret_name) is not None
# An object-version is a system-generated, 32 character string identifier that is optionally used to address a unique version of an object.
def _is_valid_object_version(self, secret_version):
object_version_pattern = r"^[a-zA-Z0-9]{32}$"
return re.match(object_version_pattern, secret_version) is not None
# This function will check if the secret_id is fully qualified url. It will return True iff the secret_id is of the form:
# https://myvault.vault.azure.net/secrets/mysecret/ec96f02080254f109c51a1f14cdb1931 OR
# https://myvault.vault.azure.net/secrets/mysecret/
# validating the above as per recommendations in https://devblogs.microsoft.com/azure-sdk/guidance-for-applications-using-the-key-vault-libraries/
def _is_secret_id_fully_qualified_url(self, secret_id):
# if the secret_id is None/empty/does not start with https then return false
if secret_id is None or secret_id == "" or not secret_id.startswith("https://"):
return False
try:
parsed_vault_url = urlparse(secret_id)
except ValueError:
print("invalid vault url", file=sys.stderr)
return False
hostname = parsed_vault_url.netloc
k_v_domain_found = False
actual_k_v_domain = ""
for k_v_domain in self.key_vault_domains:
if k_v_domain in hostname:
k_v_domain_found = True
actual_k_v_domain = k_v_domain
break
if not k_v_domain_found:
# the secret_id started with https:// however the key_vault_domains
# were not present in the secret_id which means
raise MetaflowAzureKeyVaultBadVault("bad key vault domain %s" % secret_id)
# given the secret_id seems to have a valid key vault domain
# lets verify that the vault name corresponds to its regex.
vault_name = hostname[: -len(actual_k_v_domain)]
# verify the vault name pattern
if not self._is_valid_vault_name(vault_name):
raise MetaflowAzureKeyVaultBadVault("bad key vault name %s" % vault_name)
path_parts = parsed_vault_url.path.strip("/").split("/")
total_path_parts = len(path_parts)
if total_path_parts < 2 or total_path_parts > 3:
raise MetaflowAzureKeyVaultBadSecretPath(
"bad secret uri path %s" % path_parts
)
object_type = path_parts[0]
if not self._is_valid_object_type(object_type):
raise MetaflowAzureKeyVaultBadSecretType("bad secret type %s" % object_type)
secret_name = path_parts[1]
if not self._is_valid_secret_name(secret_name=secret_name):
raise MetaflowAzureKeyVaultBadSecretName("bad secret name %s" % secret_name)
if total_path_parts == 3:
if not self._is_valid_object_version(path_parts[2]):
raise MetaflowAzureKeyVaultBadSecretVersion(
"bad secret version %s" % path_parts[2]
)
return True
# This function will validate the correctness of the partial secret id.
# It will attempt to construct the fully qualified secret URL internally and
# call the _is_secret_id_fully_qualified_url to check validity
def _is_partial_secret_valid(self, secret_id):
secret_parts = secret_id.strip("/").split("/")
total_secret_parts = len(secret_parts)
if total_secret_parts < 1 or total_secret_parts > 2:
return False
# since the secret_id is supposedly a partial id, the AZURE_KEY_VAULT_PREFIX
# must be set.
if not AZURE_KEY_VAULT_PREFIX:
raise ValueError(
"cannot use simple secret id without setting METAFLOW_AZURE_KEY_VAULT_PREFIX. %s"
% AZURE_KEY_VAULT_PREFIX
)
domain = AZURE_KEY_VAULT_PREFIX.rstrip("/")
full_secret = "%s/secrets/%s" % (domain, secret_id)
if not self._is_secret_id_fully_qualified_url(full_secret):
return False
return True
def _sanitize_key_as_env_var(self, key):
"""
Sanitize a key as an environment variable name.
This is purely a convenience trade-off to cover common cases well, vs. introducing
ambiguities (e.g. did the final '_' come from '.', or '-' or is original?).
1/27/2023(jackie):
We start with few rules and should *sparingly* add more over time.
Also, it's TBD whether all possible providers will share the same sanitization logic.
Therefore we will keep this function private for now
"""
return key.replace("-", "_").replace(".", "_").replace("/", "_")
def get_secret_as_dict(self, secret_id, options={}, role=None):
# https://learn.microsoft.com/en-us/azure/app-service/app-service-key-vault-references?tabs=azure-cli has a lot of details on
# the patterns used in key vault
# Vault names and Managed HSM pool names are selected by the user and are globally unique.
# Vault name and Managed HSM pool name must be a 3-24 character string, containing only 0-9, a-z, A-Z, and not consecutive -.
# object-type The type of the object. As of 05/08/24 only "secrets", are supported
# object-name An object-name is a user provided name for and must be unique within a key vault. The name must be a 1-127 character string, starting with a letter and containing only 0-9, a-z, A-Z, and -.
# object-version An object-version is a system-generated, 32 character string identifier that is optionally used to address a unique version of an object.
# We allow these forms of secret_id:
#
# 1. Full path like https://<key-vault-name><.vault-domain>/secrets/<secret-name>/<secret-version>. This is what you
# see in Azure portal and is easy to copy paste.
#
# 2. Full path but without the version like https://<key-vault-name><.vault-domain>/secrets/<secret-name>
#
# 3. Simple string like mysecret. This corresponds to the SecretName.
#
# 4. Simple string with <secret-name>/<secret-version> suffix like mysecret/123
# The latter two forms require METAFLOW_AZURE_KEY_VAULT_PREFIX to be set.
# if the secret_id is None/empty/does not start with https then return false
if secret_id is None or secret_id == "":
raise MetaflowAzureKeyVaultBadSecret("empty secret id is not supported")
# check if the passed in secret is a short-form ( #3/#4 in the above comment)
if not secret_id.startswith("https://"):
# check if the secret_id is of form `secret_name` OR `secret_name/secret_version`
if not self._is_partial_secret_valid(secret_id=secret_id):
raise MetaflowAzureKeyVaultBadSecret(
"unsupported partial secret %s" % secret_id
)
domain = AZURE_KEY_VAULT_PREFIX.rstrip("/")
full_secret = "%s/secrets/%s" % (domain, secret_id)
# if the secret id is passed as a URL - then check if the url is fully qualified
if secret_id.startswith("https://"):
if not self._is_secret_id_fully_qualified_url(secret_id=secret_id):
raise MetaflowException("unsupported secret %s" % secret_id)
full_secret = secret_id
# at this point I know that the secret URL is good so we can start creating the Secret Client
az_credentials = create_cacheable_azure_credential()
res = urlparse(full_secret)
az_vault_url = "%s://%s" % (
res.scheme,
res.netloc,
) # https://myvault.vault.azure.net
secret_data = res.path.strip("/").split("/")[1:]
secret_name = secret_data[0]
secret_version = None
if len(secret_data) > 1:
secret_version = secret_data[1]
from azure.keyvault.secrets import SecretClient
client = SecretClient(vault_url=az_vault_url, credential=az_credentials)
key_vault_secret_val = client.get_secret(
name=secret_name, version=secret_version
)
result = {}
if options.get("env_var_name") is not None:
env_var_name = options["env_var_name"]
sanitized_key = self._sanitize_key_as_env_var(env_var_name)
else:
sanitized_key = self._sanitize_key_as_env_var(key_vault_secret_val.name)
response_payload = key_vault_secret_val.value
result[sanitized_key] = response_payload
return result
| AzureKeyVaultSecretsProvider |
python | pallets__click | src/click/shell_completion.py | {
"start": 5583,
"end": 9327
} | class ____:
"""Base class for providing shell completion support. A subclass for
a given shell will override attributes and methods to implement the
completion instructions (``source`` and ``complete``).
:param cli: Command being called.
:param prog_name: Name of the executable in the shell.
:param complete_var: Name of the environment variable that holds
the completion instruction.
.. versionadded:: 8.0
"""
name: t.ClassVar[str]
"""Name to register the shell as with :func:`add_completion_class`.
This is used in completion instructions (``{name}_source`` and
``{name}_complete``).
"""
source_template: t.ClassVar[str]
"""Completion script template formatted by :meth:`source`. This must
be provided by subclasses.
"""
def __init__(
self,
cli: Command,
ctx_args: cabc.MutableMapping[str, t.Any],
prog_name: str,
complete_var: str,
) -> None:
self.cli = cli
self.ctx_args = ctx_args
self.prog_name = prog_name
self.complete_var = complete_var
@property
def func_name(self) -> str:
"""The name of the shell function defined by the completion
script.
"""
safe_name = re.sub(r"\W*", "", self.prog_name.replace("-", "_"), flags=re.ASCII)
return f"_{safe_name}_completion"
def source_vars(self) -> dict[str, t.Any]:
"""Vars for formatting :attr:`source_template`.
By default this provides ``complete_func``, ``complete_var``,
and ``prog_name``.
"""
return {
"complete_func": self.func_name,
"complete_var": self.complete_var,
"prog_name": self.prog_name,
}
def source(self) -> str:
"""Produce the shell script that defines the completion
function. By default this ``%``-style formats
:attr:`source_template` with the dict returned by
:meth:`source_vars`.
"""
return self.source_template % self.source_vars()
def get_completion_args(self) -> tuple[list[str], str]:
"""Use the env vars defined by the shell script to return a
tuple of ``args, incomplete``. This must be implemented by
subclasses.
"""
raise NotImplementedError
def get_completions(self, args: list[str], incomplete: str) -> list[CompletionItem]:
"""Determine the context and last complete command or parameter
from the complete args. Call that object's ``shell_complete``
method to get the completions for the incomplete value.
:param args: List of complete args before the incomplete value.
:param incomplete: Value being completed. May be empty.
"""
ctx = _resolve_context(self.cli, self.ctx_args, self.prog_name, args)
obj, incomplete = _resolve_incomplete(ctx, args, incomplete)
return obj.shell_complete(ctx, incomplete)
def format_completion(self, item: CompletionItem) -> str:
"""Format a completion item into the form recognized by the
shell script. This must be implemented by subclasses.
:param item: Completion item to format.
"""
raise NotImplementedError
def complete(self) -> str:
"""Produce the completion data to send back to the shell.
By default this calls :meth:`get_completion_args`, gets the
completions, then calls :meth:`format_completion` for each
completion.
"""
args, incomplete = self.get_completion_args()
completions = self.get_completions(args, incomplete)
out = [self.format_completion(item) for item in completions]
return "\n".join(out)
| ShellComplete |
python | sphinx-doc__sphinx | sphinx/roles.py | {
"start": 6800,
"end": 8324
} | class ____(ReferenceRole):
_BASE_URL: Final = 'https://www.cve.org/CVERecord?id=CVE-'
def run(self) -> tuple[list[Node], list[system_message]]:
target_id = f'index-{self.env.new_serialno("index")}'
entries = [
(
'single',
_('Common Vulnerabilities and Exposures; CVE %s') % self.target,
target_id,
'',
None,
)
]
index = addnodes.index(entries=entries)
target = nodes.target('', '', ids=[target_id])
self.inliner.document.note_explicit_target(target)
try:
refuri = self.build_uri()
reference = nodes.reference(
'', '', internal=False, refuri=refuri, classes=['cve']
)
if self.has_explicit_title:
reference += nodes.strong(self.title, self.title)
else:
title = f'CVE {self.title}'
reference += nodes.strong(title, title)
except ValueError:
msg = self.inliner.reporter.error(
__('invalid CVE number %s') % self.target, line=self.lineno
)
prb = self.inliner.problematic(self.rawtext, self.rawtext, msg)
return [prb], [msg]
return [index, target, reference], []
def build_uri(self) -> str:
ret = self.target.partition('#')
if ret[1]:
return f'{CVE._BASE_URL}{ret[0]}#{ret[2]}'
return f'{CVE._BASE_URL}{ret[0]}'
| CVE |
python | wandb__wandb | tests/unit_tests/test_step_prepare.py | {
"start": 1444,
"end": 1616
} | class ____:
now: float = 0
def __call__(self) -> float:
return self.now
def sleep(self, duration: float) -> None:
self.now += duration
| MockClock |
python | openai__openai-python | src/openai/types/responses/response_output_text.py | {
"start": 1837,
"end": 2272
} | class ____(BaseModel):
file_id: str
"""The ID of the file."""
index: int
"""The index of the file in the list of files."""
type: Literal["file_path"]
"""The type of the file path. Always `file_path`."""
Annotation: TypeAlias = Annotated[
Union[AnnotationFileCitation, AnnotationURLCitation, AnnotationContainerFileCitation, AnnotationFilePath],
PropertyInfo(discriminator="type"),
]
| AnnotationFilePath |
python | pyparsing__pyparsing | pyparsing/exceptions.py | {
"start": 610,
"end": 9382
} | class ____(Exception):
"""base exception class for all parsing runtime exceptions"""
loc: int
msg: str
pstr: str
parser_element: typing.Any # "ParserElement"
args: tuple[str, int, typing.Optional[str]]
__slots__ = (
"loc",
"msg",
"pstr",
"parser_element",
"args",
)
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__(
self,
pstr: str,
loc: int = 0,
msg: typing.Optional[str] = None,
elem=None,
) -> None:
if msg is None:
msg, pstr = pstr, ""
self.loc = loc
self.msg = msg
self.pstr = pstr
self.parser_element = elem
self.args = (pstr, loc, msg)
@staticmethod
def explain_exception(exc: Exception, depth: int = 16) -> str:
"""
Method to take an exception and translate the Python internal traceback into a list
of the pyparsing expressions that caused the exception to be raised.
Parameters:
- exc - exception raised during parsing (need not be a ParseException, in support
of Python exceptions that might be raised in a parse action)
- depth (default=16) - number of levels back in the stack trace to list expression
and function names; if None, the full stack trace names will be listed; if 0, only
the failing input line, marker, and exception string will be shown
Returns a multi-line string listing the ParserElements and/or function names in the
exception's stack trace.
"""
import inspect
from .core import ParserElement
if depth is None:
depth = sys.getrecursionlimit()
ret: list[str] = []
if isinstance(exc, ParseBaseException):
ret.append(exc.line)
ret.append(f"{'^':>{exc.column}}")
ret.append(f"{type(exc).__name__}: {exc}")
if depth <= 0 or exc.__traceback__ is None:
return "\n".join(ret)
callers = inspect.getinnerframes(exc.__traceback__, context=depth)
seen: set[int] = set()
for ff in callers[-depth:]:
frm = ff[0]
f_self = frm.f_locals.get("self", None)
if isinstance(f_self, ParserElement):
if not frm.f_code.co_name.startswith(("parseImpl", "_parseNoCache")):
continue
if id(f_self) in seen:
continue
seen.add(id(f_self))
self_type = type(f_self)
ret.append(f"{self_type.__module__}.{self_type.__name__} - {f_self}")
elif f_self is not None:
self_type = type(f_self)
ret.append(f"{self_type.__module__}.{self_type.__name__}")
else:
code = frm.f_code
if code.co_name in ("wrapper", "<module>"):
continue
ret.append(code.co_name)
depth -= 1
if not depth:
break
return "\n".join(ret)
@classmethod
def _from_exception(cls, pe) -> ParseBaseException:
"""
internal factory method to simplify creating one type of ParseException
from another - avoids having __init__ signature conflicts among subclasses
"""
return cls(pe.pstr, pe.loc, pe.msg, pe.parser_element)
@cached_property
def line(self) -> str:
"""
Return the line of text where the exception occurred.
"""
return line(self.loc, self.pstr)
@cached_property
def lineno(self) -> int:
"""
Return the 1-based line number of text where the exception occurred.
"""
return lineno(self.loc, self.pstr)
@cached_property
def col(self) -> int:
"""
Return the 1-based column on the line of text where the exception occurred.
"""
return col(self.loc, self.pstr)
@cached_property
def column(self) -> int:
"""
Return the 1-based column on the line of text where the exception occurred.
"""
return col(self.loc, self.pstr)
@cached_property
def found(self) -> str:
if not self.pstr:
return ""
if self.loc >= len(self.pstr):
return "end of text"
# pull out next word at error location
found_match = _exception_word_extractor.match(self.pstr, self.loc)
if found_match is not None:
found_text = found_match.group(0)
else:
found_text = self.pstr[self.loc : self.loc + 1]
return repr(found_text).replace(r"\\", "\\")
# pre-PEP8 compatibility
@property
def parserElement(self):
warnings.warn(
"parserElement is deprecated, use parser_element",
DeprecationWarning,
stacklevel=2,
)
return self.parser_element
@parserElement.setter
def parserElement(self, elem):
warnings.warn(
"parserElement is deprecated, use parser_element",
DeprecationWarning,
stacklevel=2,
)
self.parser_element = elem
def copy(self):
return copy.copy(self)
def formatted_message(self) -> str:
"""
Output the formatted exception message.
Can be overridden to customize the message formatting or contents.
.. versionadded:: 3.2.0
"""
found_phrase = f", found {self.found}" if self.found else ""
return f"{self.msg}{found_phrase} (at char {self.loc}), (line:{self.lineno}, col:{self.column})"
def __str__(self) -> str:
"""
.. versionchanged:: 3.2.0
Now uses :meth:`formatted_message` to format message.
"""
try:
return self.formatted_message()
except Exception as ex:
return (
f"{type(self).__name__}: {self.msg}"
f" ({type(ex).__name__}: {ex} while formatting message)"
)
def __repr__(self):
return str(self)
def mark_input_line(
self, marker_string: typing.Optional[str] = None, **kwargs
) -> str:
"""
Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
"""
markerString: str = deprecate_argument(kwargs, "markerString", ">!<")
markerString = marker_string if marker_string is not None else markerString
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = f"{line_str[:line_column]}{markerString}{line_str[line_column:]}"
return line_str.strip()
def explain(self, depth: int = 16) -> str:
"""
Method to translate the Python internal traceback into a list
of the pyparsing expressions that caused the exception to be raised.
Parameters:
- depth (default=16) - number of levels back in the stack trace to list expression
and function names; if None, the full stack trace names will be listed; if 0, only
the failing input line, marker, and exception string will be shown
Returns a multi-line string listing the ParserElements and/or function names in the
exception's stack trace.
Example:
.. testcode::
# an expression to parse 3 integers
expr = pp.Word(pp.nums) * 3
try:
# a failing parse - the third integer is prefixed with "A"
expr.parse_string("123 456 A789")
except pp.ParseException as pe:
print(pe.explain(depth=0))
prints:
.. testoutput::
123 456 A789
^
ParseException: Expected W:(0-9), found 'A789' (at char 8), (line:1, col:9)
Note: the diagnostic output will include string representations of the expressions
that failed to parse. These representations will be more helpful if you use `set_name` to
give identifiable names to your expressions. Otherwise they will use the default string
forms, which may be cryptic to read.
Note: pyparsing's default truncation of exception tracebacks may also truncate the
stack of expressions that are displayed in the ``explain`` output. To get the full listing
of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True``
"""
return self.explain_exception(self, depth)
# Compatibility synonyms
# fmt: off
markInputline = replaced_by_pep8("markInputline", mark_input_line)
# fmt: on
| ParseBaseException |
python | streamlit__streamlit | lib/streamlit/web/server/oauth_authlib_routes.py | {
"start": 2330,
"end": 3783
} | class ____(tornado.web.RequestHandler):
"""Mixin for handling auth cookies. Added for compatibility with Tornado < 6.3.0."""
def initialize(self, base_url: str) -> None:
self.base_url = base_url
def redirect_to_base(self) -> None:
self.redirect(make_url_path(self.base_url, "/"))
def set_auth_cookie(self, user_info: dict[str, Any]) -> None:
serialized_cookie_value = json.dumps(user_info)
# log error if cookie value is larger than 4096 bytes
if len(serialized_cookie_value.encode()) > 4096:
_LOGGER.error(
"Authentication cookie size exceeds maximum browser limit of 4096 bytes. Authentication may fail."
)
try:
# We don't specify Tornado secure flag here because it leads to missing cookie on Safari.
# The OIDC flow should work only on secure context anyway (localhost or HTTPS),
# so specifying the secure flag here will not add anything in terms of security.
self.set_signed_cookie(
AUTH_COOKIE_NAME,
serialized_cookie_value,
httpOnly=True,
)
except AttributeError:
self.set_secure_cookie(
AUTH_COOKIE_NAME,
serialized_cookie_value,
httponly=True,
)
def clear_auth_cookie(self) -> None:
self.clear_cookie(AUTH_COOKIE_NAME)
| AuthHandlerMixin |
python | pytest-dev__pytest | testing/test_conftest.py | {
"start": 17520,
"end": 25095
} | class ____:
def _setup_tree(self, pytester: Pytester) -> dict[str, Path]: # for issue616
# example mostly taken from:
# https://mail.python.org/pipermail/pytest-dev/2014-September/002617.html
runner = pytester.mkdir("empty")
package = pytester.mkdir("package")
package.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
import pytest
@pytest.fixture
def fxtr():
return "from-package"
"""
),
encoding="utf-8",
)
package.joinpath("test_pkgroot.py").write_text(
textwrap.dedent(
"""\
def test_pkgroot(fxtr):
assert fxtr == "from-package"
"""
),
encoding="utf-8",
)
swc = package.joinpath("swc")
swc.mkdir()
swc.joinpath("__init__.py").touch()
swc.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
import pytest
@pytest.fixture
def fxtr():
return "from-swc"
"""
),
encoding="utf-8",
)
swc.joinpath("test_with_conftest.py").write_text(
textwrap.dedent(
"""\
def test_with_conftest(fxtr):
assert fxtr == "from-swc"
"""
),
encoding="utf-8",
)
snc = package.joinpath("snc")
snc.mkdir()
snc.joinpath("__init__.py").touch()
snc.joinpath("test_no_conftest.py").write_text(
textwrap.dedent(
"""\
def test_no_conftest(fxtr):
assert fxtr == "from-package" # No local conftest.py, so should
# use value from parent dir's
"""
),
encoding="utf-8",
)
print("created directory structure:")
for x in pytester.path.glob("**/"):
print(" " + str(x.relative_to(pytester.path)))
return {"runner": runner, "package": package, "swc": swc, "snc": snc}
# N.B.: "swc" stands for "subdir with conftest.py"
# "snc" stands for "subdir no [i.e. without] conftest.py"
@pytest.mark.parametrize(
"chdir,testarg,expect_ntests_passed",
[
# Effective target: package/..
("runner", "..", 3),
("package", "..", 3),
("swc", "../..", 3),
("snc", "../..", 3),
# Effective target: package
("runner", "../package", 3),
("package", ".", 3),
("swc", "..", 3),
("snc", "..", 3),
# Effective target: package/swc
("runner", "../package/swc", 1),
("package", "./swc", 1),
("swc", ".", 1),
("snc", "../swc", 1),
# Effective target: package/snc
("runner", "../package/snc", 1),
("package", "./snc", 1),
("swc", "../snc", 1),
("snc", ".", 1),
],
)
def test_parsefactories_relative_node_ids(
self, pytester: Pytester, chdir: str, testarg: str, expect_ntests_passed: int
) -> None:
"""#616"""
dirs = self._setup_tree(pytester)
print(f"pytest run in cwd: {dirs[chdir].relative_to(pytester.path)}")
print(f"pytestarg : {testarg}")
print(f"expected pass : {expect_ntests_passed}")
os.chdir(dirs[chdir])
reprec = pytester.inline_run(
testarg,
"-q",
"--traceconfig",
"--confcutdir",
pytester.path,
)
reprec.assertoutcome(passed=expect_ntests_passed)
@pytest.mark.parametrize(
"confcutdir,passed,error", [(".", 2, 0), ("src", 1, 1), (None, 1, 1)]
)
def test_search_conftest_up_to_inifile(
pytester: Pytester, confcutdir: str, passed: int, error: int
) -> None:
"""Test that conftest files are detected only up to a configuration file, unless
an explicit --confcutdir option is given.
"""
root = pytester.path
src = root.joinpath("src")
src.mkdir()
src.joinpath("pytest.ini").write_text("[pytest]", encoding="utf-8")
src.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
import pytest
@pytest.fixture
def fix1(): pass
"""
),
encoding="utf-8",
)
src.joinpath("test_foo.py").write_text(
textwrap.dedent(
"""\
def test_1(fix1):
pass
def test_2(out_of_reach):
pass
"""
),
encoding="utf-8",
)
root.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
import pytest
@pytest.fixture
def out_of_reach(): pass
"""
),
encoding="utf-8",
)
args = [str(src)]
if confcutdir:
args = [f"--confcutdir={root.joinpath(confcutdir)}"]
result = pytester.runpytest(*args)
match = ""
if passed:
match += f"*{passed} passed*"
if error:
match += f"*{error} error*"
result.stdout.fnmatch_lines(match)
def test_issue1073_conftest_special_objects(pytester: Pytester) -> None:
pytester.makeconftest(
"""\
class DontTouchMe(object):
def __getattr__(self, x):
raise Exception('cant touch me')
x = DontTouchMe()
"""
)
pytester.makepyfile(
"""\
def test_some():
pass
"""
)
res = pytester.runpytest()
assert res.ret == 0
def test_conftest_exception_handling(pytester: Pytester) -> None:
pytester.makeconftest(
"""\
raise ValueError()
"""
)
pytester.makepyfile(
"""\
def test_some():
pass
"""
)
res = pytester.runpytest()
assert res.ret == 4
assert "raise ValueError()" in [line.strip() for line in res.errlines]
def test_hook_proxy(pytester: Pytester) -> None:
"""Session's gethookproxy() would cache conftests incorrectly (#2016).
It was decided to remove the cache altogether.
"""
pytester.makepyfile(
**{
"root/demo-0/test_foo1.py": "def test1(): pass",
"root/demo-a/test_foo2.py": "def test1(): pass",
"root/demo-a/conftest.py": """\
def pytest_ignore_collect(collection_path, config):
return True
""",
"root/demo-b/test_foo3.py": "def test1(): pass",
"root/demo-c/test_foo4.py": "def test1(): pass",
}
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(
["*test_foo1.py*", "*test_foo3.py*", "*test_foo4.py*", "*3 passed*"]
)
def test_required_option_help(pytester: Pytester) -> None:
pytester.makeconftest("assert 0")
x = pytester.mkdir("x")
x.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
def pytest_addoption(parser):
parser.addoption("--xyz", action="store_true", required=True)
"""
),
encoding="utf-8",
)
result = pytester.runpytest("-h", x)
result.stdout.no_fnmatch_line("*argument --xyz is required*")
assert "general:" in result.stdout.str()
| TestConftestVisibility |
python | sympy__sympy | sympy/sets/fancysets.py | {
"start": 36460,
"end": 44783
} | class ____(Set):
r"""
Represents the Set of all Complex Numbers. It can represent a
region of Complex Plane in both the standard forms Polar and
Rectangular coordinates.
* Polar Form
Input is in the form of the ProductSet or Union of ProductSets
of the intervals of ``r`` and ``theta``, and use the flag ``polar=True``.
.. math:: Z = \{z \in \mathbb{C} \mid z = r\times (\cos(\theta) + I\sin(\theta)), r \in [\texttt{r}], \theta \in [\texttt{theta}]\}
* Rectangular Form
Input is in the form of the ProductSet or Union of ProductSets
of interval of x and y, the real and imaginary parts of the Complex numbers in a plane.
Default input type is in rectangular form.
.. math:: Z = \{z \in \mathbb{C} \mid z = x + Iy, x \in [\operatorname{re}(z)], y \in [\operatorname{im}(z)]\}
Examples
========
>>> from sympy import ComplexRegion, Interval, S, I, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 6)
>>> c1 = ComplexRegion(a*b) # Rectangular Form
>>> c1
CartesianComplexRegion(ProductSet(Interval(2, 3), Interval(4, 6)))
* c1 represents the rectangular region in complex plane
surrounded by the coordinates (2, 4), (3, 4), (3, 6) and
(2, 6), of the four vertices.
>>> c = Interval(1, 8)
>>> c2 = ComplexRegion(Union(a*b, b*c))
>>> c2
CartesianComplexRegion(Union(ProductSet(Interval(2, 3), Interval(4, 6)), ProductSet(Interval(4, 6), Interval(1, 8))))
* c2 represents the Union of two rectangular regions in complex
plane. One of them surrounded by the coordinates of c1 and
other surrounded by the coordinates (4, 1), (6, 1), (6, 8) and
(4, 8).
>>> 2.5 + 4.5*I in c1
True
>>> 2.5 + 6.5*I in c1
False
>>> r = Interval(0, 1)
>>> theta = Interval(0, 2*S.Pi)
>>> c2 = ComplexRegion(r*theta, polar=True) # Polar Form
>>> c2 # unit Disk
PolarComplexRegion(ProductSet(Interval(0, 1), Interval.Ropen(0, 2*pi)))
* c2 represents the region in complex plane inside the
Unit Disk centered at the origin.
>>> 0.5 + 0.5*I in c2
True
>>> 1 + 2*I in c2
False
>>> unit_disk = ComplexRegion(Interval(0, 1)*Interval(0, 2*S.Pi), polar=True)
>>> upper_half_unit_disk = ComplexRegion(Interval(0, 1)*Interval(0, S.Pi), polar=True)
>>> intersection = unit_disk.intersect(upper_half_unit_disk)
>>> intersection
PolarComplexRegion(ProductSet(Interval(0, 1), Interval(0, pi)))
>>> intersection == upper_half_unit_disk
True
See Also
========
CartesianComplexRegion
PolarComplexRegion
Complexes
"""
is_ComplexRegion = True
def __new__(cls, sets, polar=False):
if polar is False:
return CartesianComplexRegion(sets)
elif polar is True:
return PolarComplexRegion(sets)
else:
raise ValueError("polar should be either True or False")
@property
def sets(self):
"""
Return raw input sets to the self.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.sets
ProductSet(Interval(2, 3), Interval(4, 5))
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.sets
Union(ProductSet(Interval(2, 3), Interval(4, 5)), ProductSet(Interval(4, 5), Interval(1, 7)))
"""
return self.args[0]
@property
def psets(self):
"""
Return a tuple of sets (ProductSets) input of the self.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.psets
(ProductSet(Interval(2, 3), Interval(4, 5)),)
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.psets
(ProductSet(Interval(2, 3), Interval(4, 5)), ProductSet(Interval(4, 5), Interval(1, 7)))
"""
if self.sets.is_ProductSet:
psets = ()
psets = psets + (self.sets, )
else:
psets = self.sets.args
return psets
@property
def a_interval(self):
"""
Return the union of intervals of `x` when, self is in
rectangular form, or the union of intervals of `r` when
self is in polar form.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.a_interval
Interval(2, 3)
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.a_interval
Union(Interval(2, 3), Interval(4, 5))
"""
a_interval = []
for element in self.psets:
a_interval.append(element.args[0])
a_interval = Union(*a_interval)
return a_interval
@property
def b_interval(self):
"""
Return the union of intervals of `y` when, self is in
rectangular form, or the union of intervals of `theta`
when self is in polar form.
Examples
========
>>> from sympy import Interval, ComplexRegion, Union
>>> a = Interval(2, 3)
>>> b = Interval(4, 5)
>>> c = Interval(1, 7)
>>> C1 = ComplexRegion(a*b)
>>> C1.b_interval
Interval(4, 5)
>>> C2 = ComplexRegion(Union(a*b, b*c))
>>> C2.b_interval
Interval(1, 7)
"""
b_interval = []
for element in self.psets:
b_interval.append(element.args[1])
b_interval = Union(*b_interval)
return b_interval
@property
def _measure(self):
"""
The measure of self.sets.
Examples
========
>>> from sympy import Interval, ComplexRegion, S
>>> a, b = Interval(2, 5), Interval(4, 8)
>>> c = Interval(0, 2*S.Pi)
>>> c1 = ComplexRegion(a*b)
>>> c1.measure
12
>>> c2 = ComplexRegion(a*c, polar=True)
>>> c2.measure
6*pi
"""
return self.sets._measure
def _kind(self):
return self.args[0].kind
@classmethod
def from_real(cls, sets):
"""
Converts given subset of real numbers to a complex region.
Examples
========
>>> from sympy import Interval, ComplexRegion
>>> unit = Interval(0,1)
>>> ComplexRegion.from_real(unit)
CartesianComplexRegion(ProductSet(Interval(0, 1), {0}))
"""
if not sets.is_subset(S.Reals):
raise ValueError("sets must be a subset of the real line")
return CartesianComplexRegion(sets * FiniteSet(0))
def _contains(self, other):
from sympy.functions import arg, Abs
isTuple = isinstance(other, Tuple)
if isTuple and len(other) != 2:
raise ValueError('expecting Tuple of length 2')
# If the other is not an Expression, and neither a Tuple
if not isinstance(other, (Expr, Tuple)):
return S.false
# self in rectangular form
if not self.polar:
re, im = other if isTuple else other.as_real_imag()
return tfn[fuzzy_or(fuzzy_and([
pset.args[0]._contains(re),
pset.args[1]._contains(im)])
for pset in self.psets)]
# self in polar form
elif self.polar:
if other.is_zero:
# ignore undefined complex argument
return tfn[fuzzy_or(pset.args[0]._contains(S.Zero)
for pset in self.psets)]
if isTuple:
r, theta = other
else:
r, theta = Abs(other), arg(other)
if theta.is_real and theta.is_number:
# angles in psets are normalized to [0, 2pi)
theta %= 2*S.Pi
return tfn[fuzzy_or(fuzzy_and([
pset.args[0]._contains(r),
pset.args[1]._contains(theta)])
for pset in self.psets)]
| ComplexRegion |
python | python-excel__xlwt | xlwt/BIFFRecords.py | {
"start": 74971,
"end": 78178
} | class ____(BiffRecord):
"""
This record is part of the Page Settings Block. It specifies the
page header string for the current worksheet. If this record is not
present or completely empty (record size is 0), the sheet does not
contain a page header.
Record HEADER for non-empty page header, BIFF2-BIFF8:
Offset Size Contents
0 var. Page header string
BIFF2-BIFF7: Non-empty byte string, 8bit string
length
BIFF8: Non-empty Unicode string, 16bit string length
The header string may contain special commands, i.e. placeholders for
the page number, current date, or text formatting attributes. These
fields are represented by single letters (exception: font name and
size, see below) with a leading ampersand ("&"). If the ampersand
is part of the regular header text, it will be duplicated ("&&"). The
page header is divided into 3 sections: the left, the centred, and the
right section. Each section is introduced by a special command. All
text and all commands following are part of the selected section. Each
section starts with the text formatting specified in the default font
(first FONT record in the file). Active formatting attributes from
a previous section do not go into the next section.
The following table shows all available commands:
Command Contents
&& The "&" character itself
&L Start of the left section
&C Start of the centred section
&R Start of the right section
&P Current page number
&N Page count
&D Current date
&T Current time
&A Sheet name (BIFF5-BIFF8)
&F File name without path
&Z File path without file name (BIFF8X)
&G Picture (BIFF8X)
&B Bold on/off (BIFF2-BIFF4)
&I Italic on/off (BIFF2-BIFF4)
&U Underlining on/off
&E Double underlining on/off (BIFF5-BIFF8)
&S Strikeout on/off
&X Superscript on/off (BIFF5-BIFF8)
&Y Subscript on/off (BIFF5-BIFF8)
&"<fontname>" Set new font <fontname>
&"<fontname>,<fontstyle>"
Set new font with specified style <fontstyle>.
The style <fontstyle> is in most cases one of
"Regular", "Bold", "Italic", or "Bold Italic".
But this setting is dependent on the used font,
it may differ (localised style names, or "Standard",
"Oblique", ...). (BIFF5-BIFF8)
&<fontheight> Set font height in points (<fontheight> is a decimal value).
If this command is followed by a plain number to be printed
in the header, it will be separated from the font height
with a space character.
"""
_REC_ID = 0x0014
def __init__(self, header_str):
self._rec_data = upack2(header_str)
| HeaderRecord |
python | realpython__materials | python-maze-solver/source_code_final/src/maze_solver/models/square.py | {
"start": 147,
"end": 251
} | class ____:
index: int
row: int
column: int
border: Border
role: Role = Role.NONE
| Square |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_iter.py | {
"start": 3103,
"end": 3231
} | class ____:
def __init__(self, s):
self.s = s
def __getitem__(self, i):
return self.s[i]
| SequenceProxyClass |
python | PyCQA__pylint | tests/functional/t/too/too_few_public_methods_37.py | {
"start": 682,
"end": 934
} | class ____:
"""A three dimensional point with x, y and z components."""
attr1: float
attr2: float
attr3: float
def to_array(self):
"""Convert to a NumPy array `np.array((x, y, z))`."""
return self.attr1
@define
| Point |
python | ray-project__ray | rllib/models/preprocessors.py | {
"start": 3984,
"end": 5599
} | class ____(Preprocessor):
"""Generic image preprocessor.
Note: for Atari games, use config {"preprocessor_pref": "deepmind"}
instead for deepmind-style Atari preprocessing.
"""
@override(Preprocessor)
def _init_shape(self, obs_space: gym.Space, options: dict) -> List[int]:
self._grayscale = options.get("grayscale")
self._zero_mean = options.get("zero_mean")
self._dim = options.get("dim")
if self._grayscale:
shape = (self._dim, self._dim, 1)
else:
shape = (self._dim, self._dim, 3)
return shape
@override(Preprocessor)
def transform(self, observation: TensorType) -> np.ndarray:
"""Downsamples images from (210, 160, 3) by the configured factor."""
self.check_shape(observation)
scaled = observation[25:-25, :, :]
if self._dim < 84:
scaled = resize(scaled, height=84, width=84)
# OpenAI: Resize by half, then down to 42x42 (essentially mipmapping).
# If we resize directly we lose pixels that, when mapped to 42x42,
# aren't close enough to the pixel boundary.
scaled = resize(scaled, height=self._dim, width=self._dim)
if self._grayscale:
scaled = scaled.mean(2)
scaled = scaled.astype(np.float32)
# Rescale needed for maintaining 1 channel
scaled = np.reshape(scaled, [self._dim, self._dim, 1])
if self._zero_mean:
scaled = (scaled - 128) / 128
else:
scaled *= 1.0 / 255.0
return scaled
@OldAPIStack
| GenericPixelPreprocessor |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/python.py | {
"start": 3860,
"end": 21797
} | class ____(Task.Task):
color = 'PINK'
def __str__(self):
node = self.outputs[0]
return node.path_from(node.ctx.launch_node())
def run(self):
cmd = [
Utils.subst_vars('${PYTHON}', self.env),
Utils.subst_vars('${PYFLAGS_OPT}', self.env), '-c', INST, self.inputs[0].abspath(),
self.outputs[0].abspath(), self.pyd
]
ret = self.generator.bld.exec_command(cmd)
return ret
@feature('pyext')
@before_method('propagate_uselib_vars', 'apply_link')
@after_method('apply_bundle')
def init_pyext(self):
self.uselib = self.to_list(getattr(self, 'uselib', []))
if not 'PYEXT' in self.uselib:
self.uselib.append('PYEXT')
self.env.cshlib_PATTERN = self.env.cxxshlib_PATTERN = self.env.macbundle_PATTERN = self.env.pyext_PATTERN
self.env.fcshlib_PATTERN = self.env.dshlib_PATTERN = self.env.pyext_PATTERN
try:
if not self.install_path:
return
except AttributeError:
self.install_path = '${PYTHONARCHDIR}'
@feature('pyext')
@before_method('apply_link', 'apply_bundle')
def set_bundle(self):
if Utils.unversioned_sys_platform() == 'darwin':
self.mac_bundle = True
@before_method('propagate_uselib_vars')
@feature('pyembed')
def init_pyembed(self):
self.uselib = self.to_list(getattr(self, 'uselib', []))
if not 'PYEMBED' in self.uselib:
self.uselib.append('PYEMBED')
@conf
def get_python_variables(self, variables, imports=None):
if not imports:
try:
imports = self.python_imports
except AttributeError:
imports = DISTUTILS_IMP
program = list(imports)
program.append('')
for v in variables:
program.append("print(repr(%s))" % v)
os_env = dict(os.environ)
try:
del os_env['MACOSX_DEPLOYMENT_TARGET']
except KeyError:
pass
try:
out = self.cmd_and_log(self.env.PYTHON + ['-c', '\n'.join(program)], env=os_env)
except Errors.WafError:
self.fatal('The distutils module is unusable: install "python-devel"?')
self.to_log(out)
return_values = []
for s in out.splitlines():
s = s.strip()
if not s:
continue
if s == 'None':
return_values.append(None)
elif (s[0] == "'" and s[-1] == "'") or (s[0] == '"' and s[-1] == '"'):
return_values.append(eval(s))
elif s[0].isdigit():
return_values.append(int(s))
else:
break
return return_values
@conf
def test_pyembed(self, mode, msg='Testing pyembed configuration'):
self.check(
header_name='Python.h',
define_name='HAVE_PYEMBED',
msg=msg,
fragment=FRAG,
errmsg='Could not build a python embedded interpreter',
features='%s %sprogram pyembed' % (mode, mode)
)
@conf
def test_pyext(self, mode, msg='Testing pyext configuration'):
self.check(
header_name='Python.h',
define_name='HAVE_PYEXT',
msg=msg,
fragment=FRAG,
errmsg='Could not build python extensions',
features='%s %sshlib pyext' % (mode, mode)
)
@conf
def python_cross_compile(self, features='pyembed pyext'):
features = Utils.to_list(features)
if not (
'PYTHON_LDFLAGS' in self.environ or 'PYTHON_PYEXT_LDFLAGS' in self.environ
or 'PYTHON_PYEMBED_LDFLAGS' in self.environ
):
return False
for x in 'PYTHON_VERSION PYTAG pyext_PATTERN'.split():
if not x in self.environ:
self.fatal('Please set %s in the os environment' % x)
else:
self.env[x] = self.environ[x]
xx = self.env.CXX_NAME and 'cxx' or 'c'
if 'pyext' in features:
flags = self.environ.get('PYTHON_PYEXT_LDFLAGS', self.environ.get('PYTHON_LDFLAGS'))
if flags is None:
self.fatal('No flags provided through PYTHON_PYEXT_LDFLAGS as required')
else:
self.parse_flags(flags, 'PYEXT')
self.test_pyext(xx)
if 'pyembed' in features:
flags = self.environ.get('PYTHON_PYEMBED_LDFLAGS', self.environ.get('PYTHON_LDFLAGS'))
if flags is None:
self.fatal('No flags provided through PYTHON_PYEMBED_LDFLAGS as required')
else:
self.parse_flags(flags, 'PYEMBED')
self.test_pyembed(xx)
return True
@conf
def check_python_headers(conf, features='pyembed pyext'):
features = Utils.to_list(features)
assert ('pyembed' in features
) or ('pyext' in features), "check_python_headers features must include 'pyembed' and/or 'pyext'"
env = conf.env
if not env.CC_NAME and not env.CXX_NAME:
conf.fatal('load a compiler first (gcc, g++, ..)')
if conf.python_cross_compile(features):
return
if not env.PYTHON_VERSION:
conf.check_python_version()
pybin = env.PYTHON
if not pybin:
conf.fatal('Could not find the python executable')
v = 'prefix SO LDFLAGS LIBDIR LIBPL INCLUDEPY Py_ENABLE_SHARED MACOSX_DEPLOYMENT_TARGET LDSHARED CFLAGS LDVERSION'.split(
)
try:
lst = conf.get_python_variables(["get_config_var('%s') or ''" % x for x in v])
except RuntimeError:
conf.fatal("Python development headers not found (-v for details).")
vals = ['%s = %r' % (x, y) for (x, y) in zip(v, lst)]
conf.to_log("Configuration returned from %r:\n%s\n" % (pybin, '\n'.join(vals)))
dct = dict(zip(v, lst))
x = 'MACOSX_DEPLOYMENT_TARGET'
if dct[x]:
env[x] = conf.environ[x] = dct[x]
env.pyext_PATTERN = '%s' + dct['SO']
num = '.'.join(env.PYTHON_VERSION.split('.')[:2])
conf.find_program([
''.join(pybin) + '-config',
'python%s-config' % num,
'python-config-%s' % num,
'python%sm-config' % num
],
var='PYTHON_CONFIG',
msg="python-config",
mandatory=False)
if env.PYTHON_CONFIG:
if conf.env.HAVE_PYTHON_H:
return
all_flags = [['--cflags', '--libs', '--ldflags']]
if sys.hexversion < 0x2070000:
all_flags = [[k] for k in all_flags[0]]
xx = env.CXX_NAME and 'cxx' or 'c'
if 'pyembed' in features:
for flags in all_flags:
embedflags = flags + ['--embed']
try:
conf.check_cfg(
msg='Asking python-config for pyembed %r flags' % ' '.join(embedflags),
path=env.PYTHON_CONFIG,
package='',
uselib_store='PYEMBED',
args=embedflags
)
except conf.errors.ConfigurationError:
conf.check_cfg(
msg='Asking python-config for pyembed %r flags' % ' '.join(flags),
path=env.PYTHON_CONFIG,
package='',
uselib_store='PYEMBED',
args=flags
)
try:
conf.test_pyembed(xx)
except conf.errors.ConfigurationError:
if dct['Py_ENABLE_SHARED'] and dct['LIBDIR']:
env.append_unique('LIBPATH_PYEMBED', [dct['LIBDIR']])
conf.test_pyembed(xx)
else:
raise
if 'pyext' in features:
for flags in all_flags:
conf.check_cfg(
msg='Asking python-config for pyext %r flags' % ' '.join(flags),
path=env.PYTHON_CONFIG,
package='',
uselib_store='PYEXT',
args=flags
)
try:
conf.test_pyext(xx)
except conf.errors.ConfigurationError:
if dct['Py_ENABLE_SHARED'] and dct['LIBDIR']:
env.append_unique('LIBPATH_PYEXT', [dct['LIBDIR']])
conf.test_pyext(xx)
else:
raise
conf.define('HAVE_PYTHON_H', 1)
return
all_flags = dct['LDFLAGS'] + ' ' + dct['CFLAGS']
conf.parse_flags(all_flags, 'PYEMBED')
all_flags = dct['LDFLAGS'] + ' ' + dct['LDSHARED'] + ' ' + dct['CFLAGS']
conf.parse_flags(all_flags, 'PYEXT')
result = None
if not dct["LDVERSION"]:
dct["LDVERSION"] = env.PYTHON_VERSION
for name in (
'python' + dct['LDVERSION'], 'python' + env.PYTHON_VERSION + 'm',
'python' + env.PYTHON_VERSION.replace('.', '')
):
if not result and env.LIBPATH_PYEMBED:
path = env.LIBPATH_PYEMBED
conf.to_log("\n\n# Trying default LIBPATH_PYEMBED: %r\n" % path)
result = conf.check(
lib=name,
uselib='PYEMBED',
libpath=path,
mandatory=False,
msg='Checking for library %s in LIBPATH_PYEMBED' % name
)
if not result and dct['LIBDIR']:
path = [dct['LIBDIR']]
conf.to_log("\n\n# try again with -L$python_LIBDIR: %r\n" % path)
result = conf.check(
lib=name,
uselib='PYEMBED',
libpath=path,
mandatory=False,
msg='Checking for library %s in LIBDIR' % name
)
if not result and dct['LIBPL']:
path = [dct['LIBPL']]
conf.to_log(
"\n\n# try again with -L$python_LIBPL (some systems don't install the python library in $prefix/lib)\n"
)
result = conf.check(
lib=name,
uselib='PYEMBED',
libpath=path,
mandatory=False,
msg='Checking for library %s in python_LIBPL' % name
)
if not result:
path = [os.path.join(dct['prefix'], "libs")]
conf.to_log("\n\n# try again with -L$prefix/libs, and pythonXY name rather than pythonX.Y (win32)\n")
result = conf.check(
lib=name,
uselib='PYEMBED',
libpath=path,
mandatory=False,
msg='Checking for library %s in $prefix/libs' % name
)
if result:
break
if result:
env.LIBPATH_PYEMBED = path
env.append_value('LIB_PYEMBED', [name])
else:
conf.to_log("\n\n### LIB NOT FOUND\n")
if Utils.is_win32 or dct['Py_ENABLE_SHARED']:
env.LIBPATH_PYEXT = env.LIBPATH_PYEMBED
env.LIB_PYEXT = env.LIB_PYEMBED
conf.to_log("Include path for Python extensions (found via distutils module): %r\n" % (dct['INCLUDEPY'],))
env.INCLUDES_PYEXT = [dct['INCLUDEPY']]
env.INCLUDES_PYEMBED = [dct['INCLUDEPY']]
if env.CC_NAME == 'gcc':
env.append_unique('CFLAGS_PYEMBED', ['-fno-strict-aliasing'])
env.append_unique('CFLAGS_PYEXT', ['-fno-strict-aliasing'])
if env.CXX_NAME == 'gcc':
env.append_unique('CXXFLAGS_PYEMBED', ['-fno-strict-aliasing'])
env.append_unique('CXXFLAGS_PYEXT', ['-fno-strict-aliasing'])
if env.CC_NAME == "msvc":
from distutils.msvccompiler import MSVCCompiler
dist_compiler = MSVCCompiler()
dist_compiler.initialize()
env.append_value('CFLAGS_PYEXT', dist_compiler.compile_options)
env.append_value('CXXFLAGS_PYEXT', dist_compiler.compile_options)
env.append_value('LINKFLAGS_PYEXT', dist_compiler.ldflags_shared)
conf.check(
header_name='Python.h',
define_name='HAVE_PYTHON_H',
uselib='PYEMBED',
fragment=FRAG,
errmsg='Distutils not installed? Broken python installation? Get python-config now!'
)
@conf
def check_python_version(conf, minver=None):
assert minver is None or isinstance(minver, tuple)
pybin = conf.env.PYTHON
if not pybin:
conf.fatal('could not find the python executable')
cmd = pybin + ['-c', 'import sys\nfor x in sys.version_info: print(str(x))']
Logs.debug('python: Running python command %r', cmd)
lines = conf.cmd_and_log(cmd).split()
assert len(lines) == 5, "found %r lines, expected 5: %r" % (len(lines), lines)
pyver_tuple = (int(lines[0]), int(lines[1]), int(lines[2]), lines[3], int(lines[4]))
result = (minver is None) or (pyver_tuple >= minver)
if result:
pyver = '.'.join([str(x) for x in pyver_tuple[:2]])
conf.env.PYTHON_VERSION = pyver
if 'PYTHONDIR' in conf.env:
pydir = conf.env.PYTHONDIR
elif 'PYTHONDIR' in conf.environ:
pydir = conf.environ['PYTHONDIR']
else:
if Utils.is_win32:
(python_LIBDEST, pydir) = conf.get_python_variables([
"get_config_var('LIBDEST') or ''", "get_python_lib(standard_lib=0) or ''"
])
else:
python_LIBDEST = None
(pydir,) = conf.get_python_variables([
"get_python_lib(standard_lib=0, prefix=%r) or ''" % conf.env.PREFIX
])
if python_LIBDEST is None:
if conf.env.LIBDIR:
python_LIBDEST = os.path.join(conf.env.LIBDIR, 'python' + pyver)
else:
python_LIBDEST = os.path.join(conf.env.PREFIX, 'lib', 'python' + pyver)
if 'PYTHONARCHDIR' in conf.env:
pyarchdir = conf.env.PYTHONARCHDIR
elif 'PYTHONARCHDIR' in conf.environ:
pyarchdir = conf.environ['PYTHONARCHDIR']
else:
(pyarchdir,) = conf.get_python_variables([
"get_python_lib(plat_specific=1, standard_lib=0, prefix=%r) or ''" % conf.env.PREFIX
])
if not pyarchdir:
pyarchdir = pydir
if hasattr(conf, 'define'):
conf.define('PYTHONDIR', pydir)
conf.define('PYTHONARCHDIR', pyarchdir)
conf.env.PYTHONDIR = pydir
conf.env.PYTHONARCHDIR = pyarchdir
pyver_full = '.'.join(map(str, pyver_tuple[:3]))
if minver is None:
conf.msg('Checking for python version', pyver_full)
else:
minver_str = '.'.join(map(str, minver))
conf.msg('Checking for python version >= %s' % (minver_str,), pyver_full, color=result and 'GREEN' or 'YELLOW')
if not result:
conf.fatal('The python version is too old, expecting %r' % (minver,))
PYTHON_MODULE_TEMPLATE = '''
import %s as current_module
version = getattr(current_module, '__version__', None)
if version is not None:
print(str(version))
else:
print('unknown version')
'''
@conf
def check_python_module(conf, module_name, condition=''):
msg = "Checking for python module %r" % module_name
if condition:
msg = '%s (%s)' % (msg, condition)
conf.start_msg(msg)
try:
ret = conf.cmd_and_log(conf.env.PYTHON + ['-c', PYTHON_MODULE_TEMPLATE % module_name])
except Errors.WafError:
conf.end_msg(False)
conf.fatal('Could not find the python module %r' % module_name)
ret = ret.strip()
if condition:
conf.end_msg(ret)
if ret == 'unknown version':
conf.fatal('Could not check the %s version' % module_name)
from distutils.version import LooseVersion
def num(*k):
if isinstance(k[0], int):
return LooseVersion('.'.join([str(x) for x in k]))
else:
return LooseVersion(k[0])
d = {'num': num, 'ver': LooseVersion(ret)}
ev = eval(condition, {}, d)
if not ev:
conf.fatal('The %s version does not satisfy the requirements' % module_name)
else:
if ret == 'unknown version':
conf.end_msg(True)
else:
conf.end_msg(ret)
def configure(conf):
v = conf.env
if getattr(Options.options, 'pythondir', None):
v.PYTHONDIR = Options.options.pythondir
if getattr(Options.options, 'pythonarchdir', None):
v.PYTHONARCHDIR = Options.options.pythonarchdir
if getattr(Options.options, 'nopycache', None):
v.NOPYCACHE = Options.options.nopycache
if not v.PYTHON:
v.PYTHON = [getattr(Options.options, 'python', None) or sys.executable]
v.PYTHON = Utils.to_list(v.PYTHON)
conf.find_program('python', var='PYTHON')
v.PYFLAGS = ''
v.PYFLAGS_OPT = '-O'
v.PYC = getattr(Options.options, 'pyc', 1)
v.PYO = getattr(Options.options, 'pyo', 1)
try:
v.PYTAG = conf.cmd_and_log(
conf.env.PYTHON + [
'-c',
"import sys\ntry:\n print(sys.implementation.cache_tag)\nexcept AttributeError:\n import imp\n print(imp.get_tag())\n"
]
).strip()
except Errors.WafError:
pass
def options(opt):
pyopt = opt.add_option_group("Python Options")
pyopt.add_option(
'--nopyc',
dest='pyc',
action='store_false',
default=1,
help='Do not install bytecode compiled .pyc files (configuration) [Default:install]'
)
pyopt.add_option(
'--nopyo',
dest='pyo',
action='store_false',
default=1,
help='Do not install optimised compiled .pyo files (configuration) [Default:install]'
)
pyopt.add_option(
'--nopycache',
dest='nopycache',
action='store_true',
help='Do not use __pycache__ directory to install objects [Default:auto]'
)
pyopt.add_option('--python', dest="python", help='python binary to be used [Default: %s]' % sys.executable)
pyopt.add_option(
'--pythondir',
dest='pythondir',
help='Installation path for python modules (py, platform-independent .py and .pyc files)'
)
pyopt.add_option(
'--pythonarchdir',
dest='pythonarchdir',
help='Installation path for python extension (pyext, platform-dependent .so or .dylib files)'
)
| pyo |
python | ray-project__ray | rllib/examples/connectors/classes/protobuf_cartpole_observation_decoder.py | {
"start": 403,
"end": 2930
} | class ____(ConnectorV2):
"""Env-to-module ConnectorV2 piece decoding protobuf obs into CartPole-v1 obs.
Add this connector piece to your env-to-module pipeline, through your algo config:
```
config.env_runners(
env_to_module_connector=(
lambda env, spaces, device: ProtobufCartPoleObservationDecoder()
)
)
```
The incoming observation space must be a 1D Box of dtype uint8
(which is the same as a binary string). The outgoing observation space is the
normal CartPole-v1 1D space: Box(-inf, inf, (4,), float32).
"""
@override(ConnectorV2)
def recompute_output_observation_space(
self,
input_observation_space: gym.Space,
input_action_space: gym.Space,
) -> gym.Space:
# Make sure the incoming observation space is a protobuf (binary string).
assert (
isinstance(input_observation_space, gym.spaces.Box)
and len(input_observation_space.shape) == 1
and input_observation_space.dtype.name == "uint8"
)
# Return CartPole-v1's natural observation space.
return gym.spaces.Box(float("-inf"), float("inf"), (4,), np.float32)
def __call__(
self,
*,
rl_module: RLModule,
batch: Any,
episodes: List[EpisodeType],
explore: Optional[bool] = None,
shared_data: Optional[dict] = None,
**kwargs,
) -> Any:
# Loop through all episodes and change the observation from a binary string
# to an actual 1D np.ndarray (normal CartPole-v1 obs).
for sa_episode in self.single_agent_episode_iterator(episodes=episodes):
# Get last obs (binary string).
obs = sa_episode.get_observations(-1)
obs_bytes = obs.tobytes()
obs_protobuf = CartPoleObservation()
obs_protobuf.ParseFromString(obs_bytes)
# Set up the natural CartPole-v1 observation tensor from the protobuf
# values.
new_obs = np.array(
[
obs_protobuf.x_pos,
obs_protobuf.x_veloc,
obs_protobuf.angle_pos,
obs_protobuf.angle_veloc,
],
np.float32,
)
# Write the new observation (1D tensor) back into the Episode.
sa_episode.set_observations(new_data=new_obs, at_indices=-1)
# Return `data` as-is.
return batch
| ProtobufCartPoleObservationDecoder |
python | getsentry__sentry | tests/sentry/monitors/endpoints/test_organization_detector_index.py | {
"start": 10489,
"end": 11067
} | class ____(BaseDetectorTestCase):
endpoint = "sentry-api-0-organization-detector-details"
method = "delete"
def test_delete_monitor_incident_detector(self):
detector_id = self.detector.id
monitor_id = self.monitor.id
self.get_success_response(
self.organization.slug,
detector_id,
status_code=204,
)
self.detector.refresh_from_db()
assert self.detector.status == ObjectStatus.PENDING_DELETION
assert Monitor.objects.filter(id=monitor_id).exists()
| OrganizationDetectorDeleteTest |
python | spyder-ide__spyder | external-deps/spyder-kernels/spyder_kernels/utils/tests/test_iofuncs.py | {
"start": 1733,
"end": 1884
} | class ____(CustomObj):
"""A class of objects that cannot be deepcopied."""
def __getstate__(self):
raise RuntimeError()
| UnDeepCopyableObj |
python | ZoranPandovski__al-go-rithms | data_structures/binarySearch_tree/Python/binary_search_tree.py | {
"start": 3705,
"end": 5522
} | class ____():
def __init__(self):
self.root = None
def __setitem__(self, key, value):
node = find(self.root, key)
if not node:
self.root = insert(self.root, key, value)
self.root = balance_bst(self.root)
else:
update(self.root, key, value)
def __getitem__(self, key):
node = find(self.root, key)
return node.value if node else None
def __iter__(self):
return (x for x in list_all(self.root))
def __len__(self):
return tree_size(self.root)
def display(self):
return display_keys(self.root)
# Sample test code for the TreeMap
# treemap = TreeMap()
# treemap.display()
# treemap['aakash'] = aakash
# treemap['jadhesh'] = jadhesh
# treemap['sonaksh'] = sonaksh
# treemap.display()
# print(treemap['jadhesh'])
# treemap['biraj'] = biraj
# treemap['hemanth'] = hemanth
# treemap['siddhant'] = siddhant
# treemap['vishal'] = vishal
# print(len(treemap))
# treemap.display()
# for key, value in treemap:
# print(key, value)
# list(treemap)
# Some sample test code:
# tree = insert(None, jadhesh.username, jadhesh)
# insert(tree, biraj.username, biraj)
# insert(tree, sonaksh.username, sonaksh)
# insert(tree, aakash.username, aakash)
# insert(tree, hemanth.username, hemanth)
# insert(tree, siddhant.username, siddhant)
# insert(tree, vishal.username, siddhant)
# display_keys(tree)
# foundNode = find(tree, 'vishal')
# print(foundNode.key, foundNode.value)
# update(tree, 'siddhant', User('siddhant', 'Siddhant Raj', 'siddhant@raj.com'))
# foundNode = find(tree, 'siddhant')
# print(foundNode.key, foundNode.value)
# list_all(tree)
# data = [(user.username, user) for user in users]
# tree = make_balanced_bst(data)
# display_keys(tree)
| TreeMap |
python | yandexdataschool__Practical_RL | week06_policy_based/atari_wrappers.py | {
"start": 3528,
"end": 4693
} | class ____(ObservationWrapper):
"""Preprocesses image-observations by possibly grayscaling and resizing."""
def __init__(self, env, height=84, width=84, grayscale=True):
super().__init__(env)
self.height = height
self.width = width
self.grayscale = grayscale
ospace = self.env.observation_space
low, high, dtype = ospace.low.min(), ospace.high.max(), ospace.dtype
if self.grayscale:
self.observation_space = Box(
low=low,
high=high,
shape=(height, width),
dtype=dtype,
)
else:
self.observation_space = Box(
low=low,
high=high,
shape=(height, width, *self.observation_space.shape[2:]),
dtype=dtype,
)
def observation(self, observation):
"""Performs image preprocessing."""
if self.grayscale:
observation = cv2.cvtColor(observation, cv2.COLOR_RGB2GRAY)
observation = cv2.resize(observation, (self.width, self.height), cv2.INTER_AREA)
return observation
| ImagePreprocessing |
python | ray-project__ray | python/ray/data/_internal/logical/operators/all_to_all_operator.py | {
"start": 606,
"end": 1701
} | class ____(LogicalOperator):
"""Abstract class for logical operators should be converted to physical
AllToAllOperator.
"""
def __init__(
self,
name: str,
input_op: LogicalOperator,
num_outputs: Optional[int] = None,
sub_progress_bar_names: Optional[List[str]] = None,
ray_remote_args: Optional[Dict[str, Any]] = None,
):
"""
Args:
name: Name for this operator. This is the name that will appear when
inspecting the logical plan of a Dataset.
input_op: The operator preceding this operator in the plan DAG. The outputs
of `input_op` will be the inputs to this operator.
num_outputs: The number of expected output bundles outputted by this
operator.
ray_remote_args: Args to provide to :func:`ray.remote`.
"""
super().__init__(name, [input_op], num_outputs=num_outputs)
self._ray_remote_args = ray_remote_args or {}
self._sub_progress_bar_names = sub_progress_bar_names
| AbstractAllToAll |
python | nedbat__coveragepy | tests/plugin2.py | {
"start": 1722,
"end": 2253
} | class ____(FileReporter):
"""A goofy file reporter."""
def lines(self) -> set[TLineNo]:
# Goofy test arrangement: claim that the file has as many lines as the
# number in its name.
num = os.path.basename(self.filename).split(".")[0].split("_")[1]
return set(range(1, int(num) + 1))
def coverage_init(
reg: Plugins,
options: Any, # pylint: disable=unused-argument
) -> None:
"""Called by coverage to initialize the plugins here."""
reg.add_file_tracer(Plugin())
| MyFileReporter |
python | spyder-ide__spyder | spyder/plugins/completion/providers/languageserver/providers/client.py | {
"start": 449,
"end": 754
} | class ____:
@handles(CompletionRequestTypes.CLIENT_REGISTER_CAPABILITY)
@send_response
def handle_register_capability(self, params):
"""TODO: Handle the glob patterns of the files to watch."""
logger.debug('Register Capability: {0}'.format(params))
return {}
| ClientProvider |
python | PrefectHQ__prefect | examples/ai_database_cleanup_with_approval.py | {
"start": 2573,
"end": 3351
} | class ____(BaseModel):
"""Define what to clean and how."""
retention_period: timedelta = Field(
default=timedelta(days=30), description="How far back to keep flow runs"
)
states_to_clean: list[str] = Field(
default=["Completed", "Failed", "Cancelled"],
description="Which states to clean",
)
batch_size: int = Field(default=100, ge=10, le=1000)
dry_run: bool = Field(default=False, description="Preview without deleting")
approval_type: Literal["human", "ai"] = Field(
default="human", description="Human form or AI agent approval"
)
# <AccordionGroup>
#
# <Accordion title="Human Approval: Pause and Review">
#
# When using `approval_type="human"`, the flow pauses and shows a form in the UI.
| RetentionConfig |
python | pytorch__pytorch | torch/distributions/weibull.py | {
"start": 489,
"end": 3467
} | class ____(TransformedDistribution):
r"""
Samples from a two-parameter Weibull distribution.
Example:
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = Weibull(torch.tensor([1.0]), torch.tensor([1.0]))
>>> m.sample() # sample from a Weibull distribution with scale=1, concentration=1
tensor([ 0.4784])
Args:
scale (float or Tensor): Scale parameter of distribution (lambda).
concentration (float or Tensor): Concentration parameter of distribution (k/shape).
validate_args (bool, optional): Whether to validate arguments. Default: None.
"""
arg_constraints = {
"scale": constraints.positive,
"concentration": constraints.positive,
}
# pyrefly: ignore [bad-override]
support = constraints.positive
def __init__(
self,
scale: Union[Tensor, float],
concentration: Union[Tensor, float],
validate_args: Optional[bool] = None,
) -> None:
self.scale, self.concentration = broadcast_all(scale, concentration)
self.concentration_reciprocal = self.concentration.reciprocal()
base_dist = Exponential(
torch.ones_like(self.scale), validate_args=validate_args
)
transforms = [
PowerTransform(exponent=self.concentration_reciprocal),
AffineTransform(loc=0, scale=self.scale),
]
# pyrefly: ignore [bad-argument-type]
super().__init__(base_dist, transforms, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Weibull, _instance)
new.scale = self.scale.expand(batch_shape)
new.concentration = self.concentration.expand(batch_shape)
new.concentration_reciprocal = new.concentration.reciprocal()
base_dist = self.base_dist.expand(batch_shape)
transforms = [
PowerTransform(exponent=new.concentration_reciprocal),
AffineTransform(loc=0, scale=new.scale),
]
super(Weibull, new).__init__(base_dist, transforms, validate_args=False)
new._validate_args = self._validate_args
return new
@property
def mean(self) -> Tensor:
return self.scale * torch.exp(torch.lgamma(1 + self.concentration_reciprocal))
@property
def mode(self) -> Tensor:
return (
self.scale
* ((self.concentration - 1) / self.concentration)
** self.concentration.reciprocal()
)
@property
def variance(self) -> Tensor:
return self.scale.pow(2) * (
torch.exp(torch.lgamma(1 + 2 * self.concentration_reciprocal))
- torch.exp(2 * torch.lgamma(1 + self.concentration_reciprocal))
)
def entropy(self):
return (
euler_constant * (1 - self.concentration_reciprocal)
+ torch.log(self.scale * self.concentration_reciprocal)
+ 1
)
| Weibull |
python | numba__numba | numba/core/types/abstract.py | {
"start": 10382,
"end": 10474
} | class ____(Sized, IterableType):
"""
Base class for container types.
"""
| Container |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_lookup.py | {
"start": 37168,
"end": 38387
} | class ____(tuple):
pass
def test_tuple_subclasses_not_generic_sequences():
# see https://github.com/HypothesisWorks/hypothesis/issues/3767.
with temp_registered(TupleSubtype, st.builds(TupleSubtype)):
s = st.from_type(typing.Sequence[int])
assert_no_examples(s, lambda x: isinstance(x, tuple))
def test_custom_strategy_function_resolves_types_conditionally():
sentinel = object()
class A:
pass
class B(A):
pass
class C(A):
pass
def resolve_custom_strategy_for_b(thing):
if thing == B:
return st.just(sentinel)
return NotImplemented
with contextlib.ExitStack() as stack:
stack.enter_context(temp_registered(B, resolve_custom_strategy_for_b))
stack.enter_context(temp_registered(C, st.builds(C)))
# C's strategy can be used for A, but B's cannot because its function
# only returns a strategy for requests for exactly B.
assert_all_examples(st.from_type(A), lambda example: type(example) == C)
assert_all_examples(st.from_type(B), lambda example: example is sentinel)
assert_all_examples(st.from_type(C), lambda example: type(example) == C)
| TupleSubtype |
python | ray-project__ray | python/ray/dag/dag_operation_future.py | {
"start": 240,
"end": 641
} | class ____(ABC, Generic[T]):
"""
A future representing the result of a DAG operation.
This is an abstraction that is internal to each actor,
and is not exposed to the DAG caller.
"""
@abstractmethod
def wait(self):
"""
Wait for the future and return the result of the operation.
"""
raise NotImplementedError
@DeveloperAPI
| DAGOperationFuture |
python | openai__openai-python | src/openai/types/beta/realtime/session.py | {
"start": 700,
"end": 1557
} | class ____(BaseModel):
language: Optional[str] = None
"""The language of the input audio.
Supplying the input language in
[ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
format will improve accuracy and latency.
"""
model: Optional[str] = None
"""
The model to use for transcription, current options are `gpt-4o-transcribe`,
`gpt-4o-mini-transcribe`, and `whisper-1`.
"""
prompt: Optional[str] = None
"""
An optional text to guide the model's style or continue a previous audio
segment. For `whisper-1`, the
[prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
For `gpt-4o-transcribe` models, the prompt is a free text string, for example
"expect words related to technology".
"""
| InputAudioTranscription |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 199487,
"end": 199633
} | class ____(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
| BasicTCPTest2 |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 93326,
"end": 95119
} | class ____(Expr):
"""Select one or more partitions"""
_parameters = ["frame", "partitions"]
@functools.cached_property
def _meta(self):
return self.frame._meta
def _divisions(self):
divisions = []
for part in self.partitions:
divisions.append(self.frame.divisions[part])
divisions.append(self.frame.divisions[part + 1])
return tuple(divisions)
def _task(self, name: Key, index: int) -> Task:
return Alias(name, (self.frame._name, self.partitions[index])) # type: ignore[return-value]
def _simplify_down(self):
from dask.dataframe.dask_expr import SetIndexBlockwise
from dask.dataframe.tseries.resample import ResampleAggregation
if isinstance(self.frame, Blockwise) and not isinstance(
self.frame, (BlockwiseIO, Fused, SetIndexBlockwise, ResampleAggregation)
):
operands = [
(
Partitions(op, self.partitions)
if (isinstance(op, Expr) and not self.frame._broadcast_dep(op))
else op
)
for op in self.frame.operands
]
return type(self.frame)(*operands)
elif isinstance(self.frame, PartitionsFiltered):
if self.frame._partitions:
partitions = [self.frame._partitions[p] for p in self.partitions]
else:
partitions = self.partitions
# We assume that expressions defining a special "_partitions"
# parameter can internally capture the same logic as `Partitions`
return self.frame.substitute_parameters({"_partitions": partitions})
def _node_label_args(self):
return [self.frame, self.partitions]
| Partitions |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_privacy_urls.py | {
"start": 9855,
"end": 10109
} | class ____(PublicProjectMixin, TestCase):
def login(self):
pass
def is_admin(self):
return False
# ## Private Project Testing ###
@mock.patch("readthedocs.core.utils.trigger_build", mock.MagicMock())
| PublicProjectUnauthAccessTest |
python | bottlepy__bottle | test/test_multipart.py | {
"start": 7812,
"end": 37595
} | class ____(BaseMultipartTest):
def assertMPError(self, **ka):
self.assertRaises(bottle.MultipartError, self.parse, **ka)
def test_big_boundary(self):
self.assertMPError(buffer_size=1024*3)
def test_missing_content_type(self):
self.assertMPError(ctype="")
def test_unsupported_content_type(self):
self.assertMPError(ctype='multipart/fantasy')
def test_missing_boundary(self):
self.assertMPError(ctype="multipart/form-data")
def test_no_terminator(self):
self.write('--foo\r\n',
'Content-Disposition: form-data; name="file1"; filename="random.png"\r\n',
'Content-Type: image/png\r\n', '\r\n', 'abc')
self.assertMPError()
def test_no_newline_after_content(self):
self.write('--foo\r\n',
'Content-Disposition: form-data; name="file1"; filename="random.png"\r\n',
'Content-Type: image/png\r\n', '\r\n', 'abc', '--foo--')
self.assertMPError()
def test_no_newline_after_middle_content(self):
self.write('--foo\r\n',
'Content-Disposition: form-data; name="file1"; filename="random.png"\r\n',
'Content-Type: image/png\r\n', '\r\n', 'abc', '--foo\r\n'
'Content-Disposition: form-data; name="file2"; filename="random.png"\r\n',
'Content-Type: image/png\r\n', '\r\n', 'abc\r\n', '--foo--')
parts = self.parse()
self.assertEqual(len(parts), 1)
self.assertTrue('name="file2"' in parts[0].value)
def test_preamble_before_start_boundary(self):
parts = self.write('Preamble\r\n', '--foo\r\n'
'Content-Disposition: form-data; name="file1"; filename="random.png"\r\n',
'Content-Type: image/png\r\n', '\r\n', 'abc\r\n', '--foo--')
parts = self.parse()
self.assertEqual(parts[0].file.read(), bottle.tob('abc'))
self.assertEqual(parts[0].filename, 'random.png')
self.assertEqual(parts[0].name, 'file1')
self.assertEqual(parts[0].content_type, 'image/png')
def test_no_start_boundary(self):
self.write('--bar\r\n','--nonsense\r\n'
'Content-Disposition: form-data; name="file1"; filename="random.png"\r\n',
'Content-Type: image/png\r\n', '\r\n', 'abc\r\n', '--nonsense--')
self.assertMPError()
def test_disk_limit(self):
self.write('--foo\r\n',
'Content-Disposition: form-data; name="file1"; filename="random.png"\r\n',
'Content-Type: image/png\r\n', '\r\n', 'abc'*1024+'\r\n', '--foo--')
self.assertMPError(memfile_limit=0, disk_limit=1024)
def test_mem_limit(self):
self.write('--foo\r\n',
'Content-Disposition: form-data; name="file1"; filename="random.png"\r\n',
'Content-Type: image/png\r\n', '\r\n', 'abc'*1024+'\r\n', '--foo\r\n',
'Content-Disposition: form-data; name="file2"; filename="random.png"\r\n',
'Content-Type: image/png\r\n', '\r\n', 'abc'*1024+'\r\n', '--foo--')
self.assertMPError(mem_limit=1024*3)
def test_invalid_header(self):
self.write('--foo\r\n',
'Content-Disposition: form-data; name="file1"; filename="random.png"\r\n',
'Content-Type: image/png\r\n',
'Bad header\r\n', '\r\n', 'abc'*1024+'\r\n', '--foo--')
self.assertMPError()
def test_content_length_to_small(self):
self.write('--foo\r\n',
'Content-Disposition: form-data; name="file1"; filename="random.png"\r\n',
'Content-Type: image/png\r\n',
'Content-Length: 111\r\n', '\r\n', 'abc'*1024+'\r\n', '--foo--')
self.assertMPError()
def test_no_disposition_header(self):
self.write('--foo\r\n',
'Content-Type: image/png\r\n', '\r\n', 'abc'*1024+'\r\n', '--foo--')
self.assertMPError()
''' The files used by the following test were taken from the werkzeug library
test suite and are therefore partly copyrighted by the Werkzeug Team
under BSD licence. See http://werkzeug.pocoo.org/ '''
b64d=base64.b64decode
browser_test_cases = {}
browser_test_cases["firefox3-2png1txt"] = {
"data": b64d(
"""
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0xODY0NTQ2NTE3MTM1MTkzNDE5NTE1ODEwMzAx
MDUNCkNvbnRlbnQtRGlzcG9zaXRpb246IGZvcm0tZGF0YTsgbmFtZT0iZmlsZTEiOyBmaWxlbmFt
ZT0iYW5jaG9yLnBuZyINCkNvbnRlbnQtVHlwZTogaW1hZ2UvcG5nDQoNColQTkcNChoKAAAADUlI
RFIAAAAQAAAAEAgGAAAAH/P/YQAAAARnQU1BAACvyDcFiukAAAAZdEVYdFNvZnR3YXJlAEFkb2Jl
IEltYWdlUmVhZHlxyWU8AAABnUlEQVQ4y6VTMWvCQBS+qwEFB10KGaS1P6FDpw7SrVvzAwRRx04V
Ck4K6iAoDhLXdhFcW9qhZCk4FQoW0gp2U4lQRDAUS4hJmn5Xgg2lsQ198PHu3b3vu5d3L9S2bfIf
47wOer1ewzTNtGEYBP48kUjkfsrb8BIAMb1cLovwRfi07wrYzcCr4/1/Am4FzzhzBGZeefR7E7vd
7j0Iu4wYjUYDBMfD0dBiMUQfstns3toKkHgF6EgmqqruW6bFiHcsxr70awVu63Q6NiOmUinquwfM
dF1f28CVgCRJx0jMAQ1BEFquRn7CbYVCYZVbr9dbnJMohoIh9kViu90WEW9nMpmxu4JyubyF/VEs
FiNcgCPyoyxiu7XhCPBzdU4s652VnUccbDabPLyN2C6VSmwdhFgel5DB84AJb64mEUlvmqadTKcv
40gkUkUsg1DjeZ7iRsrWgByP71T7/afxYrHIYry/eoBD9mxsaK4VRamFw2EBQknMAWGvRClNTpQJ
AfkCxFNgBmiez1ipVA4hdgQcOD/TLfylKIo3vubgL/YBnIw+ioOMLtwAAAAASUVORK5CYIINCi0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tMTg2NDU0NjUxNzEzNTE5MzQxOTUxNTgxMDMwMTA1
DQpDb250ZW50LURpc3Bvc2l0aW9uOiBmb3JtLWRhdGE7IG5hbWU9ImZpbGUyIjsgZmlsZW5hbWU9
ImFwcGxpY2F0aW9uX2VkaXQucG5nIg0KQ29udGVudC1UeXBlOiBpbWFnZS9wbmcNCg0KiVBORw0K
GgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABGdBTUEAAK/INwWK6QAAABl0RVh0U29mdHdh
cmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAAJRSURBVBgZpcHda81xHMDx9+d3fudYzuYw2RaZ5yTW
olEiuZpCSjGJFEktUUr8A6ZxQZGHmDtqdrGUXHgoeZqSp1F2bLFWjtkOB8PZzvmd7+djv5XaBRfL
6yVmxv+QjQeu7l25uuZYJmtxM0AVU8Wpw9RQU8w51AxzDqfKhFjwq6Mjdbj1RN0Zv2ZFzaloUdwr
L2Is4r+y7hRwxs8G5mUzPxmrwcA8hvnmjIZtcxmr3Y09hHwzJZQvOAwwNZyCYqgaThVXMFzBCD7f
Jfv8MpHiKvaV3ePV2f07fMwIiSeIGeYJJoao4HmCiIeIQzPXifY+paJqO4lZi/nWPZ/krabjvlNH
yANMBAQiBiqgakQMCunbxHJviM9bQeZdBzHJUzKhguLJlQnf1BghAmZ4gImAgAjk++8jP56QmL2G
XG8zsfFCz8skA1mQXKbaU3X8ISIgQsgDcun7FL7cJjFnLUMfLyLRr0SLS4hbhiup5Szd19rpFYKA
ESKICCERoS95neyHmyTmbmAodQ4vGpAfmEn6YTtTahv4ODiRkGdOCUUAAUSE/uQNfqTaKFu4jvyn
JiIxIzcwg/SjF1RsOk9R+QJMlZCvqvwhQFdbM4XvrynIVHpfn2ZSWYyhzHS+PUtSueUC0cQ0QmpG
yE9197TUnwzq1DnUKbXSxOb6S7xtPkjngzbGVVbzvS/FjaGt9DU8xlRRJdTCMDEzRjuyZ1FwaFe9
j+d4eecaPd1dPxNTSlfWHm1v5y/EzBitblXp4JLZ5f6yBbOwaK5tsD+9c33jq/f8w2+mRSjOllPh
kAAAAABJRU5ErkJggg0KLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0xODY0NTQ2NTE3MTM1
MTkzNDE5NTE1ODEwMzAxMDUNCkNvbnRlbnQtRGlzcG9zaXRpb246IGZvcm0tZGF0YTsgbmFtZT0i
dGV4dCINCg0KZXhhbXBsZSB0ZXh0DQotLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLTE4NjQ1
NDY1MTcxMzUxOTM0MTk1MTU4MTAzMDEwNS0tDQo="""
),
"boundary": "---------------------------186454651713519341951581030105",
"files": {
"file1": (
"anchor.png",
"image/png",
b64d(
"""
iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABGdBTUEAAK/INwWK6QAAABl0RVh0
U29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAAGdSURBVDjLpVMxa8JAFL6rAQUHXQoZpLU/
oUOnDtKtW/MDBFHHThUKTgrqICgOEtd2EVxb2qFkKTgVChbSCnZTiVBEMBRLiEmafleCDaWxDX3w
8e7dve+7l3cv1LZt8h/jvA56vV7DNM20YRgE/jyRSOR+ytvwEgAxvVwui/BF+LTvCtjNwKvj/X8C
bgXPOHMEZl559HsTu93uPQi7jBiNRgMEx8PR0GIxRB+y2eze2gqQeAXoSCaqqu5bpsWIdyzGvvRr
BW7rdDo2I6ZSKeq7B8x0XV/bwJWAJEnHSMwBDUEQWq5GfsJthUJhlVuv11uckyiGgiH2RWK73RYR
b2cymbG7gnK5vIX9USwWI1yAI/KjLGK7teEI8HN1TizrnZWdRxxsNps8vI3YLpVKbB2EWB6XkMHz
gAlvriYRSW+app1Mpy/jSCRSRSyDUON5nuJGytaAHI/vVPv9p/FischivL96gEP2bGxorhVFqYXD
YQFCScwBYa9EKU1OlAkB+QLEU2AGaJ7PWKlUDiF2BBw4P9Mt/KUoije+5uAv9gGcjD6Kg4wu3AAA
AABJRU5ErkJggg=="""
),
),
"file2": (
"application_edit.png",
"image/png",
b64d(
"""
iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABGdBTUEAAK/INwWK6QAAABl0RVh0
U29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAAJRSURBVBgZpcHda81xHMDx9+d3fudYzuYw
2RaZ5yTWolEiuZpCSjGJFEktUUr8A6ZxQZGHmDtqdrGUXHgoeZqSp1F2bLFWjtkOB8PZzvmd7+dj
v5XaBRfL6yVmxv+QjQeu7l25uuZYJmtxM0AVU8Wpw9RQU8w51AxzDqfKhFjwq6Mjdbj1RN0Zv2ZF
zaloUdwrL2Is4r+y7hRwxs8G5mUzPxmrwcA8hvnmjIZtcxmr3Y09hHwzJZQvOAwwNZyCYqgaThVX
MFzBCD7fJfv8MpHiKvaV3ePV2f07fMwIiSeIGeYJJoao4HmCiIeIQzPXifY+paJqO4lZi/nWPZ/k
rabjvlNHyANMBAQiBiqgakQMCunbxHJviM9bQeZdBzHJUzKhguLJlQnf1BghAmZ4gImAgAjk++8j
P56QmL2GXG8zsfFCz8skA1mQXKbaU3X8ISIgQsgDcun7FL7cJjFnLUMfLyLRr0SLS4hbhiup5Szd
19rpFYKAESKICCERoS95neyHmyTmbmAodQ4vGpAfmEn6YTtTahv4ODiRkGdOCUUAAUSE/uQNfqTa
KFu4jvynJiIxIzcwg/SjF1RsOk9R+QJMlZCvqvwhQFdbM4XvrynIVHpfn2ZSWYyhzHS+PUtSueUC
0cQ0QmpGyE9197TUnwzq1DnUKbXSxOb6S7xtPkjngzbGVVbzvS/FjaGt9DU8xlRRJdTCMDEzRjuy
Z1FwaFe9j+d4eecaPd1dPxNTSlfWHm1v5y/EzBitblXp4JLZ5f6yBbOwaK5tsD+9c33jq/f8w2+m
RSjOllPhkAAAAABJRU5ErkJggg=="""
),
),
},
"forms": {"text": u"example text"},
}
browser_test_cases["firefox3-2pnglongtext"] = {
"data": b64d(
"""
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0xNDkwNDA0NDczOTc4NzE5MTAzMTc1NDcxMTc0
OA0KQ29udGVudC1EaXNwb3NpdGlvbjogZm9ybS1kYXRhOyBuYW1lPSJmaWxlMSI7IGZpbGVuYW1l
PSJhY2NlcHQucG5nIg0KQ29udGVudC1UeXBlOiBpbWFnZS9wbmcNCg0KiVBORw0KGgoAAAANSUhE
UgAAABAAAAAQCAYAAAAf8/9hAAAABGdBTUEAAK/INwWK6QAAABl0RVh0U29mdHdhcmUAQWRvYmUg
SW1hZ2VSZWFkeXHJZTwAAAKfSURBVDjLpZPrS1NhHMf9O3bOdmwDCWREIYKEUHsVJBI7mg3FvCxL
09290jZj2EyLMnJexkgpLbPUanNOberU5taUMnHZUULMvelCtWF0sW/n7MVMEiN64AsPD8/n83uu
cQDi/id/DBT4Dolypw/qsz0pTMbj/WHpiDgsdSUyUmeiPt2+V7SrIM+bSss8ySGdR4abQQv6lrui
6VxsRonrGCS9VEjSQ9E7CtiqdOZ4UuTqnBHO1X7YXl6Daa4yGq7vWO1D40wVDtj4kWQbn94myPGk
CDPdSesczE2sCZShwl8CzcwZ6NiUs6n2nYX99T1cnKqA2EKui6+TwphA5k4yqMayopU5mANV3lNQ
TBdCMVUA9VQh3GuDMHiVcLCS3J4jSLhCGmKCjBEx0xlshjXYhApfMZRP5CyYD+UkG08+xt+4wLVQ
ZA1tzxthm2tEfD3JxARH7QkbD1ZuozaggdZbxK5kAIsf5qGaKMTY2lAU/rH5HW3PLsEwUYy+YCcE
RmIjJpDcpzb6l7th9KtQ69fi09ePUej9l7cx2DJbD7UrG3r3afQHOyCo+V3QQzE35pvQvnAZukk5
zL5qRL59jsKbPzdheXoBZc4saFhBS6AO7V4zqCpiawuptwQG+UAa7Ct3UT0hh9p9EnXT5Vh6t4C2
2QaUDh6HwnECOmcO7K+6kW49DKqS2DrEZCtfuI+9GrNHg4fMHVSO5kE7nAPVkAxKBxcOzsajpS4Y
h4ohUPPWKTUh3PaQEptIOr6BiJjcZXCwktaAGfrRIpwblqOV3YKdhfXOIvBLeREWpnd8ynsaSJoy
ESFphwTtfjN6X1jRO2+FxWtCWksqBApeiFIR9K6fiTpPiigDoadqCEag5YUFKl6Yrciw0VOlhOiv
v/Ff8wtn0KzlebrUYwAAAABJRU5ErkJggg0KLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0x
NDkwNDA0NDczOTc4NzE5MTAzMTc1NDcxMTc0OA0KQ29udGVudC1EaXNwb3NpdGlvbjogZm9ybS1k
YXRhOyBuYW1lPSJmaWxlMiI7IGZpbGVuYW1lPSJhZGQucG5nIg0KQ29udGVudC1UeXBlOiBpbWFn
ZS9wbmcNCg0KiVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABGdBTUEAAK/INwWK
6QAAABl0RVh0U29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAAJvSURBVDjLpZPrS5NhGIf9
W7YvBYOkhlkoqCklWChv2WyKik7blnNris72bi6dus0DLZ0TDxW1odtopDs4D8MDZuLU0kXq61Ci
jSIIasOvv94VTUfLiB74fXngup7nvrnvJABJ/5PfLnTTdcwOj4RsdYmo5glBWP6iOtzwvIKSWstI
0Wgx80SBblpKtE9KQs/We7EaWoT/8wbWP61gMmCH0lMDvokT4j25TiQU/ITFkek9Ow6+7WH2gwsm
ahCPdwyw75uw9HEO2gUZSkfyI9zBPCJOoJ2SMmg46N61YO/rNoa39Xi41oFuXysMfh36/Fp0b7bA
fWAH6RGi0HglWNCbzYgJaFjRv6zGuy+b9It96N3SQvNKiV9HvSaDfFEIxXItnPs23BzJQd6DDEVM
0OKsoVwBG/1VMzpXVWhbkUM2K4oJBDYuGmbKIJ0qxsAbHfRLzbjcnUbFBIpx/qH3vQv9b3U03IQ/
HfFkERTzfFj8w8jSpR7GBE123uFEYAzaDRIqX/2JAtJbDat/COkd7CNBva2cMvq0MGxp0PRSCPF8
BXjWG3FgNHc9XPT71Ojy3sMFdfJRCeKxEsVtKwFHwALZfCUk3tIfNR8XiJwc1LmL4dg141JPKtj3
WUdNFJqLGFVPC4OkR4BxajTWsChY64wmCnMxsWPCHcutKBxMVp5mxA1S+aMComToaqTRUQknLTH6
2kHOVEE+VQnjahscNCy0cMBWsSI0TCQcZc5ALkEYckL5A5noWSBhfm2AecMAjbcRWV0pUTh0HE64
TNf0mczcnnQyu/MilaFJCae1nw2fbz1DnVOxyGTlKeZft/Ff8x1BRssfACjTwQAAAABJRU5ErkJg
gg0KLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0xNDkwNDA0NDczOTc4NzE5MTAzMTc1NDcx
MTc0OA0KQ29udGVudC1EaXNwb3NpdGlvbjogZm9ybS1kYXRhOyBuYW1lPSJ0ZXh0Ig0KDQotLWxv
bmcgdGV4dA0KLS13aXRoIGJvdW5kYXJ5DQotLWxvb2thbGlrZXMtLQ0KLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0xNDkwNDA0NDczOTc4NzE5MTAzMTc1NDcxMTc0OC0tDQo="""
),
"boundary": "---------------------------14904044739787191031754711748",
"files": {
"file1": (
"accept.png",
"image/png",
b64d(
"""
iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABGdBTUEAAK/INwWK6QAAABl0RVh0
U29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAAKfSURBVDjLpZPrS1NhHMf9O3bOdmwDCWRE
IYKEUHsVJBI7mg3FvCxL09290jZj2EyLMnJexkgpLbPUanNOberU5taUMnHZUULMvelCtWF0sW/n
7MVMEiN64AsPD8/n83uucQDi/id/DBT4Dolypw/qsz0pTMbj/WHpiDgsdSUyUmeiPt2+V7SrIM+b
Sss8ySGdR4abQQv6lrui6VxsRonrGCS9VEjSQ9E7CtiqdOZ4UuTqnBHO1X7YXl6Daa4yGq7vWO1D
40wVDtj4kWQbn94myPGkCDPdSesczE2sCZShwl8CzcwZ6NiUs6n2nYX99T1cnKqA2EKui6+TwphA
5k4yqMayopU5mANV3lNQTBdCMVUA9VQh3GuDMHiVcLCS3J4jSLhCGmKCjBEx0xlshjXYhApfMZRP
5CyYD+UkG08+xt+4wLVQZA1tzxthm2tEfD3JxARH7QkbD1ZuozaggdZbxK5kAIsf5qGaKMTY2lAU
/rH5HW3PLsEwUYy+YCcERmIjJpDcpzb6l7th9KtQ69fi09ePUej9l7cx2DJbD7UrG3r3afQHOyCo
+V3QQzE35pvQvnAZukk5zL5qRL59jsKbPzdheXoBZc4saFhBS6AO7V4zqCpiawuptwQG+UAa7Ct3
UT0hh9p9EnXT5Vh6t4C22QaUDh6HwnECOmcO7K+6kW49DKqS2DrEZCtfuI+9GrNHg4fMHVSO5kE7
nAPVkAxKBxcOzsajpS4Yh4ohUPPWKTUh3PaQEptIOr6BiJjcZXCwktaAGfrRIpwblqOV3YKdhfXO
IvBLeREWpnd8ynsaSJoyESFphwTtfjN6X1jRO2+FxWtCWksqBApeiFIR9K6fiTpPiigDoadqCEag
5YUFKl6Yrciw0VOlhOivv/Ff8wtn0KzlebrUYwAAAABJRU5ErkJggg=="""
),
),
"file2": (
"add.png",
"image/png",
b64d(
"""
iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABGdBTUEAAK/INwWK6QAAABl0RVh0
U29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAAJvSURBVDjLpZPrS5NhGIf9W7YvBYOkhlko
qCklWChv2WyKik7blnNris72bi6dus0DLZ0TDxW1odtopDs4D8MDZuLU0kXq61CijSIIasOvv94V
TUfLiB74fXngup7nvrnvJABJ/5PfLnTTdcwOj4RsdYmo5glBWP6iOtzwvIKSWstI0Wgx80SBblpK
tE9KQs/We7EaWoT/8wbWP61gMmCH0lMDvokT4j25TiQU/ITFkek9Ow6+7WH2gwsmahCPdwyw75uw
9HEO2gUZSkfyI9zBPCJOoJ2SMmg46N61YO/rNoa39Xi41oFuXysMfh36/Fp0b7bAfWAH6RGi0Hgl
WNCbzYgJaFjRv6zGuy+b9It96N3SQvNKiV9HvSaDfFEIxXItnPs23BzJQd6DDEVM0OKsoVwBG/1V
MzpXVWhbkUM2K4oJBDYuGmbKIJ0qxsAbHfRLzbjcnUbFBIpx/qH3vQv9b3U03IQ/HfFkERTzfFj8
w8jSpR7GBE123uFEYAzaDRIqX/2JAtJbDat/COkd7CNBva2cMvq0MGxp0PRSCPF8BXjWG3FgNHc9
XPT71Ojy3sMFdfJRCeKxEsVtKwFHwALZfCUk3tIfNR8XiJwc1LmL4dg141JPKtj3WUdNFJqLGFVP
C4OkR4BxajTWsChY64wmCnMxsWPCHcutKBxMVp5mxA1S+aMComToaqTRUQknLTH62kHOVEE+VQnj
ahscNCy0cMBWsSI0TCQcZc5ALkEYckL5A5noWSBhfm2AecMAjbcRWV0pUTh0HE64TNf0mczcnnQy
u/MilaFJCae1nw2fbz1DnVOxyGTlKeZft/Ff8x1BRssfACjTwQAAAABJRU5ErkJggg=="""
),
),
},
"forms": {"text": u"--long text\r\n--with boundary\r\n--lookalikes--"},
}
browser_test_cases["opera8-2png1txt"] = {
"data": b64d(
"""
LS0tLS0tLS0tLS0tekVPOWpRS21MYzJDcTg4YzIzRHgxOQ0KQ29udGVudC1EaXNwb3NpdGlvbjog
Zm9ybS1kYXRhOyBuYW1lPSJmaWxlMSI7IGZpbGVuYW1lPSJhcnJvd19icmFuY2gucG5nIg0KQ29u
dGVudC1UeXBlOiBpbWFnZS9wbmcNCg0KiVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9h
AAAABGdBTUEAAK/INwWK6QAAABl0RVh0U29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAAHY
SURBVDjLlVLPS1RxHJynpVu7KEn0Vt+2l6IO5qGCIsIwCPwD6hTUaSk6REoUHeoQ0qVAMrp0COpY
0SUIPVRgSl7ScCUTst6zIoqg0y7lvpnPt8MWKuuu29w+hxnmx8dzzmE5+l7mxk1u/a3Dd/ejDjSs
II/m3vjJ9MF0yt93ZuTkdD0CnnMO/WOnmsxsJp3yd2zfvA3mHOa+zuHTjy/zojrvHX1YqunAZE9M
lpUcZAaZQBNIZUg9XdPBP5wePuEO7eyGQXg29QL3jz3y1oqwbvkhCuYEOQMp/HeJohCbICMUVwr0
DvZcOnK9u7GmQNmBQLJCgORxkneqRmAs0BFmDi0bW9E72PPda/BikwWi0OEHkNR14MrewsTAZF+l
AAWZEH6LUCwUkUlntrS1tiG5IYlEc6LcjYjSYuncngtdhakbM5dXlhgTNEMYLqB9q49MKgsPjTBX
ntVgkDNIgmI1VY2Q7QzgJ9rx++ci3ofziBYiiELQEUAyhB/D29M3Zy+uIkDIhGYvgeKvIkbHxz6T
evzq6ut+ANh9fldetMn80OzZVVdgLFjBQ0tpEz68jcB4ifx3pQeictVXIEETnBPCKMLEwBIZAPJD
767V/ETGwsjzYYiC6vzEP9asLo3SGuQvAAAAAElFTkSuQmCCDQotLS0tLS0tLS0tLS16RU85alFL
bUxjMkNxODhjMjNEeDE5DQpDb250ZW50LURpc3Bvc2l0aW9uOiBmb3JtLWRhdGE7IG5hbWU9ImZp
bGUyIjsgZmlsZW5hbWU9ImF3YXJkX3N0YXJfYnJvbnplXzEucG5nIg0KQ29udGVudC1UeXBlOiBp
bWFnZS9wbmcNCg0KiVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABGdBTUEAAK/I
NwWK6QAAABl0RVh0U29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAAJvSURBVDjLhZNNSFRR
FIC/N++9eWMzhkl/ZJqFMQMRFvTvImkXSdKiVRAURBRRW1eZA9EqaNOiFlZEtQxKyrJwUS0K+qEQ
zaTE/AtLHR3HmffuvafFNINDWGdz7z2c7+Nyzr2WiFAIffaMBDW1+B0diAgYgxiDiCDG4DU1QfcL
os+fWAXGYUGIUsXiAliUFER+sBAhVCIIVB7QGtEat1oTbcwVz2LMfwR+gPg+oY0bEa3x6sHdUoVd
niMUj0M2i/j+PwVJa2QUu7YWp34D7mqNWdNApD6Ks24dpvcL4gfJRQXevbutjI4lGRzCS9iYukPo
5dvxVqWQvn6k/2uyoudd60LGEhG43VBGyI4j2ADZ7vDJ8DZ9Img4hw4cvO/3UZ1vH3p7lrWRLwGV
neD4y6G84NaOYSoTVYIFIiAGvXI3OWctJv0TW03jZb5gZSfzl9YBpMcIzUwdzQsuVR9EyR3TeCqm
6w5jZiZQMz8xsxOYzDTi50AMVngJNgrnUweRbwMPiLpHrOJDOl9Vh6HD7GyO52qa0VPj6MwUJpNC
5mYQS/DUJLH3zzRp1cqN8YulTUyODBBzt4X6Ou870z2I8ZHsHJLLYNQ8jusQ6+2exJf9BfivKdAy
mKZiaVdodhBRAagAjIbgzxp20lwb6Vp0jADYkQO6IpHfuoqInSJUVoE2HrpyRQ1tic2LC9p3lSHW
Ph2rJfL1MeVP2weWvHp8s3ziNZ49i1q6HrR1YHGBNnt1dG2Z++gC4TdvrqNkK1eHj7ljQ/ujHx6N
yPw8BFIiKPmNpKar7P7xb/zyT9P+o7OYvzzYSUt8U+TzxytodixEfgN3CFlQMNAcMgAAAABJRU5E
rkJggg0KLS0tLS0tLS0tLS0tekVPOWpRS21MYzJDcTg4YzIzRHgxOQ0KQ29udGVudC1EaXNwb3Np
dGlvbjogZm9ybS1kYXRhOyBuYW1lPSJ0ZXh0Ig0KDQpibGFmYXNlbCDDtsOkw7wNCi0tLS0tLS0t
LS0tLXpFTzlqUUttTGMyQ3E4OGMyM0R4MTktLQ0K"""
),
"boundary": "----------zEO9jQKmLc2Cq88c23Dx19",
"files": {
"file1": (
"arrow_branch.png",
"image/png",
b64d(
"""
iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABGdBTUEAAK/INwWK6QAAABl0RVh0
U29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAAHYSURBVDjLlVLPS1RxHJynpVu7KEn0Vt+2
l6IO5qGCIsIwCPwD6hTUaSk6REoUHeoQ0qVAMrp0COpY0SUIPVRgSl7ScCUTst6zIoqg0y7lvpnP
t8MWKuuu29w+hxnmx8dzzmE5+l7mxk1u/a3Dd/ejDjSsII/m3vjJ9MF0yt93ZuTkdD0CnnMO/WOn
msxsJp3yd2zfvA3mHOa+zuHTjy/zojrvHX1YqunAZE9MlpUcZAaZQBNIZUg9XdPBP5wePuEO7eyG
QXg29QL3jz3y1oqwbvkhCuYEOQMp/HeJohCbICMUVwr0DvZcOnK9u7GmQNmBQLJCgORxkneqRmAs
0BFmDi0bW9E72PPda/BikwWi0OEHkNR14MrewsTAZF+lAAWZEH6LUCwUkUlntrS1tiG5IYlEc6Lc
jYjSYuncngtdhakbM5dXlhgTNEMYLqB9q49MKgsPjTBXntVgkDNIgmI1VY2Q7QzgJ9rx++ci3ofz
iBYiiELQEUAyhB/D29M3Zy+uIkDIhGYvgeKvIkbHxz6Tevzq6ut+ANh9fldetMn80OzZVVdgLFjB
Q0tpEz68jcB4ifx3pQeictVXIEETnBPCKMLEwBIZAPJD767V/ETGwsjzYYiC6vzEP9asLo3SGuQv
AAAAAElFTkSuQmCC"""
),
),
"file2": (
"award_star_bronze_1.png",
"image/png",
b64d(
"""
iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABGdBTUEAAK/INwWK6QAAABl0RVh0
U29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAAJvSURBVDjLhZNNSFRRFIC/N++9eWMzhkl/
ZJqFMQMRFvTvImkXSdKiVRAURBRRW1eZA9EqaNOiFlZEtQxKyrJwUS0K+qEQzaTE/AtLHR3Hmffu
vafFNINDWGdz7z2c7+Nyzr2WiFAIffaMBDW1+B0diAgYgxiDiCDG4DU1QfcLos+fWAXGYUGIUsXi
AliUFER+sBAhVCIIVB7QGtEat1oTbcwVz2LMfwR+gPg+oY0bEa3x6sHdUoVdniMUj0M2i/j+PwVJ
a2QUu7YWp34D7mqNWdNApD6Ks24dpvcL4gfJRQXevbutjI4lGRzCS9iYukPo5dvxVqWQvn6k/2uy
oudd60LGEhG43VBGyI4j2ADZ7vDJ8DZ9Img4hw4cvO/3UZ1vH3p7lrWRLwGVneD4y6G84NaOYSoT
VYIFIiAGvXI3OWctJv0TW03jZb5gZSfzl9YBpMcIzUwdzQsuVR9EyR3TeCqm6w5jZiZQMz8xsxOY
zDTi50AMVngJNgrnUweRbwMPiLpHrOJDOl9Vh6HD7GyO52qa0VPj6MwUJpNC5mYQS/DUJLH3zzRp
1cqN8YulTUyODBBzt4X6Ou870z2I8ZHsHJLLYNQ8jusQ6+2exJf9BfivKdAymKZiaVdodhBRAagA
jIbgzxp20lwb6Vp0jADYkQO6IpHfuoqInSJUVoE2HrpyRQ1tic2LC9p3lSHWPh2rJfL1MeVP2weW
vHp8s3ziNZ49i1q6HrR1YHGBNnt1dG2Z++gC4TdvrqNkK1eHj7ljQ/ujHx6NyPw8BFIiKPmNpKar
7P7xb/zyT9P+o7OYvzzYSUt8U+TzxytodixEfgN3CFlQMNAcMgAAAABJRU5ErkJggg=="""
),
),
},
"forms": {"text": u"blafasel öäü"},
}
browser_test_cases["webkit3-2png1txt"] = {
"data": b64d(
"""
LS0tLS0tV2ViS2l0Rm9ybUJvdW5kYXJ5amRTRmhjQVJrOGZ5R055Ng0KQ29udGVudC1EaXNwb3Np
dGlvbjogZm9ybS1kYXRhOyBuYW1lPSJmaWxlMSI7IGZpbGVuYW1lPSJndGstYXBwbHkucG5nIg0K
Q29udGVudC1UeXBlOiBpbWFnZS9wbmcNCg0KiVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACN
iR0NAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAN1wAADdcBQiibeAAAABl0RVh0U29mdHdhcmUA
d3d3Lmlua3NjYXBlLm9yZ5vuPBoAAANnSURBVDiNldJ9aJVVHAfw7znPuS/PvW4405WbLWfbsBuN
bramq5Tp7mLqIFPXINlwpAitaCAPjWKgBdXzR2TBpEZoadAyCVGndttCFNxqLXORK7x3y704NlzX
zfs8d89znuf0R/fKk03xHvjCOZxzPpzzO4cIIZBuC6nsGYmRrwFMWVw0hxV+PDVH0gVDKvNSRgZf
rm5+QCISOi58pY1MXhm1uHg+rPDfabqnoxJpKQ2snf/gwgKY3ut4pfodX/lTGwokRt4AgLTAkMoK
3cz7enVJg/fyTCdGE/3gwsTo+LBu2+J82qDE6IEXyrd7YvYwbpgjyPOtQHTikvhz+NKgsNGWFhhS
WU3uwqWPBx9aRwfjPTCFgXx5JY50tumWKbaFFS7uGQypLINKZH/tukb/kN6DSSOCFfO3oqu/3biZ
iH0ZVvjF1Np7AiVG31sdXO/P8GfhqtaLbE8BqOlBZ++xuMXFbudaljxBDnNJHbZlFwF407bFh6kr
hFRW7Jcztlc9Uee5HD+DaWsCTy/YgbaOvZpl2Y1hhU87QVLxvpQpMfpzfeXuZfmLA/Rw1wdaZOS3
Pm7aNQDGJUZ/qatqKs5etIj03TiKQv8aaFOWOHRm30+nm4zS229DmVs6Ulm6OW/50iD9G1Hsqnrb
t2lNwyoXYwMAPnk4N1D4aO4qEtW6wagHeZ4SfNP1mW6Zdt1c5WEE8Lll5qKCQbdiGIh/h+JlK6Wi
xcHM4z2fb9tUtkOO6hdw3Yzi2axdON33xaxuzLSGFf7HXCA1Dav+5Nn2Kyd7DyYK5bXw0QWIJM4j
7rqGmvKd8gwZw5D+I3K8jyGhmzj366lpi4uWOz0gEUIgpDKPxGjr/VlLanZubJknXLMYiH8Pjccw
K26C27Oouu8tfHysWbs6HnkxrPATdwVTLaSyzW63+8BLzzX6H1lSSrtjBzFpRPBkZi0mrk3Z7Z2t
P5xqMiruhP0PTKL5EqMnSgKr87eUvSqPGf3Ipsux53CDpie0QFjhf90NhBDiVlJ1LaqmcqXq2l/7
aU7826E94rWjQb3iXbYXgAzAC8ADwI1//zF1OkQIAUIIBSAlc6tfpkjr52XTj4SFi937eP3MmDAB
2I5YyaT63AmyuVDHmAAQt0FOzARg/aeGhBCS3EjnCBygMwKAnXL+AdDkiZ/xYgR3AAAAAElFTkSu
QmCCDQotLS0tLS1XZWJLaXRGb3JtQm91bmRhcnlqZFNGaGNBUms4ZnlHTnk2DQpDb250ZW50LURp
c3Bvc2l0aW9uOiBmb3JtLWRhdGE7IG5hbWU9ImZpbGUyIjsgZmlsZW5hbWU9Imd0ay1uby5wbmci
DQpDb250ZW50LVR5cGU6IGltYWdlL3BuZw0KDQqJUE5HDQoaCgAAAA1JSERSAAAAFAAAABQIBgAA
AI2JHQ0AAAAEc0JJVAgICAh8CGSIAAAACXBIWXMAAA3XAAAN1wFCKJt4AAAAGXRFWHRTb2Z0d2Fy
ZQB3d3cuaW5rc2NhcGUub3Jnm+48GgAAAzVJREFUOI2tlM9rG0cUxz8zu7OzsqhtyTIONDG2g9ue
UnIwFEqCwYUeTC+99u5T/4FAKKUEeuh/4FPvOZXiWw3GpRRcGjW0h1KwLLe4juOspJUlS95frwft
CkdJbh347o95bz+8mfedVSLC/zncNwUeKnVfw4YD6yncBXCgnsJeBruPRPZf952arPCBUhUL216p
tLm0vGxmq1X3rbk5AC6CgE67nTQbjTgaDHauYOtrkfYbgV8o9SHw/crKytR7d+5YDXhzc2hjEBGy
OCZutciU4s+nT68ajcYl8MlXIj+9AnygVMXA4draWqVWqaBLJcz09ChLBBGBXHEYImlK0G5zcHDQ
juF2UakuyBa2l27dmqqWywxOTpAkIWq1iILgFWVxzOXREZVymaXFxSkL2wVHFw0w1m6urq7asF7H
sZa01SINAiQIyIp7q0XaapEEAcp1CZ884Z3VVWus3Xyo1P1xlzVsvL2wYJLTUwhDdBiiHAedL1EV
+yxCJoJkGTpJkDAkOj3l5o0b5vD4eAPYd3M7rM+WSq7qdLCAOjtD+z46y1DXgJkIZNmIHUWj3E6H
melp14H1cYUZ3J31fZyTE1zA7fVw+n0cERSg8v2RUS5pPqeArNtlZmGBwqtjY+skwYig80lXBCff
5OvANFeSxzIRojge5+j8Uu9dXOD5Pt6o41jAz1W69uznMQ8wgOf79LpdNNTHwBT22r1ebDwPt0h8
DbQAFTADGGvp9PtxCntjYAa7zW43wVpca3HyZZsJaAF0C/k+4vs0wzDJYHcMfCSyHyfJzq/n50NT
raKVwhl1H3cCpAsphVut8tvz58M4SXaKn8X4pFzB1lG/P2gOBuhaDYxBJhqR5e8Yg56f53gwoNHr
Da9gq+CMz7JSauoz+HgFvr1trX+vXPZKUYSbJCMTA+K6xMYw8Dx+7Pfjw+Fw+Dt8/h38ALwQkeg6
cAaoLcLyp/BlVam1dz3PWdDaqbkjdwVpymmaZn9FUXouUn8M3zyDJvAC+PclYA6dBmpA5SO4dxM+
mIf3fVgCGMLfz+CPf+CXPfgZCIFz4ExEkpeWfH0opZzcKYUsI38nIy5D4BK4kgnAfwLblOaQdQsS
AAAAAElFTkSuQmCCDQotLS0tLS1XZWJLaXRGb3JtQm91bmRhcnlqZFNGaGNBUms4ZnlHTnk2DQpD
b250ZW50LURpc3Bvc2l0aW9uOiBmb3JtLWRhdGE7IG5hbWU9InRleHQiDQoNCnRoaXMgaXMgYW5v
dGhlciB0ZXh0IHdpdGggw7xtbMOkw7x0cw0KLS0tLS0tV2ViS2l0Rm9ybUJvdW5kYXJ5amRTRmhj
QVJrOGZ5R055Ni0tDQo="""
),
"boundary": "----WebKitFormBoundaryjdSFhcARk8fyGNy6",
"files": {
"file1": (
"gtk-apply.png",
"image/png",
b64d(
"""
iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz
AAAN1wAADdcBQiibeAAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAANnSURB
VDiNldJ9aJVVHAfw7znPuS/PvW4405WbLWfbsBuNbramq5Tp7mLqIFPXINlwpAitaCAPjWKgBdXz
R2TBpEZoadAyCVGndttCFNxqLXORK7x3y704NlzXzfs8d89znuf0R/fKk03xHvjCOZxzPpzzO4cI
IZBuC6nsGYmRrwFMWVw0hxV+PDVH0gVDKvNSRgZfrm5+QCISOi58pY1MXhm1uHg+rPDfabqnoxJp
KQ2snf/gwgKY3ut4pfodX/lTGwokRt4AgLTAkMoK3cz7enVJg/fyTCdGE/3gwsTo+LBu2+J82qDE
6IEXyrd7YvYwbpgjyPOtQHTikvhz+NKgsNGWFhhSWU3uwqWPBx9aRwfjPTCFgXx5JY50tumWKbaF
FS7uGQypLINKZH/tukb/kN6DSSOCFfO3oqu/3biZiH0ZVvjF1Np7AiVG31sdXO/P8GfhqtaLbE8B
qOlBZ++xuMXFbudaljxBDnNJHbZlFwF407bFh6krhFRW7Jcztlc9Uee5HD+DaWsCTy/YgbaOvZpl
2Y1hhU87QVLxvpQpMfpzfeXuZfmLA/Rw1wdaZOS3Pm7aNQDGJUZ/qatqKs5etIj03TiKQv8aaFOW
OHRm30+nm4zS229DmVs6Ulm6OW/50iD9G1Hsqnrbt2lNwyoXYwMAPnk4N1D4aO4qEtW6wagHeZ4S
fNP1mW6Zdt1c5WEE8Lll5qKCQbdiGIh/h+JlK6WixcHM4z2fb9tUtkOO6hdw3Yzi2axdON33xaxu
zLSGFf7HXCA1Dav+5Nn2Kyd7DyYK5bXw0QWIJM4j7rqGmvKd8gwZw5D+I3K8jyGhmzj366lpi4uW
Oz0gEUIgpDKPxGjr/VlLanZubJknXLMYiH8PjccwK26C27Oouu8tfHysWbs6HnkxrPATdwVTLaSy
zW63+8BLzzX6H1lSSrtjBzFpRPBkZi0mrk3Z7Z2tP5xqMiruhP0PTKL5EqMnSgKr87eUvSqPGf3I
psux53CDpie0QFjhf90NhBDiVlJ1LaqmcqXq2l/7aU7826E94rWjQb3iXbYXgAzAC8ADwI1//zF1
OkQIAUIIBSAlc6tfpkjr52XTj4SFi937eP3MmDAB2I5YyaT63AmyuVDHmAAQt0FOzARg/aeGhBCS
3EjnCBygMwKAnXL+AdDkiZ/xYgR3AAAAAElFTkSuQmCC"""
),
),
"file2": (
"gtk-no.png",
"image/png",
b64d(
"""
iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz
AAAN1wAADdcBQiibeAAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAAM1SURB
VDiNrZTPaxtHFMc/M7uzs7KobckyDjQxtoPbnlJyMBRKgsGFHkwvvfbuU/+BQCilBHrof+BT7zmV
4lsNxqUUXBo1tIdSsCy3uI7jrKSVJUveX68H7QpHSW4d+O6PeW8/vJn3nVUiwv853DcFHip1X8OG
A+sp3AVwoJ7CXga7j0T2X/edmqzwgVIVC9teqbS5tLxsZqtV9625OQAugoBOu500G404Ggx2rmDr
a5H2G4FfKPUh8P3KysrUe3fuWA14c3NoYxARsjgmbrXIlOLPp0+vGo3GJfDJVyI/vQJ8oFTFwOHa
2lqlVqmgSyXM9PQoSwQRgVxxGCJpStBuc3Bw0I7hdlGpLsgWtpdu3ZqqlssMTk6QJCFqtYiC4BVl
cczl0RGVcpmlxcUpC9sFRxcNMNZurq6u2rBex7GWtNUiDQIkCMiKe6tF2mqRBAHKdQmfPOGd1VVr
rN18qNT9cZc1bLy9sGCS01MIQ3QYohwHnS9RFfssQiaCZBk6SZAwJDo95eaNG+bw+HgD2HdzO6zP
lkqu6nSwgDo7Q/s+OstQ14CZCGTZiB1Fo9xOh5npadeB9XGFGdyd9X2ckxNcwO31cPp9HBEUoPL9
kVEuaT6ngKzbZWZhgcKrY2PrJMGIoPNJVwQn3+TrwDRXkscyEaI4Hufo/FLvXVzg+T7eqONYwM9V
uvbs5zEPMIDn+/S6XTTUx8AU9tq9Xmw8D7dIfA20ABUwAxhr6fT7cQp7Y2AGu81uN8FaXGtx8mWb
CWgBdAv5PuL7NMMwyWB3DHwksh8nyc6v5+dDU62ilcIZdR93AqQLKYVbrfLb8+fDOEl2ip/F+KRc
wdZRvz9oDgboWg2MQSYakeXvGIOen+d4MKDR6w2vYKvgjM+yUmrqM/h4Bb69ba1/r1z2SlGEmyQj
EwPiusTGMPA8fuz348PhcPg7fP4d/AC8EJHoOnAGqC3C8qfwZVWptXc9z1nQ2qm5I3cFacppmmZ/
RVF6LlJ/DN88gybwAvj3JWAOnQZqQOUjuHcTPpiH931YAhjC38/gj3/glz34GQiBc+BMRJKXlnx9
KKWc3CmFLCN/JyMuQ+ASuJIJwH8C25TmkHULEgAAAABJRU5ErkJggg=="""
),
),
},
"forms": {"text": u"this is another text with ümläüts"},
}
browser_test_cases["ie6-2png1txt"] = {
"data": b64d(
"""
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS03ZDkxYjAzYTIwMTI4DQpDb250ZW50LURpc3Bv
c2l0aW9uOiBmb3JtLWRhdGE7IG5hbWU9ImZpbGUxIjsgZmlsZW5hbWU9IkM6XFB5dGhvbjI1XHd6
dGVzdFx3ZXJremV1Zy1tYWluXHRlc3RzXG11bHRpcGFydFxmaXJlZm94My0ycG5nMXR4dFxmaWxl
MS5wbmciDQpDb250ZW50LVR5cGU6IGltYWdlL3gtcG5nDQoNColQTkcNChoKAAAADUlIRFIAAAAQ
AAAAEAgGAAAAH/P/YQAAAARnQU1BAACvyDcFiukAAAAZdEVYdFNvZnR3YXJlAEFkb2JlIEltYWdl
UmVhZHlxyWU8AAABnUlEQVQ4y6VTMWvCQBS+qwEFB10KGaS1P6FDpw7SrVvzAwRRx04VCk4K6iAo
DhLXdhFcW9qhZCk4FQoW0gp2U4lQRDAUS4hJmn5Xgg2lsQ198PHu3b3vu5d3L9S2bfIf47wOer1e
wzTNtGEYBP48kUjkfsrb8BIAMb1cLovwRfi07wrYzcCr4/1/Am4FzzhzBGZeefR7E7vd7j0Iu4wY
jUYDBMfD0dBiMUQfstns3toKkHgF6EgmqqruW6bFiHcsxr70awVu63Q6NiOmUinquwfMdF1f28CV
gCRJx0jMAQ1BEFquRn7CbYVCYZVbr9dbnJMohoIh9kViu90WEW9nMpmxu4JyubyF/VEsFiNcgCPy
oyxiu7XhCPBzdU4s652VnUccbDabPLyN2C6VSmwdhFgel5DB84AJb64mEUlvmqadTKcv40gkUkUs
g1DjeZ7iRsrWgByP71T7/afxYrHIYry/eoBD9mxsaK4VRamFw2EBQknMAWGvRClNTpQJAfkCxFNg
Bmiez1ipVA4hdgQcOD/TLfylKIo3vubgL/YBnIw+ioOMLtwAAAAASUVORK5CYIINCi0tLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tN2Q5MWIwM2EyMDEyOA0KQ29udGVudC1EaXNwb3NpdGlvbjog
Zm9ybS1kYXRhOyBuYW1lPSJmaWxlMiI7IGZpbGVuYW1lPSJDOlxQeXRob24yNVx3enRlc3Rcd2Vy
a3pldWctbWFpblx0ZXN0c1xtdWx0aXBhcnRcZmlyZWZveDMtMnBuZzF0eHRcZmlsZTIucG5nIg0K
Q29udGVudC1UeXBlOiBpbWFnZS94LXBuZw0KDQqJUE5HDQoaCgAAAA1JSERSAAAAEAAAABAIBgAA
AB/z/2EAAAAEZ0FNQQAAr8g3BYrpAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccll
PAAAAlFJREFUGBmlwd1rzXEcwPH353d+51jO5jDZFpnnJNaiUSK5mkJKMYkUSS1RSvwDpnFBkYeY
O2p2sZRceCh5mpKnUXZssVaO2Q4Hw9nO+Z3v52O/ldoFF8vrJWbG/5CNB67uXbm65lgma3EzQBVT
xanD1FBTzDnUDHMOp8qEWPCroyN1uPVE3Rm/ZkXNqWhR3CsvYiziv7LuFHDGzwbmZTM/GavBwDyG
+eaMhm1zGavdjT2EfDMllC84DDA1nIJiqBpOFVcwXMEIPt8l+/wykeIq9pXd49XZ/Tt8zAiJJ4gZ
5gkmhqjgeYKIh4hDM9eJ9j6lomo7iVmL+dY9n+StpuO+U0fIA0wEBCIGKqBqRAwK6dvEcm+Iz1tB
5l0HMclTMqGC4smVCd/UGCECZniAiYCACOT77yM/npCYvYZcbzOx8ULPyyQDWZBcptpTdfwhIiBC
yANy6fsUvtwmMWctQx8vItGvRItLiFuGK6nlLN3X2ukVgoARIogIIRGhL3md7IebJOZuYCh1Di8a
kB+YSfphO1NqG/g4OJGQZ04JRQABRIT+5A1+pNooW7iO/KcmIjEjNzCD9KMXVGw6T1H5AkyVkK+q
/CFAV1szhe+vKchUel+fZlJZjKHMdL49S1K55QLRxDRCakbIT3X3tNSfDOrUOdQptdLE5vpLvG0+
SOeDNsZVVvO9L8WNoa30NTzGVFEl1MIwMTNGO7JnUXBoV72P53h55xo93V0/E1NKV9YebW/nL8TM
GK1uVengktnl/rIFs7Borm2wP71zfeOr9/zDb6ZFKM6WU+GQAAAAAElFTkSuQmCCDQotLS0tLS0t
LS0tLS0tLS0tLS0tLS0tLS0tLS0tLTdkOTFiMDNhMjAxMjgNCkNvbnRlbnQtRGlzcG9zaXRpb246
IGZvcm0tZGF0YTsgbmFtZT0idGV4dCINCg0KaWU2IHN1Y2tzIDotLw0KLS0tLS0tLS0tLS0tLS0t
LS0tLS0tLS0tLS0tLS03ZDkxYjAzYTIwMTI4LS0NCg=="""
),
"boundary": "---------------------------7d91b03a20128",
"files": {
"file1": (
"file1.png",
"image/x-png",
b64d(
"""
iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABGdBTUEAAK/INwWK6QAAABl0RVh0
U29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAAGdSURBVDjLpVMxa8JAFL6rAQUHXQoZpLU/
oUOnDtKtW/MDBFHHThUKTgrqICgOEtd2EVxb2qFkKTgVChbSCnZTiVBEMBRLiEmafleCDaWxDX3w
8e7dve+7l3cv1LZt8h/jvA56vV7DNM20YRgE/jyRSOR+ytvwEgAxvVwui/BF+LTvCtjNwKvj/X8C
bgXPOHMEZl559HsTu93uPQi7jBiNRgMEx8PR0GIxRB+y2eze2gqQeAXoSCaqqu5bpsWIdyzGvvRr
BW7rdDo2I6ZSKeq7B8x0XV/bwJWAJEnHSMwBDUEQWq5GfsJthUJhlVuv11uckyiGgiH2RWK73RYR
b2cymbG7gnK5vIX9USwWI1yAI/KjLGK7teEI8HN1TizrnZWdRxxsNps8vI3YLpVKbB2EWB6XkMHz
gAlvriYRSW+app1Mpy/jSCRSRSyDUON5nuJGytaAHI/vVPv9p/FischivL96gEP2bGxorhVFqYXD
YQFCScwBYa9EKU1OlAkB+QLEU2AGaJ7PWKlUDiF2BBw4P9Mt/KUoije+5uAv9gGcjD6Kg4wu3AAA
AABJRU5ErkJggg=="""
),
),
"file2": (
"file2.png",
"image/x-png",
b64d(
"""
iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABGdBTUEAAK/INwWK6QAAABl0RVh0
U29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAAJRSURBVBgZpcHda81xHMDx9+d3fudYzuYw
2RaZ5yTWolEiuZpCSjGJFEktUUr8A6ZxQZGHmDtqdrGUXHgoeZqSp1F2bLFWjtkOB8PZzvmd7+dj
v5XaBRfL6yVmxv+QjQeu7l25uuZYJmtxM0AVU8Wpw9RQU8w51AxzDqfKhFjwq6Mjdbj1RN0Zv2ZF
zaloUdwrL2Is4r+y7hRwxs8G5mUzPxmrwcA8hvnmjIZtcxmr3Y09hHwzJZQvOAwwNZyCYqgaThVX
MFzBCD7fJfv8MpHiKvaV3ePV2f07fMwIiSeIGeYJJoao4HmCiIeIQzPXifY+paJqO4lZi/nWPZ/k
rabjvlNHyANMBAQiBiqgakQMCunbxHJviM9bQeZdBzHJUzKhguLJlQnf1BghAmZ4gImAgAjk++8j
P56QmL2GXG8zsfFCz8skA1mQXKbaU3X8ISIgQsgDcun7FL7cJjFnLUMfLyLRr0SLS4hbhiup5Szd
19rpFYKAESKICCERoS95neyHmyTmbmAodQ4vGpAfmEn6YTtTahv4ODiRkGdOCUUAAUSE/uQNfqTa
KFu4jvynJiIxIzcwg/SjF1RsOk9R+QJMlZCvqvwhQFdbM4XvrynIVHpfn2ZSWYyhzHS+PUtSueUC
0cQ0QmpGyE9197TUnwzq1DnUKbXSxOb6S7xtPkjngzbGVVbzvS/FjaGt9DU8xlRRJdTCMDEzRjuy
Z1FwaFe9j+d4eecaPd1dPxNTSlfWHm1v5y/EzBitblXp4JLZ5f6yBbOwaK5tsD+9c33jq/f8w2+m
RSjOllPhkAAAAABJRU5ErkJggg=="""
),
),
},
"forms": {"text": u"ie6 sucks :-/"},
}
| TestBrokenMultipart |
python | fluentpython__example-code | 03-dict-set/strkeydict0.py | {
"start": 604,
"end": 1046
} | class ____(dict): # <1>
def __missing__(self, key):
if isinstance(key, str): # <2>
raise KeyError(key)
return self[str(key)] # <3>
def get(self, key, default=None):
try:
return self[key] # <4>
except KeyError:
return default # <5>
def __contains__(self, key):
return key in self.keys() or str(key) in self.keys() # <6>
# END STRKEYDICT0
| StrKeyDict0 |
python | google__pytype | pytype/overlays/enum_overlay.py | {
"start": 2491,
"end": 7909
} | class ____(abstract.PyTDClass):
"""Overlays enum.Enum."""
def __init__(self, name, ctx, module):
super().__init__(name, ctx.loader.lookup_pytd(module, name), ctx)
def make_class(self, node, props):
"""Check the members for errors, then create the enum class."""
# TODO(tsudol): Handle decorators: @enum.unique, for example.
# make_class intercepts the class creation for enums in order to check for
# errors. EnumMeta turns the class into a full enum, but that's too late for
# proper error checking.
# TODO(tsudol): Check enum validity.
# Enums have a specific ordering for base classes:
# https://docs.python.org/3/library/enum.html#restricted-enum-subclassing
# Mostly, we just care that the last base is some kind of Enum.
# It should be impossible for bases to be empty.
props.bases = props.bases or [self.to_variable(node)]
last_base = props.bases[-1]
if not any(b.is_enum for b in last_base.data):
msg = (
"The last base class for an enum must be enum.Enum or a subclass "
"of enum.Enum"
)
self.ctx.errorlog.base_class_error(
self.ctx.vm.frames, last_base, details=msg
)
return node, self.ctx.new_unsolvable(node)
props.metaclass_var = props.metaclass_var or (
self.ctx.vm.loaded_overlays["enum"].members["EnumMeta"]
)
props.class_type = EnumInstance
return self.ctx.make_class(node, props)
def call(self, node, func, args, alias_map=None):
"""Implements the behavior of the enum functional API."""
# Because of how this is called, we supply our own "self" argument.
# See abstract.Class._call_new_and_init.
args = args.simplify(node, self.ctx)
args = args.replace(posargs=(self.ctx.new_unsolvable(node),) + args.posargs)
# It's possible that this class has been called in order to look up an enum
# member, e.g. on something annotated as Type[Enum].
# First, check the lookup API. If that succeeds, return the result.
# If not, check against the functional API.
# Note that super().call or _call_new_and_init won't work here, because
# they don't raise FailedFunctionCall.
node, pytd_new_var = self.ctx.attribute_handler.get_attribute(
node, self, "__new__", self.to_binding(node)
)
pytd_new = abstract_utils.get_atomic_value(pytd_new_var)
# There are 2 signatures for Enum.__new__. The one with fewer arguments is
# for looking up values, and the other is for the functional API.
# I don't think we have a guarantee of ordering for signatures, so choose
# them based on parameter count.
lookup_sig, api_sig = sorted(
(s.signature for s in pytd_new.signatures),
key=lambda s: s.maximum_param_count(),
)
lookup_new = abstract.SimpleFunction(lookup_sig, self.ctx)
try:
return lookup_new.call(node, None, args, alias_map)
except error_types.FailedFunctionCall as e:
log.info("Called Enum.__new__ as lookup, but failed:\n%s", e)
api_new = abstract.SimpleFunction(api_sig, self.ctx)
api_new.call(node, None, args, alias_map)
# At this point, we know this is a functional API call.
argmap = {name: var for name, var, _ in api_sig.iter_args(args)}
cls_name_var = argmap["value"]
try:
names = abstract_utils.get_atomic_python_constant(argmap["names"])
except abstract_utils.ConversionError as e:
log.info("Failed to unwrap values in enum functional interface:\n%s", e)
return node, self.ctx.new_unsolvable(node)
if isinstance(names, str):
names = names.replace(",", " ").split()
fields = {name: self.ctx.convert.build_int(node) for name in names}
elif isinstance(names, dict):
# Dict keys are strings, not strings in variables. The values are
# variables, they don't need to be changed.
fields = names
else:
# List of names, or list of (name, value) pairs.
try:
possible_pairs = [
abstract_utils.get_atomic_python_constant(p) for p in names
]
except abstract_utils.ConversionError as e:
log.debug("Failed to unwrap possible enum field pairs:\n %s", e)
return node, self.ctx.new_unsolvable(node)
if not possible_pairs:
fields = {}
elif isinstance(possible_pairs[0], str):
fields = {
name: self.ctx.convert.build_int(node) for name in possible_pairs
}
else:
# List of (name_var, value_var) pairs.
# The earlier get_atomic_python_constant call only unwrapped the tuple,
# so the values in the tuple still need to be unwrapped.
try:
fields = {
abstract_utils.get_atomic_python_constant(name): value
for name, value in possible_pairs
}
except abstract_utils.ConversionError as e:
log.debug("Failed to unwrap field names for enum:\n %s", e)
return node, self.ctx.new_unsolvable(node)
cls_dict = abstract.Dict(self.ctx)
cls_dict.update(node, fields)
metaclass = self.ctx.vm.loaded_overlays["enum"].members["EnumMeta"]
props = class_mixin.ClassBuilderProperties(
name_var=cls_name_var,
bases=[self.to_variable(node)],
class_dict_var=cls_dict.to_variable(node),
metaclass_var=metaclass,
class_type=EnumInstance,
)
return self.ctx.make_class(node, props)
| EnumBuilder |
python | vyperlang__vyper | vyper/semantics/analysis/imports.py | {
"start": 2430,
"end": 13381
} | class ____:
seen: OrderedSet[vy_ast.Module]
_compiler_inputs: dict[CompilerInput, vy_ast.Module]
toplevel_module: vy_ast.Module
def __init__(self, input_bundle: InputBundle, graph: _ImportGraph, module_ast: vy_ast.Module):
self.input_bundle = input_bundle
self.graph = graph
self.toplevel_module = module_ast
self._ast_of: dict[int, vy_ast.Module] = {}
self.seen = OrderedSet()
# keep around compiler inputs so when we construct the output
# bundle, we have access to the compiler input for each module
self._compiler_inputs = {}
self._integrity_sum = None
# should be all system paths + topmost module path
self.absolute_search_paths = input_bundle.search_paths.copy()
def resolve_imports(self):
self._resolve_imports_r(self.toplevel_module)
self._integrity_sum = self._calculate_integrity_sum_r(self.toplevel_module)
@property
def compiler_inputs(self) -> dict[CompilerInput, vy_ast.Module]:
return self._compiler_inputs
def _calculate_integrity_sum_r(self, module_ast: vy_ast.Module):
acc = [sha256sum(module_ast.full_source_code)]
for s in module_ast.get_children((vy_ast.Import, vy_ast.ImportFrom)):
for info in s._metadata["import_infos"]:
if info.compiler_input.path.suffix in (".vyi", ".json"):
# NOTE: this needs to be redone if interfaces can import other interfaces
acc.append(info.compiler_input.sha256sum)
else:
acc.append(self._calculate_integrity_sum_r(info.parsed))
return sha256sum("".join(acc))
def _resolve_imports_r(self, module_ast: vy_ast.Module):
if module_ast in self.seen:
return
with self.graph.enter_path(module_ast):
for node in module_ast.body:
with tag_exceptions(node):
if isinstance(node, vy_ast.Import):
self._handle_Import(node)
elif isinstance(node, vy_ast.ImportFrom):
self._handle_ImportFrom(node)
self.seen.add(module_ast)
def _handle_Import(self, node: vy_ast.Import):
# import x.y as y
self._add_imports(node, 0, "")
def _handle_ImportFrom(self, node: vy_ast.ImportFrom):
# from m.n[module_prefix] import x as y
module_prefix = node.module or ""
if module_prefix:
module_prefix += "."
self._add_imports(node, node.level, module_prefix)
def _add_imports(
self, import_node: vy_ast.Import | vy_ast.ImportFrom, level: int, module_prefix: str
) -> None:
for alias_node in import_node.names:
# x.y[name] as y[alias]
name = alias_node.name
alias = alias_node.asname
if alias is None:
alias = name
# don't handle things like `import x.y`
if "." in alias:
msg = "import requires an accompanying `as` statement"
suggested_alias = name[name.rfind(".") + 1 :]
hint = f"try `import {name} as {suggested_alias}`"
raise StructureException(msg, alias_node, hint=hint)
qualified_module_name = module_prefix + name
# Set on alias_node for more precise error messages
compiler_input, ast = self._load_import(alias_node, level, qualified_module_name, alias)
self._compiler_inputs[compiler_input] = ast
if "import_infos" not in import_node._metadata:
import_node._metadata["import_infos"] = list()
import_node._metadata["import_infos"].append(
ImportInfo(alias, qualified_module_name, compiler_input, ast)
)
# load an InterfaceT or ModuleInfo from an import.
# raises FileNotFoundError
def _load_import(
self, node: vy_ast.VyperNode, level: int, module_str: str, alias: str
) -> tuple[CompilerInput, Any]:
if _is_builtin(level, module_str):
return _load_builtin_import(level, module_str)
path = _import_to_path(level, module_str)
if path in self.graph.imported_modules:
previous_import_stmt = self.graph.imported_modules[path]
raise DuplicateImport(f"{alias} imported more than once!", previous_import_stmt, node)
self.graph.imported_modules[path] = node
err = None
try:
path_vy = path.with_suffix(".vy")
file = self._load_file(path_vy, level)
assert isinstance(file, FileInput) # mypy hint
module_ast = self._ast_from_file(file)
self._resolve_imports_r(module_ast)
return file, module_ast
except FileNotFoundError as e:
# escape `e` from the block scope, it can make things
# easier to debug.
err = e
try:
file = self._load_file(path.with_suffix(".vyi"), level)
assert isinstance(file, FileInput) # mypy hint
module_ast = self._ast_from_file(file)
self._resolve_imports_r(module_ast)
return file, module_ast
except FileNotFoundError:
pass
try:
file = self._load_file(path.with_suffix(".json"), level)
if isinstance(file, FileInput):
file = try_parse_abi(file)
assert isinstance(file, JSONInput) # mypy hint
return file, file.data
except FileNotFoundError:
pass
hint = None
if module_str.startswith("vyper.interfaces"):
hint = "try renaming `vyper.interfaces` to `ethereum.ercs`"
# copy search_paths, makes debugging a bit easier
search_paths = self.input_bundle.search_paths.copy() # noqa: F841
raise ModuleNotFound(module_str, hint=hint) from err
def _load_file(self, path: PathLike, level: int) -> CompilerInput:
ast = self.graph.current_module
search_paths: list[PathLike] # help mypy
if level != 0: # relative import
search_paths = [Path(ast.resolved_path).parent]
else:
search_paths = self.absolute_search_paths
with self.input_bundle.temporary_search_paths(search_paths):
return self.input_bundle.load_file(path)
def _ast_from_file(self, file: FileInput) -> vy_ast.Module:
# cache ast if we have seen it before.
# this gives us the additional property of object equality on
# two ASTs produced from the same source
ast_of = self._ast_of
if file.source_id not in ast_of:
ast_of[file.source_id] = _parse_ast(file)
return ast_of[file.source_id]
def _parse_ast(file: FileInput) -> vy_ast.Module:
module_path = file.resolved_path # for error messages
try:
# try to get a relative path, to simplify the error message
cwd = Path(".")
if module_path.is_absolute():
cwd = cwd.resolve()
module_path = module_path.relative_to(cwd)
except ValueError:
# we couldn't get a relative path (cf. docs for Path.relative_to),
# use the resolved path given to us by the InputBundle
pass
is_interface = file.resolved_path.suffix == ".vyi"
ret = vy_ast.parse_to_ast(
file.source_code,
source_id=file.source_id,
module_path=module_path.as_posix(),
resolved_path=file.resolved_path.as_posix(),
is_interface=is_interface,
)
return ret
# convert an import to a path (without suffix)
def _import_to_path(level: int, module_str: str) -> PurePath:
base_path = ""
if level > 1:
base_path = "../" * (level - 1)
elif level == 1:
base_path = "./"
return PurePath(f"{base_path}{module_str.replace('.', '/')}/")
_builtins_cache: dict[PathLike, tuple[CompilerInput, vy_ast.Module]] = {}
# builtin import path -> (prefix for removal, package, suffix)
BUILTIN_MODULE_RULES = {
"ethereum.ercs": ("ethereum.ercs", vyper.builtins.interfaces.__package__, ".vyi"),
"math": ("", vyper.builtins.stdlib.__package__, ".vy"),
}
# TODO: could move this to analysis/common.py or something
def _get_builtin_prefix(module_str: str) -> Optional[str]:
for prefix in BUILTIN_MODULE_RULES.keys():
if module_str.startswith(prefix):
return prefix
return None
def _is_builtin(level: int, module_str: str) -> bool:
return level == 0 and _get_builtin_prefix(module_str) is not None
def _load_builtin_import(level: int, module_str: str) -> tuple[CompilerInput, vy_ast.Module]:
module_prefix = _get_builtin_prefix(module_str)
assert module_prefix is not None, "unreachable"
assert level == 0, "builtin imports are absolute"
builtins_path = vyper.builtins.__path__[0]
# hygiene: convert to relpath to avoid leaking user directory info
# (note Path.relative_to cannot handle absolute to relative path
# conversion, so we must use the `os` module).
builtins_path = safe_relpath(builtins_path)
search_path = Path(builtins_path).parent.parent
# generate an input bundle just because it knows how to build paths.
input_bundle = FilesystemInputBundle([search_path])
remove_prefix, target_package, suffix = BUILTIN_MODULE_RULES[module_prefix]
base_name = module_str.removeprefix(remove_prefix + ".")
remapped_module = f"{target_package}.{base_name}"
path = _import_to_path(level, remapped_module)
path = path.with_suffix(suffix)
# builtins are globally the same, so we can safely cache them
# (it is also *correct* to cache them, so that types defined in builtins
# compare correctly using pointer-equality.)
if path in _builtins_cache:
file, ast = _builtins_cache[path]
return file, ast
try:
file = input_bundle.load_file(path)
# set source_id to builtin sentinel value
file = dc.replace(file, source_id=BUILTIN)
assert isinstance(file, FileInput) # mypy hint
except FileNotFoundError as e:
hint = None
components = module_str.split(".")
# common issue for upgrading codebases from v0.3.x to v0.4.x -
# hint: rename ERC20 to IERC20
if components[-1].startswith("ERC"):
module_prefix = components[-1]
hint = f"try renaming `{module_prefix}` to `I{module_prefix}`"
raise ModuleNotFound(module_str, hint=hint) from e
builtin_ast = _parse_ast(file)
# no recursion needed since builtins don't have any imports
_builtins_cache[path] = file, builtin_ast
return file, builtin_ast
def resolve_imports(module_ast: vy_ast.Module, input_bundle: InputBundle):
graph = _ImportGraph()
analyzer = ImportAnalyzer(input_bundle, graph, module_ast)
analyzer.resolve_imports()
return analyzer
| ImportAnalyzer |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/build_env_compiler_var_a/package.py | {
"start": 217,
"end": 505
} | class ____(Package):
"""Package with runtime variable that should be dropped in the parent's build environment."""
url = "https://www.example.com"
version("1.0", md5="0123456789abcdef0123456789abcdef")
depends_on("build-env-compiler-var-b", type="build")
| BuildEnvCompilerVarA |
python | vyperlang__vyper | vyper/venom/analysis/analysis.py | {
"start": 849,
"end": 2773
} | class ____:
"""
A cache for IR analyses.
"""
function: IRFunction
analyses_cache: dict[Type[IRAnalysis], IRAnalysis]
def __init__(self, function: IRFunction):
self.analyses_cache = {}
self.function = function
# python3.12:
# def request_analysis[T](self, analysis_cls: Type[T], *args, **kwargs) -> T:
def request_analysis(self, analysis_cls: Type[T], *args, **kwargs) -> T:
"""
Request a specific analysis to be run on the IR. The result is cached and
returned if the analysis has already been run.
"""
assert issubclass(analysis_cls, IRAnalysis), f"{analysis_cls} is not an IRAnalysis"
if analysis_cls in self.analyses_cache:
ret = self.analyses_cache[analysis_cls]
assert isinstance(ret, analysis_cls) # help mypy
return ret
analysis = analysis_cls(self, self.function)
self.analyses_cache[analysis_cls] = analysis
analysis.analyze(*args, **kwargs)
return analysis
def invalidate_analysis(self, analysis_cls: Type[IRAnalysis]):
"""
Invalidate a specific analysis. This will remove the analysis from the cache.
"""
assert issubclass(analysis_cls, IRAnalysis), f"{analysis_cls} is not an IRAnalysis"
analysis = self.analyses_cache.pop(analysis_cls, None)
if analysis is not None:
analysis.invalidate()
def force_analysis(self, analysis_cls: Type[T], *args, **kwargs) -> T:
"""
Force a specific analysis to be run on the IR even if it has already been run,
and is cached.
"""
assert issubclass(analysis_cls, IRAnalysis), f"{analysis_cls} is not an IRAnalysis"
if analysis_cls in self.analyses_cache:
self.invalidate_analysis(analysis_cls)
return self.request_analysis(analysis_cls, *args, **kwargs)
| IRAnalysesCache |
python | kamyu104__LeetCode-Solutions | Python/minimum-cost-to-connect-sticks.py | {
"start": 48,
"end": 417
} | class ____(object):
def connectSticks(self, sticks):
"""
:type sticks: List[int]
:rtype: int
"""
heapq.heapify(sticks)
result = 0
while len(sticks) > 1:
x, y = heapq.heappop(sticks), heapq.heappop(sticks)
result += x+y
heapq.heappush(sticks, x+y)
return result
| Solution |
python | numpy__numpy | numpy/ma/tests/test_extras.py | {
"start": 35169,
"end": 51499
} | class ____:
def test_pytype(self):
r = np.ma.median([[np.inf, np.inf], [np.inf, np.inf]], axis=-1)
assert_equal(r, np.inf)
def test_inf(self):
# test that even which computes handles inf / x = masked
r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
[np.inf, np.inf]]), axis=-1)
assert_equal(r, np.inf)
r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
[np.inf, np.inf]]), axis=None)
assert_equal(r, np.inf)
# all masked
r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
[np.inf, np.inf]], mask=True),
axis=-1)
assert_equal(r.mask, True)
r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
[np.inf, np.inf]], mask=True),
axis=None)
assert_equal(r.mask, True)
def test_non_masked(self):
x = np.arange(9)
assert_equal(np.ma.median(x), 4.)
assert_(type(np.ma.median(x)) is not MaskedArray)
x = range(8)
assert_equal(np.ma.median(x), 3.5)
assert_(type(np.ma.median(x)) is not MaskedArray)
x = 5
assert_equal(np.ma.median(x), 5.)
assert_(type(np.ma.median(x)) is not MaskedArray)
# integer
x = np.arange(9 * 8).reshape(9, 8)
assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0))
assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1))
assert_(np.ma.median(x, axis=1) is not MaskedArray)
# float
x = np.arange(9 * 8.).reshape(9, 8)
assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0))
assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1))
assert_(np.ma.median(x, axis=1) is not MaskedArray)
def test_docstring_examples(self):
"test the examples given in the docstring of ma.median"
x = array(np.arange(8), mask=[0] * 4 + [1] * 4)
assert_equal(np.ma.median(x), 1.5)
assert_equal(np.ma.median(x).shape, (), "shape mismatch")
assert_(type(np.ma.median(x)) is not MaskedArray)
x = array(np.arange(10).reshape(2, 5), mask=[0] * 6 + [1] * 4)
assert_equal(np.ma.median(x), 2.5)
assert_equal(np.ma.median(x).shape, (), "shape mismatch")
assert_(type(np.ma.median(x)) is not MaskedArray)
ma_x = np.ma.median(x, axis=-1, overwrite_input=True)
assert_equal(ma_x, [2., 5.])
assert_equal(ma_x.shape, (2,), "shape mismatch")
assert_(type(ma_x) is MaskedArray)
def test_axis_argument_errors(self):
msg = "mask = %s, ndim = %s, axis = %s, overwrite_input = %s"
for ndmin in range(5):
for mask in [False, True]:
x = array(1, ndmin=ndmin, mask=mask)
# Valid axis values should not raise exception
args = itertools.product(range(-ndmin, ndmin), [False, True])
for axis, over in args:
try:
np.ma.median(x, axis=axis, overwrite_input=over)
except Exception:
raise AssertionError(msg % (mask, ndmin, axis, over))
# Invalid axis values should raise exception
args = itertools.product([-(ndmin + 1), ndmin], [False, True])
for axis, over in args:
try:
np.ma.median(x, axis=axis, overwrite_input=over)
except np.exceptions.AxisError:
pass
else:
raise AssertionError(msg % (mask, ndmin, axis, over))
def test_masked_0d(self):
# Check values
x = array(1, mask=False)
assert_equal(np.ma.median(x), 1)
x = array(1, mask=True)
assert_equal(np.ma.median(x), np.ma.masked)
def test_masked_1d(self):
x = array(np.arange(5), mask=True)
assert_equal(np.ma.median(x), np.ma.masked)
assert_equal(np.ma.median(x).shape, (), "shape mismatch")
assert_(type(np.ma.median(x)) is np.ma.core.MaskedConstant)
x = array(np.arange(5), mask=False)
assert_equal(np.ma.median(x), 2.)
assert_equal(np.ma.median(x).shape, (), "shape mismatch")
assert_(type(np.ma.median(x)) is not MaskedArray)
x = array(np.arange(5), mask=[0, 1, 0, 0, 0])
assert_equal(np.ma.median(x), 2.5)
assert_equal(np.ma.median(x).shape, (), "shape mismatch")
assert_(type(np.ma.median(x)) is not MaskedArray)
x = array(np.arange(5), mask=[0, 1, 1, 1, 1])
assert_equal(np.ma.median(x), 0.)
assert_equal(np.ma.median(x).shape, (), "shape mismatch")
assert_(type(np.ma.median(x)) is not MaskedArray)
# integer
x = array(np.arange(5), mask=[0, 1, 1, 0, 0])
assert_equal(np.ma.median(x), 3.)
assert_equal(np.ma.median(x).shape, (), "shape mismatch")
assert_(type(np.ma.median(x)) is not MaskedArray)
# float
x = array(np.arange(5.), mask=[0, 1, 1, 0, 0])
assert_equal(np.ma.median(x), 3.)
assert_equal(np.ma.median(x).shape, (), "shape mismatch")
assert_(type(np.ma.median(x)) is not MaskedArray)
# integer
x = array(np.arange(6), mask=[0, 1, 1, 1, 1, 0])
assert_equal(np.ma.median(x), 2.5)
assert_equal(np.ma.median(x).shape, (), "shape mismatch")
assert_(type(np.ma.median(x)) is not MaskedArray)
# float
x = array(np.arange(6.), mask=[0, 1, 1, 1, 1, 0])
assert_equal(np.ma.median(x), 2.5)
assert_equal(np.ma.median(x).shape, (), "shape mismatch")
assert_(type(np.ma.median(x)) is not MaskedArray)
def test_1d_shape_consistency(self):
assert_equal(np.ma.median(array([1, 2, 3], mask=[0, 0, 0])).shape,
np.ma.median(array([1, 2, 3], mask=[0, 1, 0])).shape)
def test_2d(self):
# Tests median w/ 2D
(n, p) = (101, 30)
x = masked_array(np.linspace(-1., 1., n),)
x[:10] = x[-10:] = masked
z = masked_array(np.empty((n, p), dtype=float))
z[:, 0] = x[:]
idx = np.arange(len(x))
for i in range(1, p):
np.random.shuffle(idx)
z[:, i] = x[idx]
assert_equal(median(z[:, 0]), 0)
assert_equal(median(z), 0)
assert_equal(median(z, axis=0), np.zeros(p))
assert_equal(median(z.T, axis=1), np.zeros(p))
def test_2d_waxis(self):
# Tests median w/ 2D arrays and different axis.
x = masked_array(np.arange(30).reshape(10, 3))
x[:3] = x[-3:] = masked
assert_equal(median(x), 14.5)
assert_(type(np.ma.median(x)) is not MaskedArray)
assert_equal(median(x, axis=0), [13.5, 14.5, 15.5])
assert_(type(np.ma.median(x, axis=0)) is MaskedArray)
assert_equal(median(x, axis=1), [0, 0, 0, 10, 13, 16, 19, 0, 0, 0])
assert_(type(np.ma.median(x, axis=1)) is MaskedArray)
assert_equal(median(x, axis=1).mask, [1, 1, 1, 0, 0, 0, 0, 1, 1, 1])
def test_3d(self):
# Tests median w/ 3D
x = np.ma.arange(24).reshape(3, 4, 2)
x[x % 3 == 0] = masked
assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]])
x = x.reshape((4, 3, 2))
assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]])
x = np.ma.arange(24).reshape(4, 3, 2)
x[x % 5 == 0] = masked
assert_equal(median(x, 0), [[12, 10], [8, 9], [16, 17]])
def test_neg_axis(self):
x = masked_array(np.arange(30).reshape(10, 3))
x[:3] = x[-3:] = masked
assert_equal(median(x, axis=-1), median(x, axis=1))
def test_out_1d(self):
# integer float even odd
for v in (30, 30., 31, 31.):
x = masked_array(np.arange(v))
x[:3] = x[-3:] = masked
out = masked_array(np.ones(()))
r = median(x, out=out)
if v == 30:
assert_equal(out, 14.5)
else:
assert_equal(out, 15.)
assert_(r is out)
assert_(type(r) is MaskedArray)
def test_out(self):
# integer float even odd
for v in (40, 40., 30, 30.):
x = masked_array(np.arange(v).reshape(10, -1))
x[:3] = x[-3:] = masked
out = masked_array(np.ones(10))
r = median(x, axis=1, out=out)
if v == 30:
e = masked_array([0.] * 3 + [10, 13, 16, 19] + [0.] * 3,
mask=[True] * 3 + [False] * 4 + [True] * 3)
else:
e = masked_array([0.] * 3 + [13.5, 17.5, 21.5, 25.5] + [0.] * 3,
mask=[True] * 3 + [False] * 4 + [True] * 3)
assert_equal(r, e)
assert_(r is out)
assert_(type(r) is MaskedArray)
@pytest.mark.parametrize(
argnames='axis',
argvalues=[
None,
1,
(1, ),
(0, 1),
(-3, -1),
]
)
def test_keepdims_out(self, axis):
mask = np.zeros((3, 5, 7, 11), dtype=bool)
# Randomly set some elements to True:
w = np.random.random((4, 200)) * np.array(mask.shape)[:, None]
w = w.astype(np.intp)
mask[tuple(w)] = np.nan
d = masked_array(np.ones(mask.shape), mask=mask)
if axis is None:
shape_out = (1,) * d.ndim
else:
axis_norm = normalize_axis_tuple(axis, d.ndim)
shape_out = tuple(
1 if i in axis_norm else d.shape[i] for i in range(d.ndim))
out = masked_array(np.empty(shape_out))
result = median(d, axis=axis, keepdims=True, out=out)
assert result is out
assert_equal(result.shape, shape_out)
def test_single_non_masked_value_on_axis(self):
data = [[1., 0.],
[0., 3.],
[0., 0.]]
masked_arr = np.ma.masked_equal(data, 0)
expected = [1., 3.]
assert_array_equal(np.ma.median(masked_arr, axis=0),
expected)
def test_nan(self):
for mask in (False, np.zeros(6, dtype=bool)):
dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]])
dm.mask = mask
# scalar result
r = np.ma.median(dm, axis=None)
assert_(np.isscalar(r))
assert_array_equal(r, np.nan)
r = np.ma.median(dm.ravel(), axis=0)
assert_(np.isscalar(r))
assert_array_equal(r, np.nan)
r = np.ma.median(dm, axis=0)
assert_equal(type(r), MaskedArray)
assert_array_equal(r, [1, np.nan, 3])
r = np.ma.median(dm, axis=1)
assert_equal(type(r), MaskedArray)
assert_array_equal(r, [np.nan, 2])
r = np.ma.median(dm, axis=-1)
assert_equal(type(r), MaskedArray)
assert_array_equal(r, [np.nan, 2])
dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]])
dm[:, 2] = np.ma.masked
assert_array_equal(np.ma.median(dm, axis=None), np.nan)
assert_array_equal(np.ma.median(dm, axis=0), [1, np.nan, 3])
assert_array_equal(np.ma.median(dm, axis=1), [np.nan, 1.5])
def test_out_nan(self):
o = np.ma.masked_array(np.zeros((4,)))
d = np.ma.masked_array(np.ones((3, 4)))
d[2, 1] = np.nan
d[2, 2] = np.ma.masked
assert_equal(np.ma.median(d, 0, out=o), o)
o = np.ma.masked_array(np.zeros((3,)))
assert_equal(np.ma.median(d, 1, out=o), o)
o = np.ma.masked_array(np.zeros(()))
assert_equal(np.ma.median(d, out=o), o)
def test_nan_behavior(self):
a = np.ma.masked_array(np.arange(24, dtype=float))
a[::3] = np.ma.masked
a[2] = np.nan
assert_array_equal(np.ma.median(a), np.nan)
assert_array_equal(np.ma.median(a, axis=0), np.nan)
a = np.ma.masked_array(np.arange(24, dtype=float).reshape(2, 3, 4))
a.mask = np.arange(a.size) % 2 == 1
aorig = a.copy()
a[1, 2, 3] = np.nan
a[1, 1, 2] = np.nan
# no axis
assert_array_equal(np.ma.median(a), np.nan)
assert_(np.isscalar(np.ma.median(a)))
# axis0
b = np.ma.median(aorig, axis=0)
b[2, 3] = np.nan
b[1, 2] = np.nan
assert_equal(np.ma.median(a, 0), b)
# axis1
b = np.ma.median(aorig, axis=1)
b[1, 3] = np.nan
b[1, 2] = np.nan
assert_equal(np.ma.median(a, 1), b)
# axis02
b = np.ma.median(aorig, axis=(0, 2))
b[1] = np.nan
b[2] = np.nan
assert_equal(np.ma.median(a, (0, 2)), b)
def test_ambigous_fill(self):
# 255 is max value, used as filler for sort
a = np.array([[3, 3, 255], [3, 3, 255]], dtype=np.uint8)
a = np.ma.masked_array(a, mask=a == 3)
assert_array_equal(np.ma.median(a, axis=1), 255)
assert_array_equal(np.ma.median(a, axis=1).mask, False)
assert_array_equal(np.ma.median(a, axis=0), a[0])
assert_array_equal(np.ma.median(a), 255)
def test_special(self):
for inf in [np.inf, -np.inf]:
a = np.array([[inf, np.nan], [np.nan, np.nan]])
a = np.ma.masked_array(a, mask=np.isnan(a))
assert_equal(np.ma.median(a, axis=0), [inf, np.nan])
assert_equal(np.ma.median(a, axis=1), [inf, np.nan])
assert_equal(np.ma.median(a), inf)
a = np.array([[np.nan, np.nan, inf], [np.nan, np.nan, inf]])
a = np.ma.masked_array(a, mask=np.isnan(a))
assert_array_equal(np.ma.median(a, axis=1), inf)
assert_array_equal(np.ma.median(a, axis=1).mask, False)
assert_array_equal(np.ma.median(a, axis=0), a[0])
assert_array_equal(np.ma.median(a), inf)
# no mask
a = np.array([[inf, inf], [inf, inf]])
assert_equal(np.ma.median(a), inf)
assert_equal(np.ma.median(a, axis=0), inf)
assert_equal(np.ma.median(a, axis=1), inf)
a = np.array([[inf, 7, -inf, -9],
[-10, np.nan, np.nan, 5],
[4, np.nan, np.nan, inf]],
dtype=np.float32)
a = np.ma.masked_array(a, mask=np.isnan(a))
if inf > 0:
assert_equal(np.ma.median(a, axis=0), [4., 7., -inf, 5.])
assert_equal(np.ma.median(a), 4.5)
else:
assert_equal(np.ma.median(a, axis=0), [-10., 7., -inf, -9.])
assert_equal(np.ma.median(a), -2.5)
assert_equal(np.ma.median(a, axis=1), [-1., -2.5, inf])
for i in range(10):
for j in range(1, 10):
a = np.array([([np.nan] * i) + ([inf] * j)] * 2)
a = np.ma.masked_array(a, mask=np.isnan(a))
assert_equal(np.ma.median(a), inf)
assert_equal(np.ma.median(a, axis=1), inf)
assert_equal(np.ma.median(a, axis=0),
([np.nan] * i) + [inf] * j)
def test_empty(self):
# empty arrays
a = np.ma.masked_array(np.array([], dtype=float))
with pytest.warns(RuntimeWarning):
assert_array_equal(np.ma.median(a), np.nan)
# multiple dimensions
a = np.ma.masked_array(np.array([], dtype=float, ndmin=3))
# no axis
with pytest.warns(RuntimeWarning):
assert_array_equal(np.ma.median(a), np.nan)
# axis 0 and 1
b = np.ma.masked_array(np.array([], dtype=float, ndmin=2))
assert_equal(np.ma.median(a, axis=0), b)
assert_equal(np.ma.median(a, axis=1), b)
# axis 2
b = np.ma.masked_array(np.array(np.nan, dtype=float, ndmin=2))
with pytest.warns(RuntimeWarning):
assert_equal(np.ma.median(a, axis=2), b)
def test_object(self):
o = np.ma.masked_array(np.arange(7.))
assert_(type(np.ma.median(o.astype(object))), float)
o[2] = np.nan
assert_(type(np.ma.median(o.astype(object))), float)
| TestMedian |
python | tensorflow__tensorflow | tensorflow/compiler/tests/giant_const_op_test.py | {
"start": 2149,
"end": 4089
} | class ____(test.TestCase):
# Verifies that graphs containing giant const tensors that won't fit in memory
# are compiled correctly to HLO.
def testGiantConst(self):
# Disabling Mlir bridge since using the tf2xla implementation of
# StridedSliceop which would get executed in this GiantConst test.
config.disable_mlir_bridge()
strategy = get_tpu_strategy()
types = {
dtypes.bool,
dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64,
dtypes.float16, dtypes.bfloat16,
dtypes.float32, dtypes.float64,
}
for dtype in types:
values = [True if dtype is dtypes.bool else 1]
if dtype is dtypes.bool:
values.append(False)
elif dtype is not dtypes.float64:
# TPUs don't follow IEEE 754 float64 standard for 64 bit floating point
# numbers so it could return different output even with just data
# transformation ops without any arithmetic operations.
values.extend([dtype.min, dtype.max])
for value in values:
@def_function.function
def train_step():
# pylint: disable=cell-var-from-loop
def computation():
const = constant_op.constant(value, dtype=dtype, shape=[1024]*4)
return const[:1, :1, :1, :1]
return strategy.run(computation, args=())
output = strategy.experimental_local_results(train_step())[0]
expected = np.full((1, 1, 1, 1), value)
self.assertAllEqual(output, expected)
if __name__ == "__main__":
# Make sure TF_XLA_FLAGS is not already set to avoid dropping the existing
# value silently.
assert "TF_XLA_FLAGS" not in os.environ
# Disable tfxla constant folding that always creates full Tensors and will
# fail for giant tensors.
os.environ["TF_XLA_FLAGS"] = "--tf_xla_disable_constant_folding=true"
test.main()
| GiantConstOp |
python | nedbat__coveragepy | tests/test_venv.py | {
"start": 5928,
"end": 14289
} | class ____(CoverageTest):
"""Tests of virtualenv considerations."""
expected_stdout = "33\n110\n198\n1.5\n"
@pytest.fixture(autouse=True)
def in_venv_world_fixture(self, venv_world: Path) -> Iterable[None]:
"""For running tests inside venv_world, and cleaning up made files."""
with change_dir(venv_world):
self.make_file(
"myproduct.py",
"""\
import colorsys
import third
import nspkg.fifth
import nspkg.sixth
print(third.third(11))
print(nspkg.fifth.fifth(22))
print(nspkg.sixth.sixth(33))
print(sum(colorsys.rgb_to_hls(1, 0, 0)))
""",
)
self.del_environ("COVERAGE_TESTING") # To get realistic behavior
self.set_environ("COVERAGE_DEBUG_FILE", "debug_out.txt")
self.set_environ("COVERAGE_DEBUG", "trace")
yield
for fname in os.listdir("."):
if fname not in {"venv", "another_pkg", "bug888"}:
os.remove(fname)
def get_trace_output(self) -> str:
"""Get the debug output of coverage.py"""
with open("debug_out.txt", encoding="utf-8") as f:
return f.read()
@pytest.mark.parametrize("install_source_in_venv", [True, False])
def test_third_party_venv_isnt_measured(
self,
coverage_command: str,
install_source_in_venv: bool,
) -> None:
if install_source_in_venv:
make_file(
"setup.py",
"""\
import setuptools
setuptools.setup(
name="myproduct",
py_modules = ["myproduct"],
)
""",
)
try:
run_in_venv("python -m pip install .")
finally:
shutil.rmtree("build", ignore_errors=True)
shutil.rmtree("myproduct.egg-info", ignore_errors=True)
# Ensure that coverage doesn't run the non-installed module.
os.remove("myproduct.py")
out = run_in_venv(coverage_command + " run --source=.,myproduct -m myproduct")
else:
out = run_in_venv(coverage_command + " run --source=. myproduct.py")
# In particular, this warning doesn't appear:
# Already imported a file that will be measured: .../coverage/__main__.py
assert out == self.expected_stdout
# Check that our tracing was accurate. Files are mentioned because
# --source refers to a file.
debug_out = self.get_trace_output()
assert re_lines(
r"^Not tracing .*\bexecfile.py': inside --source, but is third-party",
debug_out,
)
assert re_lines(r"^Tracing .*\bmyproduct.py", debug_out)
assert re_lines(
r"^Not tracing .*\bcolorsys.py': (module 'colorsys' |)?falls outside the --source spec",
debug_out,
)
out = run_in_venv(coverage_command + " report")
assert "myproduct.py" in out
assert "third" not in out
assert "coverage" not in out
assert "colorsys" not in out
def test_us_in_venv_isnt_measured(self, coverage_command: str) -> None:
out = run_in_venv(coverage_command + " run --source=third myproduct.py")
assert out == self.expected_stdout
# Check that our tracing was accurate. Modules are mentioned because
# --source refers to a module.
debug_out = self.get_trace_output()
assert re_lines(
r"^Not tracing .*\bexecfile.py': "
+ "module 'coverage.execfile' falls outside the --source spec",
debug_out,
)
assert re_lines(
r"^Not tracing .*\bmyproduct.py': module 'myproduct' falls outside the --source spec",
debug_out,
)
assert re_lines(
r"^Not tracing .*\bcolorsys.py': module 'colorsys' falls outside the --source spec",
debug_out,
)
out = run_in_venv(coverage_command + " report")
assert "myproduct.py" not in out
assert "third" in out
assert "coverage" not in out
assert "colorsys" not in out
def test_venv_isnt_measured(self, coverage_command: str) -> None:
out = run_in_venv(coverage_command + " run myproduct.py")
assert out == self.expected_stdout
debug_out = self.get_trace_output()
assert re_lines(r"^Not tracing .*\bexecfile.py': is part of coverage.py", debug_out)
assert re_lines(r"^Tracing .*\bmyproduct.py", debug_out)
assert re_lines(r"^Not tracing .*\bcolorsys.py': is in the stdlib", debug_out)
out = run_in_venv(coverage_command + " report")
assert "myproduct.py" in out
assert "third" not in out
assert "coverage" not in out
assert "colorsys" not in out
@pytest.mark.skipif(not testenv.C_TRACER, reason="No plugins with this core.")
def test_venv_with_dynamic_plugin(self, coverage_command: str) -> None:
# https://github.com/coveragepy/coveragepy/issues/1150
# Django coverage plugin was incorrectly getting warnings:
# "Already imported: ... django/template/blah.py"
# It happened because coverage imported the plugin, which imported
# Django, and then the Django files were reported as traceable.
self.make_file(".coveragerc", "[run]\nplugins=third.plugin\n")
self.make_file(
"myrender.py",
"""\
import third.render
print(third.render.render("hello.html", 1723))
""",
)
out = run_in_venv(coverage_command + " run --source=. myrender.py")
# The output should not have this warning:
# Already imported a file that will be measured: ...third/render.py (already-imported)
assert out == "HTML: hello.html@1723\n"
def test_installed_namespace_packages(self, coverage_command: str) -> None:
# https://github.com/coveragepy/coveragepy/issues/1231
# When namespace packages were installed, they were considered
# third-party packages. Test that isn't still happening.
out = run_in_venv(coverage_command + " run --source=nspkg myproduct.py")
# In particular, this warning doesn't appear:
# Already imported a file that will be measured: .../coverage/__main__.py
assert out == self.expected_stdout
# Check that our tracing was accurate. Files are mentioned because
# --source refers to a file.
debug_out = self.get_trace_output()
assert re_lines(
r"^Not tracing .*\bexecfile.py': "
+ "module 'coverage.execfile' falls outside the --source spec",
debug_out,
)
assert re_lines(
r"^Not tracing .*\bmyproduct.py': module 'myproduct' falls outside the --source spec",
debug_out,
)
assert re_lines(
r"^Not tracing .*\bcolorsys.py': module 'colorsys' falls outside the --source spec",
debug_out,
)
out = run_in_venv(coverage_command + " report")
# Name Stmts Miss Cover
# ------------------------------------------------------------------------------
# another_pkg/nspkg/sixth/__init__.py 2 0 100%
# venv/lib/python3.9/site-packages/nspkg/fifth/__init__.py 2 0 100%
# ------------------------------------------------------------------------------
# TOTAL 4 0 100%
assert "myproduct.py" not in out
assert "third" not in out
assert "coverage" not in out
assert "colorsys" not in out
assert "fifth" in out
assert "sixth" in out
def test_bug_888(self, coverage_command: str) -> None:
out = run_in_venv(
coverage_command + " run --source=bug888/app,bug888/plugin bug888/app/testcov/main.py",
)
# When the test fails, the output includes "Already imported a file that will be measured"
assert out == "Plugin here\n"
| VirtualenvTest |
python | pytorch__pytorch | torch/onnx/_internal/exporter/_capture_strategies.py | {
"start": 2131,
"end": 4447
} | class ____(abc.ABC):
"""Strategy for capturing a module as ExportedProgram.
To use a strategy, create an instance and call it with the model, args, kwargs, and dynamic_shapes.
Example::
strategy = TorchExportNonStrictStrategy(verbose=True)
result = strategy(model, args, kwargs, dynamic_shapes)
"""
def __init__(
self,
*,
verbose: bool = False,
dump: bool = False,
artifacts_dir: str | os.PathLike = ".",
timestamp: str | None = None,
) -> None:
"""Initialize the strategy.
Args:
verbose: Whether to print verbose messages.
dump: Whether to dump the intermediate artifacts to a file.
"""
self._verbose_print = _verbose_printer(verbose)
self._dump = dump
self._artifacts_dir = pathlib.Path(artifacts_dir)
self._timestamp = timestamp or datetime.datetime.now().strftime(
"%Y-%m-%d_%H-%M-%S-%f"
)
self._exception: Exception | None = None
def __call__(
self,
model: torch.nn.Module | torch.jit.ScriptFunction,
args: tuple[Any, ...],
kwargs: dict[str, Any] | None,
dynamic_shapes,
) -> Result:
self._enter(model)
if kwargs is None:
kwargs = {}
try:
exported_program = self._capture(model, args, kwargs, dynamic_shapes)
except Exception as e:
self._failure(model, e)
return Result(
exported_program=None,
strategy=self.__class__.__name__,
exception=e,
)
self._success(model)
return Result(
exported_program,
strategy=self.__class__.__name__,
exception=self._exception,
)
@abc.abstractmethod
def _capture(
self, model, args, kwargs, dynamic_shapes
) -> torch.export.ExportedProgram:
raise NotImplementedError
def _enter(self, model: torch.nn.Module | torch.jit.ScriptFunction) -> None:
return
def _success(self, model: torch.nn.Module | torch.jit.ScriptFunction) -> None:
return
def _failure(
self, model: torch.nn.Module | torch.jit.ScriptFunction, e: Exception
) -> None:
return
| CaptureStrategy |
python | tensorflow__tensorflow | tensorflow/python/training/adagrad.py | {
"start": 1141,
"end": 7522
} | class ____(optimizer.Optimizer):
"""Optimizer that implements the Adagrad algorithm.
References:
Adaptive Subgradient Methods for Online Learning and Stochastic Optimization
:[Duchi et al., 2011](http://jmlr.org/papers/v12/duchi11a.html)
([pdf](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf))
@compatibility(TF2)
tf.compat.v1.train.AdagradOptimizer is compatible with eager mode and
`tf.function`.
When eager execution is enabled, `learning_rate`,
`initial_accumulator_value`, and `epsilon` can each be a callable that
takes no arguments and returns the actual value to use. This can be useful
for changing these values across different invocations of optimizer
functions.
To switch to native TF2 style, use [`tf.keras.optimizers.Adagrad`]
(https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Adagrad)
instead. Please notice that due to the implementation differences,
`tf.keras.optimizers.Adagrad` and
`tf.compat.v1.train.AdagradOptimizer` may have slight differences in
floating point numerics even though the formula used for the variable
updates still matches.
#### Structural mapping to native TF2
Before:
```python
optimizer = tf.compat.v1.train.AdagradOptimizer(
learning_rate=learning_rate,
initial_accumulator_value=initial_accumulator_value)
```
After:
```python
optimizer = tf.keras.optimizers.Adagrad(
learning_rate=learning_rate,
initial_accumulator_value=initial_accumulator_value,
epsilon=1e-07)
```
#### How to map arguments
| TF1 Arg Name | TF2 Arg Name | Note |
| ------------------ | ------------- | ------------------------------- |
| `learning_rate` | `learning_rate` | Be careful of setting |
: : : learning_rate tensor value computed from the global step. :
: : : In TF1 this was usually meant to imply a dynamic learning rate and :
: : : would recompute in each step. In TF2 (eager + function) it will :
: : : treat it as a scalar value that only gets computed once instead of :
: : : a symbolic placeholder to be computed each time. :
| `initial_accumulator_value` | `initial_accumulator_value` | The |
: : : argument can be value of zero in TF2, which is not accepted in TF1.|
| - | `epsilon` | `epsilon` is become configurable in TF2. The |
: : : defualt value is changed from 1e-8 to 1e-7 :
| `use_locking` | - | Not applicable in TF2. |
#### Before & after usage example
Before:
```python
x = tf.Variable([1,2,3], dtype=tf.float32)
grad = tf.constant([0.1, 0.2, 0.3])
optimizer = tf.compat.v1.train.AdagradOptimizer(learning_rate=0.001)
optimizer.apply_gradients(zip([grad], [x]))
```
After:
```python
x = tf.Variable([1,2,3], dtype=tf.float32)
grad = tf.constant([0.1, 0.2, 0.3])
optimizer = tf.keras.optimizers.Adagrad(learning_rate=0.001)
optimizer.apply_gradients(zip([grad], [x]))
```
@end_compatibility
"""
def __init__(self, learning_rate, initial_accumulator_value=0.1,
use_locking=False, name="Adagrad"):
"""Construct a new Adagrad optimizer.
Args:
learning_rate: A `Tensor` or a floating point value. The learning rate.
initial_accumulator_value: A floating point value.
Starting value for the accumulators, must be positive.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Adagrad".
Raises:
ValueError: If the `initial_accumulator_value` is invalid.
"""
if initial_accumulator_value <= 0.0:
raise ValueError("initial_accumulator_value must be positive: %s" %
initial_accumulator_value)
super(AdagradOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
self._initial_accumulator_value = initial_accumulator_value
# Created in Initialize.
self._learning_rate_tensor = None
def _create_slots(self, var_list):
for v in var_list:
dtype = v.dtype.base_dtype
if v.get_shape().is_fully_defined():
init = init_ops.constant_initializer(self._initial_accumulator_value,
dtype=dtype)
else:
init = self._init_constant_op(v, dtype)
self._get_or_make_slot_with_initializer(v, init, v.get_shape(), dtype,
"accumulator", self._name)
def _init_constant_op(self, v, dtype):
def init():
# Use a Tensor instead of initializer if variable does not have
# static shape.
init_constant = gen_array_ops.fill(array_ops.shape(v),
self._initial_accumulator_value)
return math_ops.cast(init_constant, dtype)
return init
def _prepare(self):
learning_rate = self._call_if_callable(self._learning_rate)
self._learning_rate_tensor = ops.convert_to_tensor(
learning_rate, name="learning_rate")
def _apply_dense(self, grad, var):
acc = self.get_slot(var, "accumulator")
return gen_training_ops.apply_adagrad(
var,
acc,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad,
use_locking=self._use_locking)
def _resource_apply_dense(self, grad, var):
acc = self.get_slot(var, "accumulator")
return gen_training_ops.resource_apply_adagrad(
var.handle,
acc.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
grad,
use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
acc = self.get_slot(var, "accumulator")
return gen_training_ops.sparse_apply_adagrad(
var,
acc,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad.values,
grad.indices,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
acc = self.get_slot(var, "accumulator")
return gen_training_ops.resource_sparse_apply_adagrad(
var.handle,
acc.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
grad,
indices,
use_locking=self._use_locking)
| AdagradOptimizer |
python | pytorch__pytorch | test/torch_np/test_ndarray_methods.py | {
"start": 22601,
"end": 22966
} | class ____(TestCase):
# make sure ndarray does not carry extra methods/attributes
# >>> set(dir(a)) - set(dir(a.tensor.numpy()))
@parametrize("name", ["fn", "ivar", "method", "name", "plain", "rvar"])
def test_extra_methods(self, name):
a = np.ones(3)
with pytest.raises(AttributeError):
getattr(a, name)
| TestNoExtraMethods |
python | getsentry__sentry | tests/sentry/integrations/github/tasks/test_pr_comment.py | {
"start": 14642,
"end": 26980
} | class ____(GithubCommentTestCase):
def setUp(self) -> None:
super().setUp()
self.user_id = "user_1"
self.app_id = "app_1"
self.pr = self.create_pr_issues()
self.cache_key = DEBOUNCE_PR_COMMENT_CACHE_KEY(self.pr.id)
@patch(
"sentry.integrations.github.integration.GitHubPRCommentWorkflow.get_top_5_issues_by_count"
)
@patch("sentry.integrations.source_code_management.commit_context.metrics")
@responses.activate
def test_comment_workflow(self, mock_metrics: MagicMock, mock_issues: MagicMock) -> None:
group_objs = Group.objects.order_by("id").all()
groups = [g.id for g in group_objs]
titles = [g.title for g in group_objs]
mock_issues.return_value = [{"group_id": id, "event_count": 10} for id in groups]
responses.add(
responses.POST,
self.base_url + "/repos/getsentry/sentry/issues/1/comments",
json={"id": 1},
headers={"X-Ratelimit-Limit": "60", "X-Ratelimit-Remaining": "59"},
)
github_comment_workflow(self.pr.id, self.project.id)
assert (
f'"body": "## Issues attributed to commits in this pull request\\nThis pull request was merged and Sentry observed the following issues:\\n\\n* \\u203c\\ufe0f [**{titles[0]}**](http://testserver/organizations/foo/issues/{groups[0]}/?referrer=github-pr-bot)\\n\\n* \\u203c\\ufe0f [**{titles[1]}**](http://testserver/organizations/foobar/issues/{groups[1]}/?referrer=github-pr-bot)\\n"'.encode()
in responses.calls[0].request.body
)
pull_request_comment_query = PullRequestComment.objects.all()
assert len(pull_request_comment_query) == 1
assert pull_request_comment_query[0].external_id == 1
assert pull_request_comment_query[0].comment_type == CommentType.MERGED_PR
mock_metrics.incr.assert_called_with("github.pr_comment.comment_created")
@patch(
"sentry.integrations.github.integration.GitHubPRCommentWorkflow.get_top_5_issues_by_count"
)
@patch("sentry.integrations.source_code_management.commit_context.metrics")
@responses.activate
@freeze_time(datetime(2023, 6, 8, 0, 0, 0, tzinfo=UTC))
def test_comment_workflow_updates_comment(
self, mock_metrics: MagicMock, mock_issues: MagicMock
) -> None:
group_objs = Group.objects.order_by("id").all()
groups = [g.id for g in group_objs]
titles = [g.title for g in group_objs]
mock_issues.return_value = [{"group_id": id, "event_count": 10} for id in groups]
pull_request_comment = PullRequestComment.objects.create(
external_id=1,
pull_request_id=self.pr.id,
created_at=timezone.now() - timedelta(hours=1),
updated_at=timezone.now() - timedelta(hours=1),
group_ids=[1, 2, 3, 4],
)
# An Open PR comment should not affect the rest of the test as the filter should ignore it.
PullRequestComment.objects.create(
external_id=2,
pull_request_id=self.pr.id,
created_at=timezone.now() - timedelta(hours=1),
updated_at=timezone.now() - timedelta(hours=1),
group_ids=[],
comment_type=CommentType.OPEN_PR,
)
responses.add(
responses.PATCH,
self.base_url + "/repos/getsentry/sentry/issues/comments/1",
json={"id": 1},
headers={"X-Ratelimit-Limit": "60", "X-Ratelimit-Remaining": "59"},
)
github_comment_workflow(self.pr.id, self.project.id)
assert (
f'"body": "## Issues attributed to commits in this pull request\\nThis pull request was merged and Sentry observed the following issues:\\n\\n* \\u203c\\ufe0f [**{titles[0]}**](http://testserver/organizations/foo/issues/{groups[0]}/?referrer=github-pr-bot)\\n\\n* \\u203c\\ufe0f [**{titles[1]}**](http://testserver/organizations/foobar/issues/{groups[1]}/?referrer=github-pr-bot)\\n"'.encode()
in responses.calls[0].request.body
)
pull_request_comment.refresh_from_db()
assert pull_request_comment.group_ids == groups
assert pull_request_comment.updated_at == timezone.now()
mock_metrics.incr.assert_called_with("github.pr_comment.comment_updated")
@patch(
"sentry.integrations.github.integration.GitHubPRCommentWorkflow.get_top_5_issues_by_count"
)
@patch("sentry.integrations.source_code_management.tasks.metrics")
@patch("sentry.integrations.github.integration.metrics")
@responses.activate
def test_comment_workflow_api_error(
self, mock_integration_metrics: MagicMock, mock_metrics: MagicMock, mock_issues: MagicMock
) -> None:
cache.set(self.cache_key, True, timedelta(minutes=5).total_seconds())
mock_issues.return_value = [
{"group_id": g.id, "event_count": 10} for g in Group.objects.all()
]
responses.add(
responses.POST,
self.base_url + "/repos/getsentry/sentry/issues/1/comments",
status=400,
json={"id": 1},
)
responses.add(
responses.POST,
self.base_url + "/repos/getsentry/sentry/issues/2/comments",
status=400,
json={
"message": "Unable to create comment because issue is locked.",
"documentation_url": "https://docs.github.com/articles/locking-conversations/",
},
)
responses.add(
responses.POST,
self.base_url + "/repos/getsentry/sentry/issues/3/comments",
status=400,
json={
"message": "API rate limit exceeded",
"documentation_url": "https://docs.github.com/rest/overview/resources-in-the-rest-api#rate-limiting",
},
)
with pytest.raises(ApiError):
github_comment_workflow(self.pr.id, self.project.id)
assert cache.get(self.cache_key) is None
mock_metrics.incr.assert_called_with("github.pr_comment.error", tags={"type": "api_error"})
pr_2 = self.create_pr_issues()
cache_key = DEBOUNCE_PR_COMMENT_CACHE_KEY(pr_2.id)
cache.set(cache_key, True, timedelta(minutes=5).total_seconds())
# does not raise ApiError for locked issue
github_comment_workflow(pr_2.id, self.project.id)
assert cache.get(cache_key) is None
mock_integration_metrics.incr.assert_called_with(
"github.pr_comment.error", tags={"type": "issue_locked_error"}
)
pr_3 = self.create_pr_issues()
cache_key = DEBOUNCE_PR_COMMENT_CACHE_KEY(pr_3.id)
cache.set(cache_key, True, timedelta(minutes=5).total_seconds())
# does not raise ApiError for rate limited error
github_comment_workflow(pr_3.id, self.project.id)
assert cache.get(cache_key) is None
mock_integration_metrics.incr.assert_called_with(
"github.pr_comment.error", tags={"type": "rate_limited_error"}
)
@patch(
"sentry.integrations.github.integration.GitHubPRCommentWorkflow.get_issue_ids_from_pr",
return_value=[],
)
@patch(
"sentry.integrations.github.integration.GitHubPRCommentWorkflow.get_top_5_issues_by_count"
)
@patch("sentry.integrations.source_code_management.tasks.metrics")
@patch("sentry.models.Organization.objects")
def test_comment_workflow_missing_org(
self, mock_repository, mock_metrics, mock_issues, mock_issue_query
):
# Organization.DoesNotExist should trigger the cache to release the key
cache.set(self.cache_key, True, timedelta(minutes=5).total_seconds())
mock_repository.get_from_cache.side_effect = Organization.DoesNotExist
github_comment_workflow(self.pr.id, self.project.id)
assert not mock_issues.called
assert cache.get(self.cache_key) is None
mock_metrics.incr.assert_called_with(
"source_code_management.pr_comment.error", tags={"type": "missing_org"}
)
@patch(
"sentry.integrations.github.integration.GitHubPRCommentWorkflow.get_top_5_issues_by_count"
)
def test_comment_workflow_missing_org_option(self, mock_issues: MagicMock) -> None:
OrganizationOption.objects.set_value(
organization=self.organization, key="sentry:github_pr_bot", value=False
)
github_comment_workflow(self.pr.id, self.project.id)
assert not mock_issues.called
@patch(
"sentry.integrations.github.integration.GitHubPRCommentWorkflow.get_top_5_issues_by_count"
)
@patch("sentry.models.Project.objects.get_from_cache")
@patch("sentry.integrations.source_code_management.tasks.metrics")
def test_comment_workflow_missing_project(
self, mock_metrics: MagicMock, mock_project: MagicMock, mock_issues: MagicMock
) -> None:
# Project.DoesNotExist should trigger the cache to release the key
cache.set(self.cache_key, True, timedelta(minutes=5).total_seconds())
mock_project.side_effect = Project.DoesNotExist
github_comment_workflow(self.pr.id, self.project.id)
assert not mock_issues.called
assert cache.get(self.cache_key) is None
mock_metrics.incr.assert_called_with(
"github.pr_comment.error", tags={"type": "missing_project"}
)
@patch(
"sentry.integrations.github.integration.GitHubPRCommentWorkflow.get_top_5_issues_by_count"
)
@patch("sentry.models.Repository.objects")
@patch("sentry.integrations.github.integration.GitHubPRCommentWorkflow.get_comment_body")
@patch("sentry.integrations.source_code_management.tasks.metrics")
def test_comment_workflow_missing_repo(
self, mock_metrics, mock_get_comment_body, mock_repository, mock_issues
):
# Repository.DoesNotExist should trigger the cache to release the key
cache.set(self.cache_key, True, timedelta(minutes=5).total_seconds())
mock_repository.get.side_effect = Repository.DoesNotExist
github_comment_workflow(self.pr.id, self.project.id)
mock_issues.return_value = [
{"group_id": g.id, "event_count": 10} for g in Group.objects.all()
]
assert not mock_issues.called
assert not mock_get_comment_body.called
assert cache.get(self.cache_key) is None
mock_metrics.incr.assert_called_with(
"source_code_management.pr_comment.error", tags={"type": "missing_repo"}
)
@patch(
"sentry.integrations.github.integration.GitHubPRCommentWorkflow.get_top_5_issues_by_count"
)
@patch("sentry.integrations.github.integration.GitHubPRCommentWorkflow.get_comment_body")
@patch("sentry.integrations.source_code_management.tasks.metrics")
def test_comment_workflow_missing_integration(
self, mock_metrics, mock_get_comment_body, mock_issues
):
# missing integration should trigger the cache to release the key
cache.set(self.cache_key, True, timedelta(minutes=5).total_seconds())
# inactive integration
with assume_test_silo_mode_of(Integration):
self.integration.update(status=ObjectStatus.DISABLED)
mock_issues.return_value = [
{"group_id": g.id, "event_count": 10} for g in Group.objects.all()
]
github_comment_workflow(self.pr.id, self.project.id)
assert not mock_issues.called
assert not mock_get_comment_body.called
assert cache.get(self.cache_key) is None
mock_metrics.incr.assert_called_with(
"source_code_management.pr_comment.error", tags={"type": "missing_integration"}
)
@patch(
"sentry.integrations.github.integration.GitHubPRCommentWorkflow.get_top_5_issues_by_count"
)
@patch("sentry.integrations.github.integration.GitHubPRCommentWorkflow.get_comment_body")
@responses.activate
def test_comment_workflow_no_issues(
self, mock_get_comment_body: MagicMock, mock_issues: MagicMock
) -> None:
mock_issues.return_value = []
github_comment_workflow(self.pr.id, self.project.id)
assert mock_issues.called
assert not mock_get_comment_body.called
| TestCommentWorkflow |
python | getsentry__sentry | src/sentry/types/region.py | {
"start": 874,
"end": 3700
} | class ____:
"""A region of the Sentry platform, hosted by a region silo."""
name: str
"""The region's unique identifier."""
snowflake_id: int
"""The region's unique numeric representation for composing "snowflake" IDs.
Avoid using this in any context other than creating a new snowflake ID. Prefer
the name as the region's unique identifier. Snowflake IDs need to remain mutually
unique only within the same timestamp, so the meaning of a number may not be
stable over time if we ever choose to reassign or reuse the values.
The number must fit inside the maximum bit length specified by our snowflake ID
schema.
"""
address: str
"""The address of the region's silo.
Represent a region's hostname or IP address on the non-public network. This address
is used for RPC routing.
(e.g., "https://de.internal.getsentry.net" or https://10.21.99.10), and addresses
such as "http://localhost:8001" in a dev environment.
The customer facing address for a region is derived from a region's name
and `system.region-api-url-template`
"""
category: RegionCategory
"""The region's category."""
visible: bool = True
"""Whether the region is visible in API responses"""
def validate(self) -> None:
from sentry.utils.snowflake import REGION_ID
REGION_ID.validate(self.snowflake_id)
def to_url(self, path: str) -> str:
"""Resolve a path into a customer facing URL on this region's silo.
In monolith mode, there is likely only the historical simulated
region. The public URL of the simulated region is the same
as the application base URL.
"""
from sentry.api.utils import generate_region_url
if SiloMode.get_current_mode() == SiloMode.MONOLITH:
base_url = options.get("system.url-prefix")
else:
base_url = generate_region_url(self.name)
return urljoin(base_url, path)
def api_serialize(self) -> dict[str, Any]:
return {
"name": self.name,
"url": self.to_url(""),
}
def is_historic_monolith_region(self) -> bool:
"""Check whether this is a historic monolith region.
In a monolith environment, there exists only the one monolith "region",
which is a dummy object.
In a siloed environment whose data was migrated from a monolith environment,
all region-scoped entities that existed before the migration belong to the
historic monolith region by default. Unlike in the monolith environment,
this region is not a dummy object, but nonetheless is subject to special
cases to ensure that legacy data is handled correctly.
"""
return self.name == settings.SENTRY_MONOLITH_REGION
| Region |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/c_preproc.py | {
"start": 17494,
"end": 25054
} | class ____(object):
def __init__(self, nodepaths=None, defines=None):
self.lines = []
if defines is None:
self.defs = {}
else:
self.defs = dict(defines)
self.state = []
self.count_files = 0
self.currentnode_stack = []
self.nodepaths = nodepaths or []
self.nodes = []
self.names = []
self.curfile = ''
self.ban_includes = set()
self.listed = set()
def cached_find_resource(self, node, filename):
try:
cache = node.ctx.preproc_cache_node
except AttributeError:
cache = node.ctx.preproc_cache_node = Utils.lru_cache(FILE_CACHE_SIZE)
key = (node, filename)
try:
return cache[key]
except KeyError:
ret = node.find_resource(filename)
if ret:
if getattr(ret, 'children', None):
ret = None
elif ret.is_child_of(node.ctx.bldnode):
tmp = node.ctx.srcnode.search_node(ret.path_from(node.ctx.bldnode))
if tmp and getattr(tmp, 'children', None):
ret = None
cache[key] = ret
return ret
def tryfind(self, filename, kind='"', env=None):
if filename.endswith('.moc'):
self.names.append(filename)
return None
self.curfile = filename
found = None
if kind == '"':
if env.MSVC_VERSION:
for n in reversed(self.currentnode_stack):
found = self.cached_find_resource(n, filename)
if found:
break
else:
found = self.cached_find_resource(self.currentnode_stack[-1], filename)
if not found:
for n in self.nodepaths:
found = self.cached_find_resource(n, filename)
if found:
break
listed = self.listed
if found and not found in self.ban_includes:
if found not in listed:
listed.add(found)
self.nodes.append(found)
self.addlines(found)
else:
if filename not in listed:
listed.add(filename)
self.names.append(filename)
return found
def filter_comments(self, node):
code = node.read()
if use_trigraphs:
for (a, b) in trig_def:
code = code.split(a).join(b)
code = re_nl.sub('', code)
code = re_cpp.sub(repl, code)
return re_lines.findall(code)
def parse_lines(self, node):
try:
cache = node.ctx.preproc_cache_lines
except AttributeError:
cache = node.ctx.preproc_cache_lines = Utils.lru_cache(LINE_CACHE_SIZE)
try:
return cache[node]
except KeyError:
cache[node] = lines = self.filter_comments(node)
lines.append((POPFILE, ''))
lines.reverse()
return lines
def addlines(self, node):
self.currentnode_stack.append(node.parent)
self.count_files += 1
if self.count_files > recursion_limit:
raise PreprocError('recursion limit exceeded')
if Logs.verbose:
Logs.debug('preproc: reading file %r', node)
try:
lines = self.parse_lines(node)
except EnvironmentError:
raise PreprocError('could not read the file %r' % node)
except Exception:
if Logs.verbose > 0:
Logs.error('parsing %r failed %s', node, traceback.format_exc())
else:
self.lines.extend(lines)
def start(self, node, env):
Logs.debug('preproc: scanning %s (in %s)', node.name, node.parent.name)
self.current_file = node
self.addlines(node)
if env.DEFINES:
lst = format_defines(env.DEFINES)
lst.reverse()
self.lines.extend([('define', x) for x in lst])
while self.lines:
(token, line) = self.lines.pop()
if token == POPFILE:
self.count_files -= 1
self.currentnode_stack.pop()
continue
try:
state = self.state
if token[:2] == 'if':
state.append(undefined)
elif token == 'endif':
state.pop()
if token[0] != 'e':
if skipped in self.state or ignored in self.state:
continue
if token == 'if':
ret = eval_macro(tokenize(line), self.defs)
if ret:
state[-1] = accepted
else:
state[-1] = ignored
elif token == 'ifdef':
m = re_mac.match(line)
if m and m.group() in self.defs:
state[-1] = accepted
else:
state[-1] = ignored
elif token == 'ifndef':
m = re_mac.match(line)
if m and m.group() in self.defs:
state[-1] = ignored
else:
state[-1] = accepted
elif token == 'include' or token == 'import':
(kind, inc) = extract_include(line, self.defs)
self.current_file = self.tryfind(inc, kind, env)
if token == 'import':
self.ban_includes.add(self.current_file)
elif token == 'elif':
if state[-1] == accepted:
state[-1] = skipped
elif state[-1] == ignored:
if eval_macro(tokenize(line), self.defs):
state[-1] = accepted
elif token == 'else':
if state[-1] == accepted:
state[-1] = skipped
elif state[-1] == ignored:
state[-1] = accepted
elif token == 'define':
try:
self.defs[self.define_name(line)] = line
except AttributeError:
raise PreprocError('Invalid define line %r' % line)
elif token == 'undef':
m = re_mac.match(line)
if m and m.group() in self.defs:
self.defs.__delitem__(m.group())
elif token == 'pragma':
if re_pragma_once.match(line.lower()):
self.ban_includes.add(self.current_file)
except Exception as e:
if Logs.verbose:
Logs.debug('preproc: line parsing failed (%s): %s %s', e, line, traceback.format_exc())
def define_name(self, line):
return re_mac.match(line).group()
def scan(task):
try:
incn = task.generator.includes_nodes
except AttributeError:
raise Errors.WafError('%r is missing a feature such as "c", "cxx" or "includes": ' % task.generator)
if go_absolute:
nodepaths = incn + [task.generator.bld.root.find_dir(x) for x in standard_includes]
else:
nodepaths = [x for x in incn if x.is_child_of(x.ctx.srcnode) or x.is_child_of(x.ctx.bldnode)]
tmp = c_parser(nodepaths)
tmp.start(task.inputs[0], task.env)
return (tmp.nodes, tmp.names)
| c_parser |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_type_check.py | {
"start": 9470,
"end": 9674
} | class ____(TestCase):
def test_generic(self):
vals = isposinf(np.array((-1.0, 0, 1)) / 0.0)
assert_(vals[0] == 0)
assert_(vals[1] == 0)
assert_(vals[2] == 1)
| TestIsposinf |
python | sqlalchemy__sqlalchemy | test/orm/test_merge.py | {
"start": 64132,
"end": 66293
} | class ____(fixtures.MappedTest):
"""Test interaction of merge() with load_on_pending relationships"""
@classmethod
def define_tables(cls, metadata):
Table(
"rocks",
metadata,
Column("id", Integer, primary_key=True),
Column("description", String(10)),
)
Table(
"bugs",
metadata,
Column("id", Integer, primary_key=True),
Column("rockid", Integer, ForeignKey("rocks.id")),
)
@classmethod
def setup_classes(cls):
class Rock(cls.Basic, ComparableEntity):
pass
class Bug(cls.Basic, ComparableEntity):
pass
def _setup_delete_orphan_o2o(self):
self.mapper_registry.map_imperatively(
self.classes.Rock,
self.tables.rocks,
properties={
"bug": relationship(
self.classes.Bug,
cascade="all,delete-orphan",
load_on_pending=True,
uselist=False,
)
},
)
self.mapper_registry.map_imperatively(
self.classes.Bug, self.tables.bugs
)
self.sess = fixture_session()
def _merge_delete_orphan_o2o_with(self, bug):
# create a transient rock with passed bug
r = self.classes.Rock(id=0, description="moldy")
r.bug = bug
m = self.sess.merge(r)
# we've already passed ticket #2374 problem since merge() returned,
# but for good measure:
assert m is not r
eq_(m, r)
def test_merge_delete_orphan_o2o_none(self):
"""one to one delete_orphan relationships marked load_on_pending
should be able to merge() with attribute None"""
self._setup_delete_orphan_o2o()
self._merge_delete_orphan_o2o_with(None)
def test_merge_delete_orphan_o2o(self):
"""one to one delete_orphan relationships marked load_on_pending
should be able to merge()"""
self._setup_delete_orphan_o2o()
self._merge_delete_orphan_o2o_with(self.classes.Bug(id=1))
| LoadOnPendingTest |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_vectors.py | {
"start": 6218,
"end": 12720
} | class ____:
@staticmethod
def self_provided(
*,
name: Optional[str] = None,
encoding: Optional[_MultiVectorEncodingConfigCreate] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
multi_vector_config: Optional[_MultiVectorConfigCreate] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
):
"""Create a multi-vector using no vectorizer. You will need to provide the vectors yourself.
Args:
name: The name of the vector.
encoding: The type of multi-vector encoding to use in the vector index. Defaults to `None`, which uses the server-defined default.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
multi_vector_config: The configuration for the multi-vector index. Use `wvc.config.Configure.VectorIndex.MultiVector` to create a multi-vector configuration. None by default
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
"""
return _VectorConfigCreate(
name=name,
vectorizer=_VectorizerConfigCreate(vectorizer=Vectorizers.NONE),
vector_index_config=_IndexWrappers.multi(
vector_index_config, quantizer, multi_vector_config, encoding
),
)
@staticmethod
def text2vec_jinaai(
*,
name: Optional[str] = None,
encoding: Optional[_MultiVectorEncodingConfigCreate] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
dimensions: Optional[int] = None,
model: Optional[str] = None,
source_properties: Optional[List[str]] = None,
multi_vector_config: Optional[_MultiVectorConfigCreate] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
vectorize_collection_name: bool = True,
) -> _VectorConfigCreate:
"""Create a multi-vector using the `text2colbert-jinaai` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/jinaai/colbert) for detailed usage.
Args:
name: The name of the vector.
encoding: The type of multi-vector encoding to use in the vector index. Defaults to `None`, which uses the server-defined default.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
dimensions: Number of dimensions. Applicable to v3 OpenAI models only. Defaults to `None`, which uses the server-defined default.
model: The model to use. Defaults to `None`, which uses the server-defined default.
encoding: The type of multi-vector encoding to use in the vector index. Defaults to `None`, which uses the server-defined default.
source_properties: Which properties should be included when vectorizing. By default all text properties are included.
multi_vector_config: The configuration for the multi-vector index. Use `wvc.config.Configure.VectorIndex.MultiVector` to create a multi-vector configuration. None by default
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
vectorize_collection_name: Whether to vectorize the collection name. Defaults to `True`.
"""
return _VectorConfigCreate(
name=name,
source_properties=source_properties,
vector_index_config=_IndexWrappers.multi(
vector_index_config, quantizer, multi_vector_config, encoding
),
vectorizer=_Text2ColbertJinaAIConfig(
model=model, dimensions=dimensions, vectorizeClassName=vectorize_collection_name
),
)
@staticmethod
def multi2vec_jinaai(
*,
name: Optional[str] = None,
encoding: Optional[_MultiVectorEncodingConfigCreate] = None,
quantizer: Optional[_QuantizerConfigCreate] = None,
base_url: Optional[AnyHttpUrl] = None,
image_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
model: Optional[Union[JinaMultimodalModel, str]] = None,
text_fields: Optional[Union[List[str], List[Multi2VecField]]] = None,
multi_vector_config: Optional[_MultiVectorConfigCreate] = None,
vector_index_config: Optional[_VectorIndexConfigCreate] = None,
) -> _VectorConfigCreate:
"""Create a vector using the `multi2multivec-jinaai` module.
See the [documentation](https://weaviate.io/developers/weaviate/model-providers/jinaai/embeddings-multimodal)
for detailed usage.
Args:
name: The name of the vector.
encoding: The type of multi-vector encoding to use in the vector index. Defaults to `None`, which uses the server-defined default.
quantizer: The quantizer to use for the vector index. If not provided, no quantization will be applied.
base_url: The base URL to use where API requests should go. Defaults to `None`, which uses the server-defined default.
image_fields: The image fields to use in vectorization.
model: The model to use. Defaults to `None`, which uses the server-defined default.
text_fields: The text fields to use in vectorization.
multi_vector_config: The configuration for the multi-vector index. Use `wvc.config.Configure.VectorIndex.MultiVector` to create a multi-vector configuration. None by default
vector_index_config: The configuration for Weaviate's vector index. Use `wvc.config.Configure.VectorIndex` to create a vector index configuration. None by default
Raises:
pydantic.ValidationError: If `model` is not a valid value from the `JinaMultimodalModel` type.
"""
return _VectorConfigCreate(
name=name,
vectorizer=_Multi2MultiVecJinaConfig(
baseURL=base_url,
model=model,
imageFields=_map_multi2vec_fields(image_fields),
textFields=_map_multi2vec_fields(text_fields),
),
vector_index_config=_IndexWrappers.multi(
vector_index_config, quantizer, multi_vector_config, encoding
),
)
| _MultiVectors |
python | allegroai__clearml | clearml/backend_api/services/v2_20/models.py | {
"start": 99398,
"end": 100279
} | class ____(Request):
"""
Gets model information
:param task: Task id
:type task: str
"""
_service = "models"
_action = "get_by_task_id"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {"task": {"description": "Task id", "type": ["string", "null"]}},
"type": "object",
}
def __init__(self, task: Optional[str] = None, **kwargs: Any) -> None:
super(GetByTaskIdRequest, self).__init__(**kwargs)
self.task = task
@schema_property("task")
def task(self) -> Optional[str]:
return self._property_task
@task.setter
def task(self, value: Optional[str]) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
| GetByTaskIdRequest |
python | streamlit__streamlit | lib/streamlit/elements/lib/column_types.py | {
"start": 5388,
"end": 5645
} | class ____(TypedDict):
type: Literal["date"]
format: NotRequired[str | Literal["localized", "distance", "iso8601"] | None]
min_value: NotRequired[str | None]
max_value: NotRequired[str | None]
step: NotRequired[int | None]
| DateColumnConfig |
python | langchain-ai__langchain | libs/langchain/langchain_classic/agents/mrkl/base.py | {
"start": 1490,
"end": 6112
} | class ____(Agent):
"""Agent for the MRKL chain.
Args:
output_parser: Output parser for the agent.
"""
output_parser: AgentOutputParser = Field(default_factory=MRKLOutputParser)
@classmethod
@override
def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
return MRKLOutputParser()
@property
def _agent_type(self) -> str:
"""Return Identifier of agent type."""
return AgentType.ZERO_SHOT_REACT_DESCRIPTION
@property
def observation_prefix(self) -> str:
"""Prefix to append the observation with.
Returns:
"Observation: "
"""
return "Observation: "
@property
def llm_prefix(self) -> str:
"""Prefix to append the llm call with.
Returns:
"Thought: "
"""
return "Thought:"
@classmethod
def create_prompt(
cls,
tools: Sequence[BaseTool],
prefix: str = PREFIX,
suffix: str = SUFFIX,
format_instructions: str = FORMAT_INSTRUCTIONS,
input_variables: list[str] | None = None,
) -> PromptTemplate:
"""Create prompt in the style of the zero shot agent.
Args:
tools: List of tools the agent will have access to, used to format the
prompt.
prefix: String to put before the list of tools.
suffix: String to put after the list of tools.
format_instructions: Instructions on how to use the tools.
input_variables: List of input variables the final prompt will expect.
Returns:
A PromptTemplate with the template assembled from the pieces here.
"""
tool_strings = render_text_description(list(tools))
tool_names = ", ".join([tool.name for tool in tools])
format_instructions = format_instructions.format(tool_names=tool_names)
template = f"{prefix}\n\n{tool_strings}\n\n{format_instructions}\n\n{suffix}"
if input_variables:
return PromptTemplate(template=template, input_variables=input_variables)
return PromptTemplate.from_template(template)
@classmethod
def from_llm_and_tools(
cls,
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
callback_manager: BaseCallbackManager | None = None,
output_parser: AgentOutputParser | None = None,
prefix: str = PREFIX,
suffix: str = SUFFIX,
format_instructions: str = FORMAT_INSTRUCTIONS,
input_variables: list[str] | None = None,
**kwargs: Any,
) -> Agent:
"""Construct an agent from an LLM and tools.
Args:
llm: The LLM to use as the agent LLM.
tools: The tools to use.
callback_manager: The callback manager to use.
output_parser: The output parser to use.
prefix: The prefix to use.
suffix: The suffix to use.
format_instructions: The format instructions to use.
input_variables: The input variables to use.
kwargs: Additional parameters to pass to the agent.
"""
cls._validate_tools(tools)
prompt = cls.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
format_instructions=format_instructions,
input_variables=input_variables,
)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
_output_parser = output_parser or cls._get_default_output_parser()
return cls(
llm_chain=llm_chain,
allowed_tools=tool_names,
output_parser=_output_parser,
**kwargs,
)
@classmethod
def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
validate_tools_single_input(cls.__name__, tools)
if len(tools) == 0:
msg = (
f"Got no tools for {cls.__name__}. At least one tool must be provided."
)
raise ValueError(msg)
for tool in tools:
if tool.description is None:
msg = ( # type: ignore[unreachable]
f"Got a tool {tool.name} without a description. For this agent, "
f"a description must always be provided."
)
raise ValueError(msg)
super()._validate_tools(tools)
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
)
| ZeroShotAgent |
python | ethereum__web3.py | tests/core/middleware/test_transaction_signing.py | {
"start": 12590,
"end": 19693
} | class ____(AsyncBaseProvider):
async def coro_request(self, method, params):
raise NotImplementedError(f"Cannot make request for {method}:{params}")
@pytest_asyncio.fixture
async def async_w3_dummy(request_mocker):
w3_base = AsyncWeb3(provider=AsyncDummyProvider(), middleware=[])
async with request_mocker(
w3_base,
mock_results={
"eth_sendRawTransaction": lambda *args: args,
"net_version": 1,
"eth_chainId": "0x02",
},
):
yield w3_base
@pytest_asyncio.fixture
async def async_w3():
_async_w3 = AsyncWeb3(AsyncEthereumTesterProvider())
accounts = await _async_w3.eth.accounts
_async_w3.eth.default_account = accounts[0]
return _async_w3
@pytest_asyncio.fixture
async def async_fund_account(async_w3):
# fund local account
tx_value = async_w3.to_wei(10, "ether")
for address in (ADDRESS_1, ADDRESS_2):
await async_w3.eth.send_transaction(
{
"to": address,
"from": async_w3.eth.default_account,
"gas": 21000,
"value": tx_value,
}
)
acct_bal = await async_w3.eth.get_balance(address)
assert acct_bal == tx_value
@pytest.mark.parametrize(
"method,key_object,from_,expected",
TEST_SIGN_AND_SEND_RAW_MIDDLEWARE_PARAMS,
)
@pytest.mark.asyncio
async def test_async_sign_and_send_raw_middleware(
async_w3_dummy,
method,
from_,
expected,
key_object,
):
async_w3_dummy.middleware_onion.inject(
SignAndSendRawMiddlewareBuilder.build(key_object), layer=0
)
legacy_transaction = {
"to": "0x7E5F4552091A69125d5DfCb7b8C2659029395Bdf",
"from": from_,
"gas": 21000,
"gasPrice": 10**9,
"value": 1,
"nonce": 0,
}
if isinstance(expected, type) and issubclass(expected, Exception):
with pytest.raises(expected):
await async_w3_dummy.manager.coro_request(
method,
[legacy_transaction],
)
else:
# assert with legacy txn params
actual = await async_w3_dummy.manager.coro_request(
method,
[legacy_transaction],
)
assert_method_and_txn_signed(actual, expected)
# assert with dynamic fee transaction params and explicit type
dynamic_fee_transaction = dissoc(legacy_transaction, "gasPrice")
dynamic_fee_transaction = assoc(
dynamic_fee_transaction, "maxFeePerGas", 2000000000
)
dynamic_fee_transaction = assoc(
dynamic_fee_transaction, "maxPriorityFeePerGas", 1000000000
)
dynamic_fee_transaction = assoc(dynamic_fee_transaction, "type", "0x2")
actual_dynamic_fee_call = await async_w3_dummy.manager.coro_request(
method, [dynamic_fee_transaction]
)
assert_method_and_txn_signed(actual_dynamic_fee_call, expected)
# assert with dynamic fee transaction params and no explicit type
dynamic_fee_transaction_no_type = dissoc(dynamic_fee_transaction, "type")
actual_dynamic_fee_call_no_type = await async_w3_dummy.manager.coro_request(
method, [dynamic_fee_transaction_no_type]
)
assert_method_and_txn_signed(actual_dynamic_fee_call_no_type, expected)
@pytest.mark.parametrize(
"transaction,expected,key_object,from_",
TEST_SIGNED_TRANSACTION_PARAMS,
ids=[
"with set gas",
"with no set gas",
"with mismatched sender",
"with invalid sender",
"with gasPrice lower than base fee",
"with txn type and dynamic fee txn params",
"with dynamic fee txn params and no type",
],
)
@pytest.mark.asyncio
async def test_async_signed_transaction(
async_w3,
async_fund_account,
transaction,
expected,
key_object,
from_,
):
async_w3.middleware_onion.inject(
SignAndSendRawMiddlewareBuilder.build(key_object), layer=0
)
# Drop any falsy addresses
to_from = valfilter(bool, {"to": async_w3.eth.default_account, "from": from_})
_transaction = merge(transaction, to_from)
if isinstance(expected, type) and issubclass(expected, Exception):
with pytest.raises(expected):
await async_w3.eth.send_transaction(_transaction)
else:
start_balance = await async_w3.eth.get_balance(
_transaction.get("from", async_w3.eth.default_account)
)
await async_w3.eth.send_transaction(_transaction)
assert (
await async_w3.eth.get_balance(_transaction.get("from"))
<= start_balance + expected
)
@pytest.mark.parametrize(
"from_converter,to_converter",
(
(identity, identity),
(hex_to_bytes, identity),
(identity, hex_to_bytes),
(hex_to_bytes, hex_to_bytes),
),
)
@pytest.mark.asyncio
async def test_async_sign_and_send_raw_middleware_with_byte_addresses(
async_w3_dummy, from_converter, to_converter
):
private_key = PRIVATE_KEY_1
from_ = from_converter(ADDRESS_1)
to_ = to_converter(ADDRESS_2)
async_w3_dummy.middleware_onion.inject(
SignAndSendRawMiddlewareBuilder.build(private_key), layer=0
)
actual = await async_w3_dummy.manager.coro_request(
"eth_sendTransaction",
[
{
"to": to_,
"from": from_,
"gas": 21000,
"gasPrice": 0,
"value": 1,
"nonce": 0,
}
],
)
raw_txn = actual[1][0]
actual_method = actual[0]
assert actual_method == "eth_sendRawTransaction"
assert is_hexstr(raw_txn)
@pytest.mark.asyncio
async def test_async_sign_and_send_raw_middleware_with_buffered_gas_estimate_middleware(
async_w3_dummy, request_mocker
):
gas_buffer = 100000 # the default internal value
gas_estimate = 12345 - gas_buffer
async_w3_dummy.middleware_onion.add(BufferedGasEstimateMiddleware)
async_w3_dummy.middleware_onion.inject(
SignAndSendRawMiddlewareBuilder.build(PRIVATE_KEY_1), layer=0
)
async with request_mocker(
async_w3_dummy,
mock_results={
"eth_getBlockByNumber": {"gasLimit": 200000}, # arbitrary high number
"eth_estimateGas": gas_estimate,
},
):
actual = await async_w3_dummy.manager.coro_request(
"eth_sendTransaction",
[
{
"to": ADDRESS_2,
"from": ADDRESS_1,
"value": 1,
"nonce": 0,
"maxFeePerGas": 10**9,
"maxPriorityFeePerGas": 10**9,
}
],
)
raw_txn = actual[1][0]
actual_method = actual[0]
assert actual_method == "eth_sendRawTransaction"
assert is_hexstr(raw_txn)
decoded_txn = rlp.decode(HexBytes(raw_txn[4:]), sedes=DynamicFeeTransaction)
assert decoded_txn["gas"] == gas_estimate + gas_buffer
| AsyncDummyProvider |
python | PrefectHQ__prefect | src/prefect/events/schemas/deployment_triggers.py | {
"start": 2375,
"end": 2626
} | class ____(BaseDeploymentTrigger, EventTrigger):
"""
A trigger that fires based on the presence or absence of events within a given
period of time.
"""
trigger_type: ClassVar[Type[TriggerTypes]] = EventTrigger
| DeploymentEventTrigger |
python | nedbat__coveragepy | tests/test_testing.py | {
"start": 13798,
"end": 17596
} | class ____(CoverageTest):
"""Tests of assert_coverage_warnings"""
def test_one_warning(self) -> None:
with pytest.warns(Warning) as warns:
warnings.warn("Hello there", category=CoverageWarning)
assert_coverage_warnings(warns, "Hello there")
def test_many_warnings(self) -> None:
with pytest.warns(Warning) as warns:
warnings.warn("The first", category=CoverageWarning)
warnings.warn("The second", category=CoverageWarning)
warnings.warn("The third", category=CoverageWarning)
assert_coverage_warnings(warns, "The first", "The second", "The third")
def test_wrong_type(self) -> None:
with pytest.warns(Warning) as warns:
warnings.warn("Not ours", category=Warning)
with pytest.raises(AssertionError):
assert_coverage_warnings(warns, "Not ours")
def test_wrong_message(self) -> None:
with pytest.warns(Warning) as warns:
warnings.warn("Goodbye", category=CoverageWarning)
with pytest.raises(AssertionError):
assert_coverage_warnings(warns, "Hello there")
def test_wrong_number_too_many(self) -> None:
with pytest.warns(Warning) as warns:
warnings.warn("The first", category=CoverageWarning)
warnings.warn("The second", category=CoverageWarning)
with pytest.raises(AssertionError):
assert_coverage_warnings(warns, "The first", "The second", "The third")
def test_wrong_number_too_few(self) -> None:
with pytest.warns(Warning) as warns:
warnings.warn("The first", category=CoverageWarning)
warnings.warn("The second", category=CoverageWarning)
warnings.warn("The third", category=CoverageWarning)
with pytest.raises(AssertionError):
assert_coverage_warnings(warns, "The first", "The second")
def test_regex_matches(self) -> None:
with pytest.warns(Warning) as warns:
warnings.warn("The first", category=CoverageWarning)
assert_coverage_warnings(warns, re.compile("f?rst"))
def test_regex_doesnt_match(self) -> None:
with pytest.warns(Warning) as warns:
warnings.warn("The first", category=CoverageWarning)
with pytest.raises(AssertionError):
assert_coverage_warnings(warns, re.compile("second"))
def test_failing_proxy() -> None:
class Arithmetic:
"""Sample class to test FailingProxy."""
# pylint: disable=missing-function-docstring
def add(self, a, b): # type: ignore[no-untyped-def]
return a + b
def subtract(self, a, b): # type: ignore[no-untyped-def]
return a - b
proxy = FailingProxy(Arithmetic(), "add", [RuntimeError("First"), RuntimeError("Second")])
# add fails the first time
with pytest.raises(RuntimeError, match="First"):
proxy.add(1, 2)
# subtract always works
assert proxy.subtract(10, 3) == 7
# add fails the second time
with pytest.raises(RuntimeError, match="Second"):
proxy.add(3, 4)
# then add starts working
assert proxy.add(5, 6) == 11
def test_all_our_source_files() -> None:
# Twas brillig and the slithy toves
i = 0
for i, (source_file, source) in enumerate(all_our_source_files(), start=1):
has_toves = (source_file.name == "test_testing.py") # fmt: skip
assert (("# Twas brillig " + "and the slithy toves") in source) == has_toves
# tests/__init__.py is shortest at 196
assert len(source) > 190, (
f"{source_file} is shorter ({len(source)} bytes) than the expected smallest file"
)
# currently 119 files
assert 100 < i < 140, f"Expected about 118 source files, got {i}"
| AssertCoverageWarningsTest |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/assets.py | {
"start": 2892,
"end": 3056
} | class ____(BaseModel):
"""Asset alias collection response."""
asset_aliases: Iterable[AssetAliasResponse]
total_entries: int
| AssetAliasCollectionResponse |
python | falconry__falcon | tests/asgi/test_hello_asgi.py | {
"start": 629,
"end": 2234
} | class ____:
sample_status = '200 OK'
sample_unicode = 'Hello World! \x80 - ' + testing.rand_string(0, 5)
sample_utf8 = sample_unicode.encode('utf-8')
def __init__(self, mode):
self.called = False
self.mode = mode
async def on_get(self, req, resp):
self.called = True
self.req, self.resp = req, resp
resp.status = falcon.HTTP_200
if 'stream' in self.mode:
if 'filelike' in self.mode:
stream = DataReader(self.sample_utf8)
else:
async def data_emitter():
for b in self.sample_utf8:
yield bytes([b])
if 'stream_genfunc' in self.mode:
stream = data_emitter
elif 'stream_nongenfunc' in self.mode:
stream = 42
else:
stream = data_emitter()
if 'stream_len' in self.mode:
stream_len = len(self.sample_utf8)
else:
stream_len = None
if 'use_helper' in self.mode:
resp.set_stream(stream, stream_len)
else:
resp.stream = stream
resp.content_length = stream_len
if 'body' in self.mode:
if 'bytes' in self.mode:
resp.text = self.sample_utf8
else:
resp.text = self.sample_unicode
if 'data' in self.mode:
resp.data = self.sample_utf8
async def on_head(self, req, resp):
await self.on_get(req, resp)
| HelloResource |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/dag_stats.py | {
"start": 1094,
"end": 1328
} | class ____(BaseModel):
"""DAG Stats serializer for responses."""
dag_id: str
dag_display_name: str = Field(validation_alias=AliasPath("dag_model", "dag_display_name"))
stats: list[DagStatsStateResponse]
| DagStatsResponse |
python | huggingface__transformers | src/transformers/models/canine/modeling_canine.py | {
"start": 55400,
"end": 58648
} | class ____(CaninePreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.canine = CanineModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
end_positions: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, QuestionAnsweringModelOutput]:
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.canine(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = [
"CanineForMultipleChoice",
"CanineForQuestionAnswering",
"CanineForSequenceClassification",
"CanineForTokenClassification",
"CanineLayer",
"CanineModel",
"CaninePreTrainedModel",
]
| CanineForQuestionAnswering |
python | kamyu104__LeetCode-Solutions | Python/lexicographically-smallest-string-after-applying-operations.py | {
"start": 1597,
"end": 2416
} | class ____(object):
def findLexSmallestString(self, s, a, b):
"""
:type s: str
:type a: int
:type b: int
:rtype: str
"""
q, lookup, result = collections.deque([s]), {s}, s
while q:
curr = q.popleft()
if curr < result:
result = curr
add_a = list(curr)
for i, c in enumerate(add_a):
if i%2:
add_a[i] = str((int(c)+a) % 10)
add_a = "".join(add_a)
if add_a not in lookup:
lookup.add(add_a)
q.append(add_a)
rotate_b = curr[b:] + curr[:b]
if rotate_b not in lookup:
lookup.add(rotate_b)
q.append(rotate_b)
return result
| Solution2 |
python | PyCQA__pylint | tests/functional/a/alternative/alternative_union_syntax_error.py | {
"start": 1959,
"end": 2293
} | class ____(typing.NamedTuple):
my_var: int | str # [unsupported-binary-operation]
# Check typing.TypedDict
CustomTypedDict = TypedDict("CustomTypedDict", my_var=int | str) # [unsupported-binary-operation]
CustomTypedDict2 = TypedDict("CustomTypedDict2", {"my_var": int | str}) # [unsupported-binary-operation]
| CustomNamedTuple3 |
python | pytorch__pytorch | test/distributed/test_nvshmem.py | {
"start": 28295,
"end": 32552
} | class ____(MultiProcContinuousTest):
def _init_device(self) -> None:
# TODO: relieve this (seems to hang if without)
device_module.set_device(self.device)
# Set NVSHMEM as SymmMem backend
symm_mem.set_backend("NVSHMEM")
@property
def device(self) -> torch.device:
return torch.device(device_type, self.rank)
@skipIfRocm
@parametrize("tile_size", [32, 128, 512])
@parametrize("dtype", [torch.float, torch.half, torch.bfloat16])
def test_tile_reduce(self, tile_size: int, dtype: torch.dtype) -> None:
full_size = 1024
assert tile_size <= full_size
self._init_device()
group_name = dist.group.WORLD.group_name
symm_mem.enable_symm_mem_for_group(group_name)
full_inp = symm_mem.empty(
full_size, full_size, dtype=dtype, device=self.device
).fill_(self.rank)
full_out = symm_mem.empty(
full_size, full_size, dtype=dtype, device=self.device
).fill_(0)
slice_ut = slice(tile_size, 2 * tile_size)
inp_tile = full_inp[slice_ut, slice_ut]
out_tile = full_out[slice_ut, slice_ut]
# Reduce the tile
root = 0
torch.ops.symm_mem.tile_reduce(inp_tile, out_tile, root, group_name)
# Check data
expected = torch.zeros_like(full_out)
expected_tile = expected[slice_ut, slice_ut]
if self.rank == root:
expected_tile.fill_(self.world_size * (self.world_size - 1) / 2)
torch.testing.assert_close(full_out, expected)
@skipIfRocm
@parametrize("tile_size", [32, 128, 512])
@parametrize(
"root_ratio", [1, 2]
) # 1: all ranks are roots, 2: half of ranks are roots
@parametrize("dtype", [torch.float, torch.half, torch.bfloat16])
def test_multi_root_tile_reduce(
self, tile_size: int, root_ratio: int, dtype: torch.dtype
) -> None:
full_size = 2048
num_slices_col = 2 # number of tiles on column dimension
num_slices_row = (
self.world_size // num_slices_col
) # number of tiles on row dimension
assert tile_size * num_slices_col <= full_size
assert tile_size * num_slices_row <= full_size
self._init_device()
group_name = dist.group.WORLD.group_name
symm_mem.enable_symm_mem_for_group(group_name)
full_inp = symm_mem.empty(
full_size, full_size, dtype=dtype, device=self.device
).fill_(self.rank)
full_out = symm_mem.empty(
full_size, full_size, dtype=dtype, device=self.device
).fill_(0)
# Get range of each slice in terms of element indices
slices_row = [
slice(s * tile_size, (s + 1) * tile_size) for s in range(num_slices_row)
]
slices_col = [
slice(s * tile_size, (s + 1) * tile_size) for s in range(num_slices_col)
]
# Active roots, can be a subset of all ranks
num_active_roots = self.world_size // root_ratio
active_roots = list(range(num_active_roots))
# Map rank to slice indices (e.g. rank 0 -> (0, 0), rank 1 -> (0, 1), rank 2 -> (1, 0), rank 3 -> (1, 1))
map_rank_to_slices = lambda r: ( # noqa: E731
slices_row[r // num_slices_col],
slices_col[r % num_slices_col],
)
# Populate input tiles
input_tiles_ij = [map_rank_to_slices(r) for r in active_roots]
input_tiles = [
full_inp[slice_i, slice_j] for (slice_i, slice_j) in input_tiles_ij
]
# My output tile (i.e. the one that I will reduce)
out_tile_ij = map_rank_to_slices(self.rank)
out_tile = full_out[out_tile_ij[0], out_tile_ij[1]]
# Reduce the tiles
torch.ops.symm_mem.multi_root_tile_reduce(
input_tiles, out_tile, active_roots, group_name
)
# Check data
expected = torch.zeros_like(full_out)
expected_tile = expected[out_tile_ij[0], out_tile_ij[1]]
if self.rank in active_roots:
expected_tile.fill_(self.world_size * (self.world_size - 1) / 2)
torch.testing.assert_close(full_out, expected)
if __name__ == "__main__":
run_tests()
| NVSHMEMTileCommTest |
python | neetcode-gh__leetcode | python/0049-group-anagrams.py | {
"start": 0,
"end": 1085
} | class ____:
def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
groups = {}
# Iterate over strings
for s in strs: # O(m)
count = {}
# Count frequency of each character
for char in s: # O(n)
count[char] = count.get(char, 0) + 1
# Convert count Dict to List, sort it, and then convert to Tuple (we cannot use dicts or lists as keys in a hashmap)
tup = tuple(sorted(count.items())) # O(1) because there is limited amount of possible keys in the alphabet -> O(26) + O(26*log26) + O(26)
if tup in groups:
groups[tup].append(s)
else:
groups[tup] = [s]
return list(groups.values())
def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
ans = collections.defaultdict(list)
for s in strs:
count = [0] * 26
for c in s:
count[ord(c) - ord("a")] += 1
ans[tuple(count)].append(s)
return list(ans.values())
| Solution |
python | realpython__materials | python-string/person.py | {
"start": 0,
"end": 356
} | class ____:
def __init__(self, name, age):
    """Store the person's display name and age."""
    self.name = name
    self.age = age
def __repr__(self):
return f"{type(self).__name__}(name='{self.name}', age={self.age})"
def __str__(self):
return f"I'm {self.name}, and I'm {self.age} years old."
# Demo: construct a Person and contrast the repr() and str() outputs.
john = Person("John Doe", 35)
print(repr(john))
print(str(john))
| Person |
python | google__pytype | pytype/abstract/_function_base.py | {
"start": 1533,
"end": 5971
} | class ____(_instance_base.SimpleValue, types.Function):
"""Base class for function objects (NativeFunction, InterpreterFunction).
Attributes:
name: Function name. Might just be something like "<lambda>".
ctx: context.Context instance.
"""
# bound_class is the BoundFunction variant instantiated by property_get
# when this function is accessed through an instance.
bound_class: type["BoundFunction"]
# Declaration flags; initialized in __init__ (is_method is derived from
# whether the function name contains a ".").
is_abstract: bool
is_classmethod: bool
is_overload: bool
is_method: bool
def __init__(self, name: str, ctx: "context.Context") -> None:
    """Initialize the function.

    Args:
      name: Function name. A "." in the name marks this as a method.
      ctx: context.Context instance.
    """
    super().__init__(name, ctx)
    self.cls = _classes.FunctionPyTDClass(self, ctx)
    # Flipped to True the first time the function is accessed via an
    # instance (see property_get).
    self.is_attribute_of_class = False
    self.is_classmethod = False
    self.is_abstract = False
    self.is_overload = False
    self.is_method = "." in name
    self.decorators = []
    # Expose the function's name as the `func_name` member.
    self.members["func_name"] = self.ctx.convert.build_string(
        self.ctx.root_node, name
    )
def property_get(
    self, callself: "cfg.Variable", is_class: bool = False
) -> "BoundFunction|Function":
    """Return the value of accessing this function as an attribute.

    __new__ and class-level (unbound) accesses return the function
    itself; any other access produces a fresh bound function wrapping
    `callself`.
    """
    if self.name == "__new__" or not callself or is_class:
        return self
    self.is_attribute_of_class = True
    # We'd like to cache this, but we can't. "callself" contains Variables
    # that would be tied into a BoundFunction instance. However, those
    # Variables aren't necessarily visible from other parts of the CFG binding
    # this function. See test_duplicate_getproperty() in tests/test_flow.py.
    return self.bound_class(callself, self)  # pytype: disable=wrong-arg-types
def _get_cell_variable_name(self, var: "cfg.Variable") -> str | None:
    """Get the python variable name of a pytype Variable."""
    f = self.ctx.vm.frame
    if not f:
        # Should not happen but does in some contrived test cases.
        return None
    # Cell variables in the current frame line up positionally with the
    # code object's free-variable names.
    for name, v in zip(f.f_code.freevars, f.cells):
        if v == var:
            return name
    return None
def match_args(
    self,
    node: "cfg.CFGNode",
    args: function.Args,
    alias_map: "datatypes.UnionFind | None" = None,
    match_all_views: bool = False,
):
    """Check whether the given arguments can match the function signature.

    Raises:
      error_types.UndefinedParameterError: if a positional argument has
        no bindings, i.e. a closure cell was referenced before being
        assigned in the outer scope.
    """
    for a in args.posargs:
        if not a.bindings:
            # The only way to get an unbound variable here is to reference a closure
            # cellvar before it is assigned to in the outer scope.
            name = self._get_cell_variable_name(a)
            assert name is not None, "Closure variable lookup failed."
            raise error_types.UndefinedParameterError(name)
    return self._match_args_sequentially(node, args, alias_map, match_all_views)
def _match_args_sequentially(
    self,
    node: "cfg.CFGNode",
    args: function.Args,
    alias_map: "datatypes.UnionFind | None",
    match_all_views: bool,
):
    # Abstract hook: subclasses implement the actual signature matching.
    raise NotImplementedError(self.__class__.__name__)
def __repr__(self) -> str:
return self.full_name + "(...)"
def _extract_defaults(
    self, defaults_var: "cfg.Variable"
) -> "tuple[cfg.Variable, ...] | None":
    """Extracts defaults from a Variable, used by set_function_defaults.

    Args:
      defaults_var: Variable containing potential default values.

    Returns:
      A tuple of default values, if one could be extracted, or None otherwise.
    """
    # Case 1: All given data are tuple constants. Use the longest one.
    if all(isinstance(d, _instances.Tuple) for d in defaults_var.data):
        return max((d.pyval for d in defaults_var.data), key=len)
    else:
        # Case 2: Data are entirely Tuple Instances, Unknown or Unsolvable. Make
        # all parameters except self/cls optional.
        # Case 3: Data is anything else. Same as Case 2, but emit a warning.
        # The condition below detects Case 3: it fails when any datum is
        # neither an ambiguous value nor a builtins.tuple instance.
        if not (
            all(
                isinstance(
                    d,
                    (
                        _instance_base.Instance,
                        _singletons.Unknown,
                        _singletons.Unsolvable,
                    ),
                )
                for d in defaults_var.data
            )
            and all(
                d.full_name == "builtins.tuple"
                for d in defaults_var.data
                if isinstance(d, _instance_base.Instance)
            )
        ):
            self.ctx.errorlog.bad_function_defaults(self.ctx.vm.frames, self.name)
        # The ambiguous case is handled by the subclass.
        return None
def set_function_defaults(self, node, defaults_var):
    # Abstract hook: subclasses apply the defaults extracted by
    # _extract_defaults to their signature representation.
    raise NotImplementedError(self.__class__.__name__)
def update_signature_scope(self, cls: _classes.InterpreterClass) -> None:
    # Intentional no-op by default; subclasses that carry a signature
    # presumably override this to rebind its scope to `cls` — confirm
    # against the InterpreterFunction implementation.
    return
| Function |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/emitter.py | {
"start": 2856,
"end": 67589
} | class ____:
# fmt: off
DEFAULT_TAG_PREFIXES = {
'!': '!',
'tag:yaml.org,2002:': '!!',
}
# fmt: on
MAX_SIMPLE_KEY_LENGTH = 128
def __init__(
self,
stream,
canonical=None,
indent=None,
width=None,
allow_unicode=None,
line_break=None,
block_seq_indent=None,
top_level_colon_align=None,
prefix_colon=None,
brace_single_entry_mapping_in_flow_sequence=None,
dumper=None,
):
# type: (StreamType, Any, Optional[int], Optional[int], Optional[bool], Any, Optional[int], Optional[bool], Any, Optional[bool], Any) -> None # NOQA
self.dumper = dumper
if self.dumper is not None and getattr(self.dumper, '_emitter', None) is None:
self.dumper._emitter = self
self.stream = stream
# Encoding can be overriden by STREAM-START.
self.encoding = None # type: Optional[Text]
self.allow_space_break = None
# Emitter is a state machine with a stack of states to handle nested
# structures.
self.states = [] # type: List[Any]
self.state = self.expect_stream_start # type: Any
# Current event and the event queue.
self.events = [] # type: List[Any]
self.event = None # type: Any
# The current indentation level and the stack of previous indents.
self.indents = Indents()
self.indent = None # type: Optional[int]
# flow_context is an expanding/shrinking list consisting of '{' and '['
# for each unclosed flow context. If empty list that means block context
self.flow_context = [] # type: List[Text]
# Contexts.
self.root_context = False
self.sequence_context = False
self.mapping_context = False
self.simple_key_context = False
# Characteristics of the last emitted character:
# - current position.
# - is it a whitespace?
# - is it an indention character
# (indentation space, '-', '?', or ':')?
self.line = 0
self.column = 0
self.whitespace = True
self.indention = True
self.compact_seq_seq = True # dash after dash
self.compact_seq_map = True # key after dash
# self.compact_ms = False # dash after key, only when excplicit key with ?
self.no_newline = None # type: Optional[bool] # set if directly after `- `
# Whether the document requires an explicit document end indicator
self.open_ended = False
# colon handling
self.colon = ':'
self.prefixed_colon = self.colon if prefix_colon is None else prefix_colon + self.colon
# single entry mappings in flow sequence
self.brace_single_entry_mapping_in_flow_sequence = (
brace_single_entry_mapping_in_flow_sequence # NOQA
)
# Formatting details.
self.canonical = canonical
self.allow_unicode = allow_unicode
# set to False to get "\Uxxxxxxxx" for non-basic unicode like emojis
self.unicode_supplementary = sys.maxunicode > 0xFFFF
self.sequence_dash_offset = block_seq_indent if block_seq_indent else 0
self.top_level_colon_align = top_level_colon_align
self.best_sequence_indent = 2
self.requested_indent = indent # specific for literal zero indent
if indent and 1 < indent < 10:
self.best_sequence_indent = indent
self.best_map_indent = self.best_sequence_indent
# if self.best_sequence_indent < self.sequence_dash_offset + 1:
# self.best_sequence_indent = self.sequence_dash_offset + 1
self.best_width = 80
if width and width > self.best_sequence_indent * 2:
self.best_width = width
self.best_line_break = '\n' # type: Any
if line_break in ['\r', '\n', '\r\n']:
self.best_line_break = line_break
# Tag prefixes.
self.tag_prefixes = None # type: Any
# Prepared anchor and tag.
self.prepared_anchor = None # type: Any
self.prepared_tag = None # type: Any
# Scalar analysis and style.
self.analysis = None # type: Any
self.style = None # type: Any
self.scalar_after_indicator = True # write a scalar on the same line as `---`
self.alt_null = 'null'
@property
def stream(self):
# type: () -> Any
try:
return self._stream
except AttributeError:
raise YAMLStreamError('output stream needs to specified')
@stream.setter
def stream(self, val):
# type: (Any) -> None
if val is None:
return
if not hasattr(val, 'write'):
raise YAMLStreamError('stream argument needs to have a write() method')
self._stream = val
@property
def serializer(self):
# type: () -> Any
try:
if hasattr(self.dumper, 'typ'):
return self.dumper.serializer
return self.dumper._serializer
except AttributeError:
return self # cyaml
@property
def flow_level(self):
# type: () -> int
return len(self.flow_context)
def dispose(self):
# type: () -> None
# Reset the state attributes (to clear self-references)
self.states = []
self.state = None
def emit(self, event):
# type: (Any) -> None
if dbg(DBG_EVENT):
nprint(event)
self.events.append(event)
while not self.need_more_events():
self.event = self.events.pop(0)
self.state()
self.event = None
# In some cases, we wait for a few next events before emitting.
def need_more_events(self):
# type: () -> bool
if not self.events:
return True
event = self.events[0]
if isinstance(event, DocumentStartEvent):
return self.need_events(1)
elif isinstance(event, SequenceStartEvent):
return self.need_events(2)
elif isinstance(event, MappingStartEvent):
return self.need_events(3)
else:
return False
def need_events(self, count):
# type: (int) -> bool
level = 0
for event in self.events[1:]:
if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
level += 1
elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
level -= 1
elif isinstance(event, StreamEndEvent):
level = -1
if level < 0:
return False
return len(self.events) < count + 1
def increase_indent(self, flow=False, sequence=None, indentless=False):
# type: (bool, Optional[bool], bool) -> None
self.indents.append(self.indent, sequence)
if self.indent is None: # top level
if flow:
# self.indent = self.best_sequence_indent if self.indents.last_seq() else \
# self.best_map_indent
# self.indent = self.best_sequence_indent
self.indent = self.requested_indent
else:
self.indent = 0
elif not indentless:
self.indent += (
self.best_sequence_indent if self.indents.last_seq() else self.best_map_indent
)
# if self.indents.last_seq():
# if self.indent == 0: # top level block sequence
# self.indent = self.best_sequence_indent - self.sequence_dash_offset
# else:
# self.indent += self.best_sequence_indent
# else:
# self.indent += self.best_map_indent
# States.
# Stream handlers.
def expect_stream_start(self):
# type: () -> None
if isinstance(self.event, StreamStartEvent):
if self.event.encoding and not hasattr(self.stream, 'encoding'):
self.encoding = self.event.encoding
self.write_stream_start()
self.state = self.expect_first_document_start
else:
raise EmitterError(
_F('expected StreamStartEvent, but got {self_event!s}', self_event=self.event)
)
def expect_nothing(self):
# type: () -> None
raise EmitterError(
_F('expected nothing, but got {self_event!s}', self_event=self.event)
)
# Document handlers.
def expect_first_document_start(self):
# type: () -> Any
return self.expect_document_start(first=True)
def expect_document_start(self, first=False):
# type: (bool) -> None
if isinstance(self.event, DocumentStartEvent):
if (self.event.version or self.event.tags) and self.open_ended:
self.write_indicator('...', True)
self.write_indent()
if self.event.version:
version_text = self.prepare_version(self.event.version)
self.write_version_directive(version_text)
self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
if self.event.tags:
handles = sorted(self.event.tags.keys())
for handle in handles:
prefix = self.event.tags[handle]
self.tag_prefixes[prefix] = handle
handle_text = self.prepare_tag_handle(handle)
prefix_text = self.prepare_tag_prefix(prefix)
self.write_tag_directive(handle_text, prefix_text)
implicit = (
first
and not self.event.explicit
and not self.canonical
and not self.event.version
and not self.event.tags
and not self.check_empty_document()
)
if not implicit:
self.write_indent()
self.write_indicator('---', True)
if self.canonical:
self.write_indent()
self.state = self.expect_document_root
elif isinstance(self.event, StreamEndEvent):
if self.open_ended:
self.write_indicator('...', True)
self.write_indent()
self.write_stream_end()
self.state = self.expect_nothing
else:
raise EmitterError(
_F(
'expected DocumentStartEvent, but got {self_event!s}',
self_event=self.event,
)
)
def expect_document_end(self):
# type: () -> None
if isinstance(self.event, DocumentEndEvent):
self.write_indent()
if self.event.explicit:
self.write_indicator('...', True)
self.write_indent()
self.flush_stream()
self.state = self.expect_document_start
else:
raise EmitterError(
_F('expected DocumentEndEvent, but got {self_event!s}', self_event=self.event)
)
def expect_document_root(self):
# type: () -> None
self.states.append(self.expect_document_end)
self.expect_node(root=True)
# Node handlers.
def expect_node(self, root=False, sequence=False, mapping=False, simple_key=False):
# type: (bool, bool, bool, bool) -> None
self.root_context = root
self.sequence_context = sequence # not used in PyYAML
force_flow_indent = False
self.mapping_context = mapping
self.simple_key_context = simple_key
if isinstance(self.event, AliasEvent):
self.expect_alias()
elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
if (
self.process_anchor('&')
and isinstance(self.event, ScalarEvent)
and self.sequence_context
):
self.sequence_context = False
if (
root
and isinstance(self.event, ScalarEvent)
and not self.scalar_after_indicator
):
self.write_indent()
self.process_tag()
if isinstance(self.event, ScalarEvent):
# nprint('@', self.indention, self.no_newline, self.column)
self.expect_scalar()
elif isinstance(self.event, SequenceStartEvent):
# nprint('@', self.indention, self.no_newline, self.column)
i2, n2 = self.indention, self.no_newline # NOQA
if self.event.comment:
if self.event.flow_style is False:
if self.write_post_comment(self.event):
self.indention = False
self.no_newline = True
if self.event.flow_style:
column = self.column
if self.write_pre_comment(self.event):
if self.event.flow_style:
# force_flow_indent = True
force_flow_indent = not self.indents.values[-1][1]
self.indention = i2
self.no_newline = not self.indention
if self.event.flow_style:
self.column = column
if (
self.flow_level
or self.canonical
or self.event.flow_style
or self.check_empty_sequence()
):
self.expect_flow_sequence(force_flow_indent)
else:
self.expect_block_sequence()
elif isinstance(self.event, MappingStartEvent):
if self.event.flow_style is False and self.event.comment:
self.write_post_comment(self.event)
if self.event.comment and self.event.comment[1]:
self.write_pre_comment(self.event)
if self.event.flow_style:
force_flow_indent = not self.indents.values[-1][1]
if (
self.flow_level
or self.canonical
or self.event.flow_style
or self.check_empty_mapping()
):
self.expect_flow_mapping(single=self.event.nr_items == 1,
force_flow_indent=force_flow_indent)
else:
self.expect_block_mapping()
else:
raise EmitterError(
_F('expected NodeEvent, but got {self_event!s}', self_event=self.event)
)
def expect_alias(self):
# type: () -> None
if self.event.anchor is None:
raise EmitterError('anchor is not specified for alias')
self.process_anchor('*')
self.state = self.states.pop()
def expect_scalar(self):
# type: () -> None
self.increase_indent(flow=True)
self.process_scalar()
self.indent = self.indents.pop()
self.state = self.states.pop()
# Flow sequence handlers.
def expect_flow_sequence(self, force_flow_indent=False):
# type: (Optional[bool]) -> None
if force_flow_indent:
self.increase_indent(flow=True, sequence=True)
ind = self.indents.seq_flow_align(self.best_sequence_indent, self.column,
force_flow_indent)
self.write_indicator(' ' * ind + '[', True, whitespace=True)
if not force_flow_indent:
self.increase_indent(flow=True, sequence=True)
self.flow_context.append('[')
self.state = self.expect_first_flow_sequence_item
def expect_first_flow_sequence_item(self):
# type: () -> None
if isinstance(self.event, SequenceEndEvent):
self.indent = self.indents.pop()
popped = self.flow_context.pop()
assert popped == '['
self.write_indicator(']', False)
if self.event.comment and self.event.comment[0]:
# eol comment on empty flow sequence
self.write_post_comment(self.event)
elif self.flow_level == 0:
self.write_line_break()
self.state = self.states.pop()
else:
if self.canonical or self.column > self.best_width:
self.write_indent()
self.states.append(self.expect_flow_sequence_item)
self.expect_node(sequence=True)
def expect_flow_sequence_item(self):
# type: () -> None
if isinstance(self.event, SequenceEndEvent):
self.indent = self.indents.pop()
popped = self.flow_context.pop()
assert popped == '['
if self.canonical:
self.write_indicator(',', False)
self.write_indent()
self.write_indicator(']', False)
if self.event.comment and self.event.comment[0]:
# eol comment on flow sequence
self.write_post_comment(self.event)
else:
self.no_newline = False
self.state = self.states.pop()
else:
self.write_indicator(',', False)
if self.canonical or self.column > self.best_width:
self.write_indent()
self.states.append(self.expect_flow_sequence_item)
self.expect_node(sequence=True)
# Flow mapping handlers.
def expect_flow_mapping(self, single=False, force_flow_indent=False):
# type: (Optional[bool], Optional[bool]) -> None
if force_flow_indent:
self.increase_indent(flow=True, sequence=False)
ind = self.indents.seq_flow_align(self.best_sequence_indent, self.column,
force_flow_indent)
map_init = '{'
if (
single
and self.flow_level
and self.flow_context[-1] == '['
and not self.canonical
and not self.brace_single_entry_mapping_in_flow_sequence
):
# single map item with flow context, no curly braces necessary
map_init = ''
self.write_indicator(' ' * ind + map_init, True, whitespace=True)
self.flow_context.append(map_init)
if not force_flow_indent:
self.increase_indent(flow=True, sequence=False)
self.state = self.expect_first_flow_mapping_key
def expect_first_flow_mapping_key(self):
# type: () -> None
if isinstance(self.event, MappingEndEvent):
self.indent = self.indents.pop()
popped = self.flow_context.pop()
assert popped == '{' # empty flow mapping
self.write_indicator('}', False)
if self.event.comment and self.event.comment[0]:
# eol comment on empty mapping
self.write_post_comment(self.event)
elif self.flow_level == 0:
self.write_line_break()
self.state = self.states.pop()
else:
if self.canonical or self.column > self.best_width:
self.write_indent()
if not self.canonical and self.check_simple_key():
self.states.append(self.expect_flow_mapping_simple_value)
self.expect_node(mapping=True, simple_key=True)
else:
self.write_indicator('?', True)
self.states.append(self.expect_flow_mapping_value)
self.expect_node(mapping=True)
def expect_flow_mapping_key(self):
# type: () -> None
if isinstance(self.event, MappingEndEvent):
# if self.event.comment and self.event.comment[1]:
# self.write_pre_comment(self.event)
self.indent = self.indents.pop()
popped = self.flow_context.pop()
assert popped in ['{', '']
if self.canonical:
self.write_indicator(',', False)
self.write_indent()
if popped != '':
self.write_indicator('}', False)
if self.event.comment and self.event.comment[0]:
# eol comment on flow mapping, never reached on empty mappings
self.write_post_comment(self.event)
else:
self.no_newline = False
self.state = self.states.pop()
else:
self.write_indicator(',', False)
if self.canonical or self.column > self.best_width:
self.write_indent()
if not self.canonical and self.check_simple_key():
self.states.append(self.expect_flow_mapping_simple_value)
self.expect_node(mapping=True, simple_key=True)
else:
self.write_indicator('?', True)
self.states.append(self.expect_flow_mapping_value)
self.expect_node(mapping=True)
def expect_flow_mapping_simple_value(self):
# type: () -> None
self.write_indicator(self.prefixed_colon, False)
self.states.append(self.expect_flow_mapping_key)
self.expect_node(mapping=True)
def expect_flow_mapping_value(self):
# type: () -> None
if self.canonical or self.column > self.best_width:
self.write_indent()
self.write_indicator(self.prefixed_colon, True)
self.states.append(self.expect_flow_mapping_key)
self.expect_node(mapping=True)
# Block sequence handlers.
def expect_block_sequence(self):
# type: () -> None
if self.mapping_context:
indentless = not self.indention
else:
indentless = False
if not self.compact_seq_seq and self.column != 0:
self.write_line_break()
self.increase_indent(flow=False, sequence=True, indentless=indentless)
self.state = self.expect_first_block_sequence_item
def expect_first_block_sequence_item(self):
# type: () -> Any
return self.expect_block_sequence_item(first=True)
def expect_block_sequence_item(self, first=False):
# type: (bool) -> None
if not first and isinstance(self.event, SequenceEndEvent):
if self.event.comment and self.event.comment[1]:
# final comments on a block list e.g. empty line
self.write_pre_comment(self.event)
self.indent = self.indents.pop()
self.state = self.states.pop()
self.no_newline = False
else:
if self.event.comment and self.event.comment[1]:
self.write_pre_comment(self.event)
nonl = self.no_newline if self.column == 0 else False
self.write_indent()
ind = self.sequence_dash_offset # if len(self.indents) > 1 else 0
self.write_indicator(' ' * ind + '-', True, indention=True)
if nonl or self.sequence_dash_offset + 2 > self.best_sequence_indent:
self.no_newline = True
self.states.append(self.expect_block_sequence_item)
self.expect_node(sequence=True)
# Block mapping handlers.
def expect_block_mapping(self):
# type: () -> None
if not self.mapping_context and not (self.compact_seq_map or self.column == 0):
self.write_line_break()
self.increase_indent(flow=False, sequence=False)
self.state = self.expect_first_block_mapping_key
def expect_first_block_mapping_key(self):
# type: () -> None
return self.expect_block_mapping_key(first=True)
def expect_block_mapping_key(self, first=False):
# type: (Any) -> None
if not first and isinstance(self.event, MappingEndEvent):
if self.event.comment and self.event.comment[1]:
# final comments from a doc
self.write_pre_comment(self.event)
self.indent = self.indents.pop()
self.state = self.states.pop()
else:
if self.event.comment and self.event.comment[1]:
# final comments from a doc
self.write_pre_comment(self.event)
self.write_indent()
if self.check_simple_key():
if not isinstance(
self.event, (SequenceStartEvent, MappingStartEvent)
): # sequence keys
try:
if self.event.style == '?':
self.write_indicator('?', True, indention=True)
except AttributeError: # aliases have no style
pass
self.states.append(self.expect_block_mapping_simple_value)
self.expect_node(mapping=True, simple_key=True)
# test on style for alias in !!set
if isinstance(self.event, AliasEvent) and not self.event.style == '?':
self.stream.write(' ')
else:
self.write_indicator('?', True, indention=True)
self.states.append(self.expect_block_mapping_value)
self.expect_node(mapping=True)
def expect_block_mapping_simple_value(self):
# type: () -> None
if getattr(self.event, 'style', None) != '?':
# prefix = ''
if self.indent == 0 and self.top_level_colon_align is not None:
# write non-prefixed colon
c = ' ' * (self.top_level_colon_align - self.column) + self.colon
else:
c = self.prefixed_colon
self.write_indicator(c, False)
self.states.append(self.expect_block_mapping_key)
self.expect_node(mapping=True)
def expect_block_mapping_value(self):
# type: () -> None
self.write_indent()
self.write_indicator(self.prefixed_colon, True, indention=True)
self.states.append(self.expect_block_mapping_key)
self.expect_node(mapping=True)
# Checkers.
def check_empty_sequence(self):
# type: () -> bool
return (
isinstance(self.event, SequenceStartEvent)
and bool(self.events)
and isinstance(self.events[0], SequenceEndEvent)
)
def check_empty_mapping(self):
# type: () -> bool
return (
isinstance(self.event, MappingStartEvent)
and bool(self.events)
and isinstance(self.events[0], MappingEndEvent)
)
def check_empty_document(self):
# type: () -> bool
if not isinstance(self.event, DocumentStartEvent) or not self.events:
return False
event = self.events[0]
return (
isinstance(event, ScalarEvent)
and event.anchor is None
and event.tag is None
and event.implicit
and event.value == ""
)
def check_simple_key(self):
# type: () -> bool
length = 0
if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
if self.prepared_anchor is None:
self.prepared_anchor = self.prepare_anchor(self.event.anchor)
length += len(self.prepared_anchor)
if (
isinstance(self.event, (ScalarEvent, CollectionStartEvent))
and self.event.tag is not None
):
if self.prepared_tag is None:
self.prepared_tag = self.prepare_tag(self.event.tag)
length += len(self.prepared_tag)
if isinstance(self.event, ScalarEvent):
if self.analysis is None:
self.analysis = self.analyze_scalar(self.event.value)
length += len(self.analysis.scalar)
return length < self.MAX_SIMPLE_KEY_LENGTH and (
isinstance(self.event, AliasEvent)
or (isinstance(self.event, SequenceStartEvent) and self.event.flow_style is True)
or (isinstance(self.event, MappingStartEvent) and self.event.flow_style is True)
or (
isinstance(self.event, ScalarEvent)
# if there is an explicit style for an empty string, it is a simple key
and not (self.analysis.empty and self.style and self.style not in '\'"')
and not self.analysis.multiline
)
or self.check_empty_sequence()
or self.check_empty_mapping()
)
# Anchor, Tag, and Scalar processors.
def process_anchor(self, indicator):
# type: (Any) -> bool
if self.event.anchor is None:
self.prepared_anchor = None
return False
if self.prepared_anchor is None:
self.prepared_anchor = self.prepare_anchor(self.event.anchor)
if self.prepared_anchor:
self.write_indicator(indicator + self.prepared_anchor, True)
# issue 288
self.no_newline = False
self.prepared_anchor = None
return True
def process_tag(self):
# type: () -> None
tag = self.event.tag
if isinstance(self.event, ScalarEvent):
if self.style is None:
self.style = self.choose_scalar_style()
if (
self.event.value == ''
and self.style == "'"
and tag == 'tag:yaml.org,2002:null'
and self.alt_null is not None
):
self.event.value = self.alt_null
self.analysis = None
self.style = self.choose_scalar_style()
if (not self.canonical or tag is None) and (
(self.style == "" and self.event.implicit[0])
or (self.style != "" and self.event.implicit[1])
):
self.prepared_tag = None
return
if self.event.implicit[0] and tag is None:
tag = '!'
self.prepared_tag = None
else:
if (not self.canonical or tag is None) and self.event.implicit:
self.prepared_tag = None
return
if tag is None:
raise EmitterError('tag is not specified')
if self.prepared_tag is None:
self.prepared_tag = self.prepare_tag(tag)
if self.prepared_tag:
self.write_indicator(self.prepared_tag, True)
if (
self.sequence_context
and not self.flow_level
and isinstance(self.event, ScalarEvent)
):
self.no_newline = True
self.prepared_tag = None
def choose_scalar_style(self):
# type: () -> Any
if self.analysis is None:
self.analysis = self.analyze_scalar(self.event.value)
if self.event.style == '"' or self.canonical:
return '"'
if (not self.event.style or self.event.style == '?') and (
self.event.implicit[0] or not self.event.implicit[2]
):
if not (
self.simple_key_context and (self.analysis.empty or self.analysis.multiline)
) and (
self.flow_level
and self.analysis.allow_flow_plain
or (not self.flow_level and self.analysis.allow_block_plain)
):
return ""
self.analysis.allow_block = True
if self.event.style and self.event.style in '|>':
if (
not self.flow_level
and not self.simple_key_context
and self.analysis.allow_block
):
return self.event.style
if not self.event.style and self.analysis.allow_double_quoted:
if "'" in self.event.value or '\n' in self.event.value:
return '"'
if not self.event.style or self.event.style == "'":
if self.analysis.allow_single_quoted and not (
self.simple_key_context and self.analysis.multiline
):
return "'"
return '"'
def process_scalar(self):
# type: () -> None
if self.analysis is None:
self.analysis = self.analyze_scalar(self.event.value)
if self.style is None:
self.style = self.choose_scalar_style()
split = not self.simple_key_context
# if self.analysis.multiline and split \
# and (not self.style or self.style in '\'\"'):
# self.write_indent()
# nprint('xx', self.sequence_context, self.flow_level)
if self.sequence_context and not self.flow_level:
self.write_indent()
if self.style == '"':
self.write_double_quoted(self.analysis.scalar, split)
elif self.style == "'":
self.write_single_quoted(self.analysis.scalar, split)
elif self.style == '>':
self.write_folded(self.analysis.scalar)
if (
self.event.comment
and self.event.comment[0]
and self.event.comment[0].column >= self.indent
):
# comment following a folded scalar must dedent (issue 376)
self.event.comment[0].column = self.indent - 1 # type: ignore
elif self.style == '|':
# self.write_literal(self.analysis.scalar, self.event.comment)
try:
cmx = self.event.comment[1][0]
except (IndexError, TypeError):
cmx = ""
self.write_literal(self.analysis.scalar, cmx)
if (
self.event.comment
and self.event.comment[0]
and self.event.comment[0].column >= self.indent
):
# comment following a literal scalar must dedent (issue 376)
self.event.comment[0].column = self.indent - 1 # type: ignore
else:
self.write_plain(self.analysis.scalar, split)
self.analysis = None
self.style = None
if self.event.comment:
self.write_post_comment(self.event)
# Analyzers.
def prepare_version(self, version):
# type: (Any) -> Any
major, minor = version
if major != 1:
raise EmitterError(
_F('unsupported YAML version: {major:d}.{minor:d}', major=major, minor=minor)
)
return _F('{major:d}.{minor:d}', major=major, minor=minor)
def prepare_tag_handle(self, handle):
# type: (Any) -> Any
if not handle:
raise EmitterError('tag handle must not be empty')
if handle[0] != '!' or handle[-1] != '!':
raise EmitterError(
_F("tag handle must start and end with '!': {handle!r}", handle=handle)
)
for ch in handle[1:-1]:
if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in '-_'):
raise EmitterError(
_F(
'invalid character {ch!r} in the tag handle: {handle!r}',
ch=ch,
handle=handle,
)
)
return handle
def prepare_tag_prefix(self, prefix):
# type: (Any) -> Any
if not prefix:
raise EmitterError('tag prefix must not be empty')
chunks = [] # type: List[Any]
start = end = 0
if prefix[0] == '!':
end = 1
ch_set = "-;/?:@&=+$,_.~*'()[]"
if self.dumper:
version = getattr(self.dumper, 'version', (1, 2))
if version is None or version >= (1, 2):
ch_set += '#'
while end < len(prefix):
ch = prefix[end]
if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in ch_set:
end += 1
else:
if start < end:
chunks.append(prefix[start:end])
start = end = end + 1
data = ch
for ch in data:
chunks.append(_F('%{ord_ch:02X}', ord_ch=ord(ch)))
if start < end:
chunks.append(prefix[start:end])
return "".join(chunks)
def prepare_tag(self, tag):
# type: (Any) -> Any
if not tag:
raise EmitterError('tag must not be empty')
if tag == '!':
return tag
handle = None
suffix = tag
prefixes = sorted(self.tag_prefixes.keys())
for prefix in prefixes:
if tag.startswith(prefix) and (prefix == '!' or len(prefix) < len(tag)):
handle = self.tag_prefixes[prefix]
suffix = tag[len(prefix) :]
chunks = [] # type: List[Any]
start = end = 0
ch_set = "-;/?:@&=+$,_.~*'()[]"
if self.dumper:
version = getattr(self.dumper, 'version', (1, 2))
if version is None or version >= (1, 2):
ch_set += '#'
while end < len(suffix):
ch = suffix[end]
if (
'0' <= ch <= '9'
or 'A' <= ch <= 'Z'
or 'a' <= ch <= 'z'
or ch in ch_set
or (ch == '!' and handle != '!')
):
end += 1
else:
if start < end:
chunks.append(suffix[start:end])
start = end = end + 1
data = ch
for ch in data:
chunks.append(_F('%{ord_ch:02X}', ord_ch=ord(ch)))
if start < end:
chunks.append(suffix[start:end])
suffix_text = "".join(chunks)
if handle:
return _F('{handle!s}{suffix_text!s}', handle=handle, suffix_text=suffix_text)
else:
return _F('!<{suffix_text!s}>', suffix_text=suffix_text)
def prepare_anchor(self, anchor):
# type: (Any) -> Any
if not anchor:
raise EmitterError('anchor must not be empty')
for ch in anchor:
if not check_anchorname_char(ch):
raise EmitterError(
_F(
'invalid character {ch!r} in the anchor: {anchor!r}',
ch=ch,
anchor=anchor,
)
)
return anchor
    def analyze_scalar(self, scalar):
        # type: (Any) -> Any
        """Scan *scalar* in a single pass and return a ScalarAnalysis that
        records which output styles (flow/block plain, single-quoted,
        double-quoted, block) can represent it.
        """
        # Empty scalar is a special case.
        if not scalar:
            return ScalarAnalysis(
                scalar=scalar,
                empty=True,
                multiline=False,
                allow_flow_plain=False,
                allow_block_plain=True,
                allow_single_quoted=True,
                allow_double_quoted=True,
                allow_block=False,
            )
        # Indicators and special characters.
        block_indicators = False
        flow_indicators = False
        line_breaks = False
        special_characters = False
        # Important whitespace combinations.
        leading_space = False
        leading_break = False
        trailing_space = False
        trailing_break = False
        break_space = False
        space_break = False
        # Check document indicators.
        if scalar.startswith('---') or scalar.startswith('...'):
            block_indicators = True
            flow_indicators = True
        # First character or preceded by a whitespace.
        preceeded_by_whitespace = True
        # Last character or followed by a whitespace.
        followed_by_whitespace = len(scalar) == 1 or scalar[1] in '\0 \t\r\n\x85\u2028\u2029'
        # The previous character is a space.
        previous_space = False
        # The previous character is a break.
        previous_break = False
        index = 0
        while index < len(scalar):
            ch = scalar[index]
            # Check for indicators.
            if index == 0:
                # Leading indicators are special characters.
                if ch in '#,[]{}&*!|>\'"%@`':
                    flow_indicators = True
                    block_indicators = True
                if ch in '?:': # ToDo
                    if self.serializer.use_version == (1, 1):
                        flow_indicators = True
                    elif len(scalar) == 1: # single character
                        flow_indicators = True
                    if followed_by_whitespace:
                        block_indicators = True
                if ch == '-' and followed_by_whitespace:
                    flow_indicators = True
                    block_indicators = True
            else:
                # Some indicators cannot appear within a scalar as well.
                if ch in ',[]{}': # http://yaml.org/spec/1.2/spec.html#id2788859
                    flow_indicators = True
                if ch == '?' and self.serializer.use_version == (1, 1):
                    flow_indicators = True
                if ch == ':':
                    if followed_by_whitespace:
                        flow_indicators = True
                        block_indicators = True
                if ch == '#' and preceeded_by_whitespace:
                    flow_indicators = True
                    block_indicators = True
            # Check for line breaks, special, and unicode characters.
            if ch in '\n\x85\u2028\u2029':
                line_breaks = True
            if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
                # non-printable-ASCII: decide whether it is a representable
                # unicode character or a "special" one forcing double quotes
                if (
                    ch == '\x85'
                    or '\xA0' <= ch <= '\uD7FF'
                    or '\uE000' <= ch <= '\uFFFD'
                    or (self.unicode_supplementary and ('\U00010000' <= ch <= '\U0010FFFF'))
                ) and ch != '\uFEFF':
                    # unicode_characters = True
                    if not self.allow_unicode:
                        special_characters = True
                else:
                    special_characters = True
            # Detect important whitespace combinations.
            if ch == ' ':
                if index == 0:
                    leading_space = True
                if index == len(scalar) - 1:
                    trailing_space = True
                if previous_break:
                    break_space = True
                previous_space = True
                previous_break = False
            elif ch in '\n\x85\u2028\u2029':
                if index == 0:
                    leading_break = True
                if index == len(scalar) - 1:
                    trailing_break = True
                if previous_space:
                    space_break = True
                previous_space = False
                previous_break = True
            else:
                previous_space = False
                previous_break = False
            # Prepare for the next character.
            index += 1
            preceeded_by_whitespace = ch in '\0 \t\r\n\x85\u2028\u2029'
            followed_by_whitespace = (
                index + 1 >= len(scalar) or scalar[index + 1] in '\0 \t\r\n\x85\u2028\u2029'
            )
        # Let's decide what styles are allowed.
        allow_flow_plain = True
        allow_block_plain = True
        allow_single_quoted = True
        allow_double_quoted = True
        allow_block = True
        # Leading and trailing whitespaces are bad for plain scalars.
        if leading_space or leading_break or trailing_space or trailing_break:
            allow_flow_plain = allow_block_plain = False
        # We do not permit trailing spaces for block scalars.
        if trailing_space:
            allow_block = False
        # Spaces at the beginning of a new line are only acceptable for block
        # scalars.
        if break_space:
            allow_flow_plain = allow_block_plain = allow_single_quoted = False
        # Spaces followed by breaks, as well as special character are only
        # allowed for double quoted scalars.
        if special_characters:
            allow_flow_plain = allow_block_plain = allow_single_quoted = allow_block = False
        elif space_break:
            allow_flow_plain = allow_block_plain = allow_single_quoted = False
            if not self.allow_space_break:
                allow_block = False
        # Although the plain scalar writer supports breaks, we never emit
        # multiline plain scalars.
        if line_breaks:
            allow_flow_plain = allow_block_plain = False
        # Flow indicators are forbidden for flow plain scalars.
        if flow_indicators:
            allow_flow_plain = False
        # Block indicators are forbidden for block plain scalars.
        if block_indicators:
            allow_block_plain = False
        return ScalarAnalysis(
            scalar=scalar,
            empty=False,
            multiline=line_breaks,
            allow_flow_plain=allow_flow_plain,
            allow_block_plain=allow_block_plain,
            allow_single_quoted=allow_single_quoted,
            allow_double_quoted=allow_double_quoted,
            allow_block=allow_block,
        )
# Writers.
def flush_stream(self):
# type: () -> None
if hasattr(self.stream, 'flush'):
self.stream.flush()
def write_stream_start(self):
# type: () -> None
# Write BOM if needed.
if self.encoding and self.encoding.startswith('utf-16'):
self.stream.write('\uFEFF'.encode(self.encoding))
def write_stream_end(self):
# type: () -> None
self.flush_stream()
def write_indicator(self, indicator, need_whitespace, whitespace=False, indention=False):
# type: (Any, Any, bool, bool) -> None
if self.whitespace or not need_whitespace:
data = indicator
else:
data = ' ' + indicator
self.whitespace = whitespace
self.indention = self.indention and indention
self.column += len(data)
self.open_ended = False
if bool(self.encoding):
data = data.encode(self.encoding)
self.stream.write(data)
    def write_indent(self):
        # type: () -> None
        """Move the cursor to the current indentation level, emitting a line
        break first when the column is already past (or flush with, but not
        preceded by whitespace at) the indent column."""
        indent = self.indent or 0
        if (
            not self.indention
            or self.column > indent
            or (self.column == indent and not self.whitespace)
        ):
            if bool(self.no_newline):
                # one-shot suppression flag: consume it instead of breaking
                self.no_newline = False
            else:
                self.write_line_break()
        if self.column < indent:
            self.whitespace = True
            data = ' ' * (indent - self.column)
            self.column = indent
            if self.encoding:
                data = data.encode(self.encoding)  # type: ignore
            self.stream.write(data)
def write_line_break(self, data=None):
# type: (Any) -> None
if data is None:
data = self.best_line_break
self.whitespace = True
self.indention = True
self.line += 1
self.column = 0
if bool(self.encoding):
data = data.encode(self.encoding)
self.stream.write(data)
def write_version_directive(self, version_text):
# type: (Any) -> None
data = _F('%YAML {version_text!s}', version_text=version_text)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.write_line_break()
def write_tag_directive(self, handle_text, prefix_text):
# type: (Any, Any) -> None
data = _F(
'%TAG {handle_text!s} {prefix_text!s}',
handle_text=handle_text,
prefix_text=prefix_text,
)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.write_line_break()
# Scalar streams.
    def write_single_quoted(self, text, split=True):
        # type: (Any, Any) -> None
        """Emit *text* as a single-quoted scalar.

        A single space may be folded into a line break when *split* is true
        and the line exceeds best_width; line breaks in the text are
        preserved, and a quote is escaped by doubling it ('').
        """
        if self.root_context:
            if self.requested_indent is not None:
                self.write_line_break()
                if self.requested_indent != 0:
                    self.write_indent()
        self.write_indicator("'", True)
        spaces = False
        breaks = False
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if spaces:
                # end of a run of spaces
                if ch is None or ch != ' ':
                    if (
                        start + 1 == end
                        and self.column > self.best_width
                        and split
                        and start != 0
                        and end != len(text)
                    ):
                        # fold a single interior space into a line break
                        self.write_indent()
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if bool(self.encoding):
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            elif breaks:
                # end of a run of line breaks
                if ch is None or ch not in '\n\x85\u2028\u2029':
                    if text[start] == '\n':
                        self.write_line_break()
                    for br in text[start:end]:
                        if br == '\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    self.write_indent()
                    start = end
            else:
                # flush ordinary characters up to the next space/break/quote
                if ch is None or ch in ' \n\x85\u2028\u2029' or ch == "'":
                    if start < end:
                        data = text[start:end]
                        self.column += len(data)
                        if bool(self.encoding):
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                        start = end
                if ch == "'":
                    data = "''"
                    self.column += 2
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end + 1
            if ch is not None:
                spaces = ch == ' '
                breaks = ch in '\n\x85\u2028\u2029'
            end += 1
        self.write_indicator("'", False)
ESCAPE_REPLACEMENTS = {
'\0': '0',
'\x07': 'a',
'\x08': 'b',
'\x09': 't',
'\x0A': 'n',
'\x0B': 'v',
'\x0C': 'f',
'\x0D': 'r',
'\x1B': 'e',
'"': '"',
'\\': '\\',
'\x85': 'N',
'\xA0': '_',
'\u2028': 'L',
'\u2029': 'P',
}
    def write_double_quoted(self, text, split=True):
        # type: (Any, Any) -> None
        """Emit *text* as a double-quoted scalar, backslash-escaping anything
        that cannot appear verbatim; long lines may be folded with a trailing
        backslash when *split* is true."""
        if self.root_context:
            if self.requested_indent is not None:
                self.write_line_break()
                if self.requested_indent != 0:
                    self.write_indent()
        self.write_indicator('"', True)
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if (
                ch is None
                or ch in '"\\\x85\u2028\u2029\uFEFF'
                or not (
                    '\x20' <= ch <= '\x7E'
                    or (
                        self.allow_unicode
                        and ('\xA0' <= ch <= '\uD7FF' or '\uE000' <= ch <= '\uFFFD')
                    )
                )
            ):
                # flush the pending run of literal characters
                if start < end:
                    data = text[start:end]
                    self.column += len(data)
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end
                if ch is not None:
                    # escape the current character
                    if ch in self.ESCAPE_REPLACEMENTS:
                        data = '\\' + self.ESCAPE_REPLACEMENTS[ch]
                    elif ch <= '\xFF':
                        data = _F('\\x{ord_ch:02X}', ord_ch=ord(ch))
                    elif ch <= '\uFFFF':
                        data = _F('\\u{ord_ch:04X}', ord_ch=ord(ch))
                    else:
                        data = _F('\\U{ord_ch:08X}', ord_ch=ord(ch))
                    self.column += len(data)
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end + 1
            if (
                0 < end < len(text) - 1
                and (ch == ' ' or start >= end)
                and self.column + (end - start) > self.best_width
                and split
            ):
                # fold the line with a trailing backslash
                data = text[start:end] + '\\'
                if start < end:
                    start = end
                self.column += len(data)
                if bool(self.encoding):
                    data = data.encode(self.encoding)
                self.stream.write(data)
                self.write_indent()
                self.whitespace = False
                self.indention = False
                if text[start] == ' ':
                    # protect a leading space on the continuation line
                    data = '\\'
                    self.column += len(data)
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
            end += 1
        self.write_indicator('"', False)
    def determine_block_hints(self, text):
        # type: (Any) -> Any
        """Compute the header hints for a block scalar.

        Returns ``(hints, indent, indicator)``: *hints* is the string written
        after '|' or '>', *indent* an explicit indentation indicator (0 when
        not needed) and *indicator* the chomping marker '-' (strip),
        '+' (keep) or '' (clip).
        """
        indent = 0
        indicator = ''
        hints = ''
        if text:
            if text[0] in ' \n\x85\u2028\u2029':
                # leading whitespace forces an explicit indentation indicator
                indent = self.best_sequence_indent
                hints += str(indent)
            elif self.root_context:
                # look for an embedded document start/end marker on its own line
                for end in ['\n---', '\n...']:
                    pos = 0
                    while True:
                        pos = text.find(end, pos)
                        if pos == -1:
                            break
                        try:
                            if text[pos + 4] in ' \r\n':
                                break
                        except IndexError:
                            pass
                        pos += 1
                    if pos > -1:
                        break
                if pos > 0:
                    indent = self.best_sequence_indent
            if text[-1] not in '\n\x85\u2028\u2029':
                indicator = '-'
            elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
                indicator = '+'
            hints += indicator
        return hints, indent, indicator
def write_folded(self, text):
# type: (Any) -> None
hints, _indent, _indicator = self.determine_block_hints(text)
self.write_indicator('>' + hints, True)
if _indicator == '+':
self.open_ended = True
self.write_line_break()
leading_space = True
spaces = False
breaks = True
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if breaks:
if ch is None or ch not in '\n\x85\u2028\u2029\a':
if (
not leading_space
and ch is not None
and ch != ' '
and text[start] == '\n'
):
self.write_line_break()
leading_space = ch == ' '
for br in text[start:end]:
if br == '\n':
self.write_line_break()
else:
self.write_line_break(br)
if ch is not None:
self.write_indent()
start = end
elif spaces:
if ch != ' ':
if start + 1 == end and self.column > self.best_width:
self.write_indent()
else:
data = text[start:end]
self.column += len(data)
if bool(self.encoding):
data = data.encode(self.encoding)
self.stream.write(data)
start = end
else:
if ch is None or ch in ' \n\x85\u2028\u2029\a':
data = text[start:end]
self.column += len(data)
if bool(self.encoding):
data = data.encode(self.encoding)
self.stream.write(data)
if ch == '\a':
if end < (len(text) - 1) and not text[end + 2].isspace():
self.write_line_break()
self.write_indent()
end += 2 # \a and the space that is inserted on the fold
else:
raise EmitterError('unexcpected fold indicator \\a before space')
if ch is None:
self.write_line_break()
start = end
if ch is not None:
breaks = ch in '\n\x85\u2028\u2029'
spaces = ch == ' '
end += 1
    def write_literal(self, text, comment=None):
        # type: (Any, Any) -> None
        """Emit *text* as a literal ('|') block scalar; *comment*, when it is
        a plain string, is appended to the header line."""
        hints, _indent, _indicator = self.determine_block_hints(text)
        # if comment is not None:
        #     try:
        #         hints += comment[1][0]
        #     except (TypeError, IndexError) as e:
        #         pass
        if not isinstance(comment, str):
            comment = ''
        self.write_indicator('|' + hints + comment, True)
        # try:
        #     nprintf('selfev', comment)
        #     cmx = comment[1][0]
        #     if cmx:
        #         self.stream.write(cmx)
        # except (TypeError, IndexError) as e:
        #     pass
        if _indicator == '+':
            self.open_ended = True
        self.write_line_break()
        breaks = True
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if breaks:
                # end of a run of line breaks
                if ch is None or ch not in '\n\x85\u2028\u2029':
                    for br in text[start:end]:
                        if br == '\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    if ch is not None:
                        if self.root_context:
                            # at the document root: indent by hand instead of
                            # going through write_indent()
                            idnx = self.indent if self.indent is not None else 0
                            self.stream.write(' ' * (_indent + idnx))
                        else:
                            self.write_indent()
                    start = end
            else:
                # flush literal characters up to the next break / end of text
                if ch is None or ch in '\n\x85\u2028\u2029':
                    data = text[start:end]
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    if ch is None:
                        self.write_line_break()
                    start = end
            if ch is not None:
                breaks = ch in '\n\x85\u2028\u2029'
            end += 1
    def write_plain(self, text, split=True):
        # type: (Any, Any) -> None
        """Emit *text* as a plain (unquoted) scalar, folding long lines at a
        single space when *split* is true."""
        if self.root_context:
            if self.requested_indent is not None:
                self.write_line_break()
                if self.requested_indent != 0:
                    self.write_indent()
            else:
                self.open_ended = True
        if not text:
            return
        if not self.whitespace:
            # separate from the previous token with a single space
            data = ' '
            self.column += len(data)
            if self.encoding:
                data = data.encode(self.encoding)  # type: ignore
            self.stream.write(data)
        self.whitespace = False
        self.indention = False
        spaces = False
        breaks = False
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if spaces:
                # end of a run of spaces
                if ch != ' ':
                    if start + 1 == end and self.column > self.best_width and split:
                        # fold a single space into a line break
                        self.write_indent()
                        self.whitespace = False
                        self.indention = False
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)  # type: ignore
                        self.stream.write(data)
                    start = end
            elif breaks:
                # end of a run of line breaks
                if ch not in '\n\x85\u2028\u2029':  # type: ignore
                    if text[start] == '\n':
                        self.write_line_break()
                    for br in text[start:end]:
                        if br == '\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    self.write_indent()
                    self.whitespace = False
                    self.indention = False
                    start = end
            else:
                # flush ordinary characters up to the next space/break
                if ch is None or ch in ' \n\x85\u2028\u2029':
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)  # type: ignore
                    try:
                        self.stream.write(data)
                    except:  # NOQA
                        sys.stdout.write(repr(data) + '\n')
                        raise
                    start = end
            if ch is not None:
                spaces = ch == ' '
                breaks = ch in '\n\x85\u2028\u2029'
            end += 1
    def write_comment(self, comment, pre=False):
        # type: (Any, bool) -> None
        """Write a comment token, padding it towards its original column.

        With ``pre`` true (pre-comments) the trailing newline is kept and no
        line break is appended afterwards.
        """
        value = comment.value
        # nprintf('{:02d} {:02d} {!r}'.format(self.column, comment.start_mark.column, value))
        if not pre and value[-1] == '\n':
            value = value[:-1]
        try:
            # get original column position
            col = comment.start_mark.column
            if comment.value and comment.value.startswith('\n'):
                # never inject extra spaces if the comment starts with a newline
                # and not a real comment (e.g. if you have an empty line following a key-value
                col = self.column
            elif col < self.column + 1:
                # NOTE(review): bare 'ValueError' is a no-op expression, not a
                # raise — presumably 'raise ValueError' was intended so the
                # except below clamps col; raising would change the output for
                # column-0 comments, so verify before changing.
                ValueError
        except ValueError:
            col = self.column + 1
        # nprint('post_comment', self.line, self.column, value)
        try:
            # at least one space if the current column >= the start column of the comment
            # but not at the start of a line
            nr_spaces = col - self.column
            if self.column and value.strip() and nr_spaces < 1 and value[0] != '\n':
                nr_spaces = 1
            value = ' ' * nr_spaces + value
            try:
                if bool(self.encoding):
                    value = value.encode(self.encoding)
            except UnicodeDecodeError:
                pass
            self.stream.write(value)
        except TypeError:
            raise
        if not pre:
            self.write_line_break()
def write_pre_comment(self, event):
# type: (Any) -> bool
comments = event.comment[1]
if comments is None:
return False
try:
start_events = (MappingStartEvent, SequenceStartEvent)
for comment in comments:
if isinstance(event, start_events) and getattr(comment, 'pre_done', None):
continue
if self.column != 0:
self.write_line_break()
self.write_comment(comment, pre=True)
if isinstance(event, start_events):
comment.pre_done = True
except TypeError:
sys.stdout.write('eventtt {} {}'.format(type(event), event))
raise
return True
def write_post_comment(self, event):
# type: (Any) -> bool
if self.event.comment[0] is None:
return False
comment = event.comment[0]
self.write_comment(comment)
return True
| Emitter |
python | kamyu104__LeetCode-Solutions | Python/repeated-string-match.py | {
"start": 33,
"end": 1116
} | class ____(object):
def repeatedStringMatch(self, A, B):
"""
:type A: str
:type B: str
:rtype: int
"""
def check(index):
return all(A[(i+index) % len(A)] == c
for i, c in enumerate(B))
M, p = 10**9+7, 113
p_inv = pow(p, M-2, M)
q = (len(B)+len(A)-1) // len(A)
b_hash, power = 0, 1
for c in B:
b_hash += power * ord(c)
b_hash %= M
power = (power*p) % M
a_hash, power = 0, 1
for i in xrange(len(B)):
a_hash += power * ord(A[i%len(A)])
a_hash %= M
power = (power*p) % M
if a_hash == b_hash and check(0): return q
power = (power*p_inv) % M
for i in xrange(len(B), (q+1)*len(A)):
a_hash = (a_hash-ord(A[(i-len(B))%len(A)])) * p_inv
a_hash += power * ord(A[i%len(A)])
a_hash %= M
if a_hash == b_hash and check(i-len(B)+1):
return q if i < q*len(A) else q+1
return -1
| Solution |
python | Pylons__pyramid | tests/test_viewderivers.py | {
"start": 71170,
"end": 71779
} | class ____(dict):
def get_csrf_token(self):
return self['csrf_token']
def parse_httpdate(s):
import datetime
# cannot use %Z, must use literal GMT; Jython honors timezone
# but CPython does not
return datetime.datetime.strptime(s, "%a, %d %b %Y %H:%M:%S GMT")
def assert_similar_datetime(one, two):
for attr in ('year', 'month', 'day', 'hour', 'minute'):
one_attr = getattr(one, attr)
two_attr = getattr(two, attr)
if not one_attr == two_attr: # pragma: no cover
raise AssertionError(f'{one_attr!r} != {two_attr!r} in {attr}')
| DummySession |
python | facelessuser__pymdown-extensions | pymdownx/critic.py | {
"start": 2784,
"end": 3765
} | class ____:
"""Stash critic marks until ready."""
def __init__(self, stash_key):
"""Initialize."""
self.stash_key = stash_key
self.stash = {}
self.count = 0
def __len__(self): # pragma: no cover
"""Get length of stash."""
return len(self.stash)
def get(self, key, default=None):
"""Get the specified item from the stash."""
code = self.stash.get(key, default)
return code
def remove(self, key): # pragma: no cover
"""Remove the specified item from the stash."""
del self.stash[key]
def store(self, code):
"""
Store the code in the stash with the placeholder.
Return placeholder.
"""
key = self.stash_key % str(self.count)
self.stash[key] = code
self.count += 1
return SOH + key + EOT
def clear(self):
"""Clear the stash."""
self.stash = {}
self.count = 0
| CriticStash |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 13148,
"end": 13236
} | class ____(PydanticValueError):
msg_template = 'invalid duration format'
| DurationError |
python | boto__boto3 | tests/__init__.py | {
"start": 965,
"end": 2030
} | class ____(unittest.TestCase):
"""
A base test case which mocks out the low-level session to prevent
any actual calls to Botocore.
"""
def setUp(self):
self.bc_session_patch = mock.patch('botocore.session.Session')
self.bc_session_cls = self.bc_session_patch.start()
loader = self.bc_session_cls.return_value.get_component.return_value
loader.data_path = ''
self.loader = loader
# We also need to patch the global default session.
# Otherwise it could be a cached real session came from previous
# "functional" or "integration" tests.
patch_global_session = mock.patch('boto3.DEFAULT_SESSION')
patch_global_session.start()
self.addCleanup(patch_global_session.stop)
def tearDown(self):
self.bc_session_patch.stop()
def requires_crt(reason=None):
if reason is None:
reason = "Test requires awscrt to be installed"
def decorator(func):
return unittest.skipIf(not HAS_CRT, reason)(func)
return decorator
| BaseTestCase |
python | walkccc__LeetCode | solutions/2170. Minimum Operations to Make the Array Alternating/2170.py | {
"start": 159,
"end": 791
} | class ____:
def minimumOperations(self, nums: list[int]) -> int:
# 0 := odd indices, 1 := even indices
ts = [T() for _ in range(2)]
for i, num in enumerate(nums):
t = ts[i % 2]
t.count[num] += 1
freq = t.count[num]
if freq > t.maxFreq:
t.maxFreq = freq
t.mx = num
elif freq > t.secondMaxFreq:
t.secondMaxFreq = freq
t.secondMax = num
if ts[0].mx == ts[1].mx:
return len(nums) - max(ts[0].maxFreq + ts[1].secondMaxFreq,
ts[1].maxFreq + ts[0].secondMaxFreq)
return len(nums) - (ts[0].maxFreq + ts[1].maxFreq)
| Solution |
python | sympy__sympy | sympy/codegen/cfunctions.py | {
"start": 2071,
"end": 3976
} | class ____(Function):
"""
Represents the natural logarithm of a number plus one.
Explanation
===========
The benefit of using ``log1p(x)`` over ``log(x + 1)``
is that the latter is prone to cancellation under finite precision
arithmetic when x is close to zero.
Examples
========
>>> from sympy.abc import x
>>> from sympy.codegen.cfunctions import log1p
>>> from sympy import expand_log
>>> '%.0e' % expand_log(log1p(1e-99)).evalf()
'1e-99'
>>> from math import log
>>> log(1 + 1e-99)
0.0
>>> log1p(x).diff(x)
1/(x + 1)
See Also
========
expm1
"""
nargs = 1
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return S.One/(self.args[0] + S.One)
else:
raise ArgumentIndexError(self, argindex)
def _eval_expand_func(self, **hints):
return _log1p(*self.args)
def _eval_rewrite_as_log(self, arg, **kwargs):
return _log1p(arg)
_eval_rewrite_as_tractable = _eval_rewrite_as_log
@classmethod
def eval(cls, arg):
if arg.is_Rational:
return log(arg + S.One)
elif not arg.is_Float: # not safe to add 1 to Float
return log.eval(arg + S.One)
elif arg.is_number:
return log(Rational(arg) + S.One)
def _eval_is_real(self):
return (self.args[0] + S.One).is_nonnegative
def _eval_is_finite(self):
if (self.args[0] + S.One).is_zero:
return False
return self.args[0].is_finite
def _eval_is_positive(self):
return self.args[0].is_positive
def _eval_is_zero(self):
return self.args[0].is_zero
def _eval_is_nonnegative(self):
return self.args[0].is_nonnegative
_Two = S(2)
def _exp2(x):
return Pow(_Two, x)
| log1p |
python | doocs__leetcode | lcof/面试题09. 用两个栈实现队列/Solution.py | {
"start": 0,
"end": 492
} | class ____:
def __init__(self):
self.stk1 = []
self.stk2 = []
def appendTail(self, value: int) -> None:
self.stk1.append(value)
def deleteHead(self) -> int:
if not self.stk2:
while self.stk1:
self.stk2.append(self.stk1.pop())
return -1 if not self.stk2 else self.stk2.pop()
# Your CQueue object will be instantiated and called as such:
# obj = CQueue()
# obj.appendTail(value)
# param_2 = obj.deleteHead()
| CQueue |
python | sqlalchemy__sqlalchemy | test/orm/test_cache_key.py | {
"start": 39087,
"end": 41157
} | class ____(DeclarativeMappedTest):
"""test #10570"""
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(ComparableMixin, Base):
__tablename__ = "a"
id = Column(Integer, Identity(), primary_key=True)
data = Column(String(30))
bs = relationship("B")
class B(ComparableMixin, Base):
__tablename__ = "b"
id = Column(Integer, Identity(), primary_key=True)
a_id = Column(ForeignKey("a.id"))
boolean = query_expression()
data = Column(String(30))
@classmethod
def insert_data(cls, connection):
A, B = cls.classes("A", "B")
with Session(connection) as s:
s.add(A(bs=[B(data="a"), B(data="b"), B(data="c")]))
s.commit()
@testing.combinations(
joinedload, lazyload, defaultload, selectinload, subqueryload
)
@testing.only_on(
["sqlite", "postgresql"],
"in-place boolean not generally available (Oracle, SQL Server)",
)
def test_from_opt(self, loadopt):
A, B = self.classes("A", "B")
def go(value):
with Session(testing.db) as sess:
objects = sess.execute(
select(A).options(
loadopt(A.bs).options(
with_expression(B.boolean, B.data == value)
)
)
).scalars()
if loadopt is joinedload:
objects = objects.unique()
eq_(
objects.all(),
[
A(
bs=[
B(data="a", boolean=value == "a"),
B(data="b", boolean=value == "b"),
B(data="c", boolean=value == "c"),
]
)
],
)
go("b")
go("c")
| WithExpresionLoaderOptTest |
python | bokeh__bokeh | src/bokeh/core/property_mixins.py | {
"start": 7131,
"end": 7421
} | class ____(HasProps):
''' Properties relevant to rendering fill regions.
Mirrors the BokehJS ``properties.Fill`` class.
'''
fill_color = Nullable(Color, default="gray", help=_color_help % "fill paths")
fill_alpha = Alpha(help=_alpha_help % "fill paths")
| ScalarFillProps |
python | nedbat__coveragepy | coverage/annotate.py | {
"start": 674,
"end": 3751
} | class ____:
"""Generate annotated source files showing line coverage.
This reporter creates annotated copies of the measured source files. Each
.py file is copied as a .py,cover file, with a left-hand margin annotating
each line::
> def h(x):
- if 0: #pragma: no cover
- pass
> if x == 1:
! a = 1
> else:
> a = 2
> h(2)
Executed lines use ">", lines not executed use "!", lines excluded from
consideration use "-".
"""
def __init__(self, coverage: Coverage) -> None:
self.coverage = coverage
self.config = self.coverage.config
self.directory: str | None = None
blank_re = re.compile(r"\s*(#|$)")
else_re = re.compile(r"\s*else\s*:\s*(#|$)")
def report(self, morfs: Iterable[TMorf] | None, directory: str | None = None) -> None:
"""Run the report.
See `coverage.report()` for arguments.
"""
self.directory = directory
self.coverage.get_data()
for fr, analysis in get_analysis_to_report(self.coverage, morfs):
self.annotate_file(fr, analysis)
def annotate_file(self, fr: FileReporter, analysis: Analysis) -> None:
"""Annotate a single file.
`fr` is the FileReporter for the file to annotate.
"""
statements = sorted(analysis.statements)
missing = sorted(analysis.missing)
excluded = sorted(analysis.excluded)
if self.directory:
ensure_dir(self.directory)
dest_file = os.path.join(self.directory, flat_rootname(fr.relative_filename()))
assert dest_file.endswith("_py")
dest_file = dest_file[:-3] + ".py"
else:
dest_file = fr.filename
dest_file += ",cover"
with open(dest_file, "w", encoding="utf-8") as dest:
i = j = 0
covered = True
source = fr.source()
for lineno, line in enumerate(source.splitlines(True), start=1):
while i < len(statements) and statements[i] < lineno:
i += 1
while j < len(missing) and missing[j] < lineno:
j += 1
if i < len(statements) and statements[i] == lineno:
covered = j >= len(missing) or missing[j] > lineno
if self.blank_re.match(line):
dest.write(" ")
elif self.else_re.match(line):
# Special logic for lines containing only "else:".
if j >= len(missing):
dest.write("> ")
elif statements[i] == missing[j]:
dest.write("! ")
else:
dest.write("> ")
elif lineno in excluded:
dest.write("- ")
elif covered:
dest.write("> ")
else:
dest.write("! ")
dest.write(line)
| AnnotateReporter |
python | getsentry__sentry | tests/sentry/integrations/api/endpoints/test_organization_integration_details.py | {
"start": 4942,
"end": 10206
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-integration-details"
method = "get"
def setUp(self) -> None:
super().setUp()
self.min_ago = before_now(minutes=1).isoformat()
self.integration = self.create_integration(
organization=self.organization,
external_id="jira:1",
provider="jira",
name="Jira Cloud",
metadata={
"oauth_client_id": "oauth-client-id",
"shared_secret": "a-super-secret-key-from-atlassian",
"base_url": "https://example.atlassian.net",
"domain_name": "example.atlassian.net",
},
)
self.user.name = "Sentry Admin"
self.user.save()
self.login_as(self.user)
self.integration.add_organization(self.organization, self.user)
@responses.activate
def test_serialize_organizationintegration_with_create_issue_config_for_jira(self) -> None:
"""Test the flow of choosing ticket creation on alert rule fire action
then serializes the issue config correctly for Jira"""
# Mock the legacy projects response
responses.add(
responses.GET,
"https://example.atlassian.net/rest/api/2/project",
json=[
{"id": "10000", "key": "PROJ1", "name": "Project 1"},
{"id": "10001", "key": "PROJ2", "name": "Project 2"},
],
)
# Mock the paginated projects response
responses.add(
responses.GET,
"https://example.atlassian.net/rest/api/2/project/search",
json={
"values": [
{"id": "10000", "key": "PROJ1", "name": "Project 1"},
{"id": "10001", "key": "PROJ2", "name": "Project 2"},
],
"total": 2,
},
)
# Mock the create issue metadata endpoint
responses.add(
responses.GET,
"https://example.atlassian.net/rest/api/2/issue/createmeta",
json={
"projects": [
{
"id": "10000",
"key": "PROJ1",
"name": "Project 1",
"issuetypes": [
{
"description": "An error in the code",
"fields": {
"issuetype": {
"key": "issuetype",
"name": "Issue Type",
"required": True,
}
},
"id": "bug1",
"name": "Bug",
}
],
}
]
},
)
params = {"action": "create"}
installation = self.integration.get_installation(self.organization.id)
response = self.get_success_response(
self.organization.slug,
self.integration.id,
qs_params=params,
)
data = response.data
# Check we serialized the integration correctly
assert data["id"] == str(self.integration.id)
assert data["name"] == self.integration.name
assert data["icon"] == self.integration.metadata.get("icon")
assert data["domainName"] == self.integration.metadata.get("domain_name")
assert data["accountType"] == self.integration.metadata.get("account_type")
assert data["scopes"] == self.integration.metadata.get("scopes")
assert data["status"] == self.integration.get_status_display()
# Check we serialized the provider correctly
resp_provider = data["provider"]
provider = self.integration.get_provider()
assert resp_provider["key"] == provider.key
assert resp_provider["slug"] == provider.key
assert resp_provider["name"] == provider.name
assert resp_provider["canAdd"] == provider.can_add
assert resp_provider["canDisable"] == provider.can_disable
assert resp_provider["features"] == sorted(f.value for f in provider.features)
assert resp_provider["aspects"] == getattr(provider.metadata, "aspects", {})
# Check we serialized the create issue config correctly
assert installation.get_create_issue_config(None, self.user) == data.get(
"createIssueConfig", {}
)
assert installation.get_organization_config() == data.get("configOrganization", {})
# Check we serialized the other organization integration details correctly
assert data["configData"] == installation.get_config_data()
assert data["externalId"] == self.integration.external_id
assert data["organizationId"] == self.organization.id
assert (
data["organizationIntegrationStatus"]
== self.organization_integration.get_status_display()
)
assert data["gracePeriodEnd"] == self.organization_integration.grace_period_end
| IssueOrganizationIntegrationDetailsGetTest |
python | huggingface__transformers | src/transformers/generation/streamers.py | {
"start": 9229,
"end": 12985
} | class ____(TextStreamer):
"""
Streamer that stores print-ready text in a queue, to be used by a downstream application as an async iterator.
This is useful for applications that benefit from accessing the generated text asynchronously (e.g. in an
interactive Gradio demo).
<Tip warning={true}>
The API for the streamer classes is still under development and may change in the future.
</Tip>
Parameters:
tokenizer (`AutoTokenizer`):
The tokenized used to decode the tokens.
skip_prompt (`bool`, *optional*, defaults to `False`):
Whether to skip the prompt to `.generate()` or not. Useful e.g. for chatbots.
timeout (`float`, *optional*):
The timeout for the text queue. If `None`, the queue will block indefinitely. Useful to handle exceptions
in `.generate()`, when it is called in a separate thread.
decode_kwargs (`dict`, *optional*):
Additional keyword arguments to pass to the tokenizer's `decode` method.
Raises:
TimeoutError: If token generation time exceeds timeout value.
Examples:
```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer, AsyncTextIteratorStreamer
>>> from threading import Thread
>>> import asyncio
>>> tok = AutoTokenizer.from_pretrained("openai-community/gpt2")
>>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
>>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
>>> # Run the generation in a separate thread, so that we can fetch the generated text in a non-blocking way.
>>> async def main():
... # Important: AsyncTextIteratorStreamer must be initialized inside a coroutine!
... streamer = AsyncTextIteratorStreamer(tok)
... generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=20)
... thread = Thread(target=model.generate, kwargs=generation_kwargs)
... thread.start()
... generated_text = ""
... async for new_text in streamer:
... generated_text += new_text
>>> print(generated_text)
>>> asyncio.run(main())
An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven,
```
"""
def __init__(
self, tokenizer: AutoTokenizer, skip_prompt: bool = False, timeout: float | None = None, **decode_kwargs
):
super().__init__(tokenizer, skip_prompt, **decode_kwargs)
self.text_queue = asyncio.Queue()
self.stop_signal = None
self.timeout = timeout
self.loop = asyncio.get_running_loop()
self.has_asyncio_timeout = hasattr(asyncio, "timeout")
def on_finalized_text(self, text: str, stream_end: bool = False):
"""Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
self.loop.call_soon_threadsafe(self.text_queue.put_nowait, text)
if stream_end:
self.loop.call_soon_threadsafe(self.text_queue.put_nowait, self.stop_signal)
def __aiter__(self):
return self
async def __anext__(self):
try:
if self.has_asyncio_timeout:
async with asyncio.timeout(self.timeout):
value = await self.text_queue.get()
else:
value = await asyncio.wait_for(self.text_queue.get(), timeout=self.timeout)
except asyncio.TimeoutError:
raise TimeoutError()
else:
if value == self.stop_signal:
raise StopAsyncIteration()
else:
return value
| AsyncTextIteratorStreamer |
python | optuna__optuna | optuna/storages/_rdb/alembic/versions/v3.0.0.b.py | {
"start": 924,
"end": 1115
} | class ____(BaseModel):
__tablename__ = "trials"
trial_id = Column(Integer, primary_key=True)
number = Column(Integer)
state = Column(Enum(TrialState), nullable=False)
| TrialModel |
python | walkccc__LeetCode | solutions/1535. Find the Winner of an Array Game/1535.py | {
"start": 0,
"end": 268
} | class ____:
def getWinner(self, arr: list[int], k: int) -> int:
ans = arr[0]
wins = 0
i = 1
while i < len(arr) and wins < k:
if arr[i] > ans:
ans = arr[i]
wins = 1
else:
wins += 1
i += 1
return ans
| Solution |
python | tornadoweb__tornado | tornado/test/auth_test.py | {
"start": 4249,
"end": 4524
} | class ____(RequestHandler, OAuth2Mixin):
def initialize(self, test):
self._OAUTH_AUTHORIZE_URL = test.get_url("/oauth2/server/authorize")
def get(self):
res = self.authorize_redirect() # type: ignore
assert res is None
| OAuth2ClientLoginHandler |
python | tensorflow__tensorflow | tensorflow/compiler/tests/manip_ops_test.py | {
"start": 976,
"end": 2413
} | class ____(xla_test.XLATestCase):
"""Test cases for manip ops."""
def _testRoll(self, a, shift, axis):
with self.session() as session:
with self.test_scope():
p = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name="a")
output = manip_ops.roll(a, shift, axis)
result = session.run(output, {p: a})
self.assertAllEqual(result, np.roll(a, shift, axis))
def testNumericTypes(self):
for t in self.numeric_types:
self._testRoll(np.random.randint(-100, 100, (5)).astype(t), 3, 0)
self._testRoll(
np.random.randint(-100, 100, (4, 4, 3)).astype(t), [1, -6, 6],
[0, 1, 2])
self._testRoll(
np.random.randint(-100, 100, (4, 2, 1, 3)).astype(t), [0, 1, -2],
[1, 2, 3])
def testFloatTypes(self):
for t in self.float_types:
self._testRoll(np.random.rand(5).astype(t), 2, 0)
self._testRoll(np.random.rand(3, 4).astype(t), [1, 2], [1, 0])
self._testRoll(np.random.rand(1, 3, 4).astype(t), [1, 0, -3], [0, 1, 2])
def testComplexTypes(self):
for t in self.complex_types:
x = np.random.rand(4, 4).astype(t)
self._testRoll(x + 1j * x, 2, 0)
x = np.random.rand(2, 5).astype(t)
self._testRoll(x + 1j * x, [1, 2], [1, 0])
x = np.random.rand(3, 2, 1, 1).astype(t)
self._testRoll(x + 1j * x, [2, 1, 1, 0], [0, 3, 1, 2])
if __name__ == "__main__":
googletest.main()
| ManipOpsTest |
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/GridItem.py | {
"start": 206,
"end": 7134
} | class ____(UIGraphicsItem):
"""
**Bases:** :class:`UIGraphicsItem <pyqtgraph.UIGraphicsItem>`
Displays a rectangular grid of lines indicating major divisions within a coordinate system.
Automatically determines what divisions to use.
"""
def __init__(self, pen='default', textPen='default'):
UIGraphicsItem.__init__(self)
#QtWidgets.QGraphicsItem.__init__(self, *args)
#self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemClipsToShape)
#self.setCacheMode(QtWidgets.QGraphicsItem.CacheMode.DeviceCoordinateCache)
self.opts = {}
self.setPen(pen)
self.setTextPen(textPen)
self.setTickSpacing(x=[None, None, None], y=[None, None, None])
def setPen(self, *args, **kwargs):
"""Set the pen used to draw the grid."""
if kwargs == {} and (args == () or args == ('default',)):
self.opts['pen'] = fn.mkPen(getConfigOption('foreground'))
else:
self.opts['pen'] = fn.mkPen(*args, **kwargs)
self.picture = None
self.update()
def setTextPen(self, *args, **kwargs):
"""Set the pen used to draw the texts."""
if kwargs == {} and (args == () or args == ('default',)):
self.opts['textPen'] = fn.mkPen(getConfigOption('foreground'))
else:
if args == (None,):
self.opts['textPen'] = None
else:
self.opts['textPen'] = fn.mkPen(*args, **kwargs)
self.picture = None
self.update()
def setTickSpacing(self, x=None, y=None):
"""
Set the grid tick spacing to use.
Tick spacing for each axis shall be specified as an array of
descending values, one for each tick scale. When the value
is set to None, grid line distance is chosen automatically
for this particular level.
Example:
Default setting of 3 scales for each axis:
setTickSpacing(x=[None, None, None], y=[None, None, None])
Single scale with distance of 1.0 for X axis, Two automatic
scales for Y axis:
setTickSpacing(x=[1.0], y=[None, None])
Single scale with distance of 1.0 for X axis, Two scales
for Y axis, one with spacing of 1.0, other one automatic:
setTickSpacing(x=[1.0], y=[1.0, None])
"""
self.opts['tickSpacing'] = (x or self.opts['tickSpacing'][0],
y or self.opts['tickSpacing'][1])
self.grid_depth = max([len(s) for s in self.opts['tickSpacing']])
self.picture = None
self.update()
def viewRangeChanged(self):
UIGraphicsItem.viewRangeChanged(self)
self.picture = None
#UIGraphicsItem.viewRangeChanged(self)
#self.update()
def paint(self, p, opt, widget):
#p.setPen(QtGui.QPen(QtGui.QColor(100, 100, 100)))
#p.drawRect(self.boundingRect())
#UIGraphicsItem.paint(self, p, opt, widget)
### draw picture
if self.picture is None:
#print "no pic, draw.."
self.generatePicture()
if self.picture is not None:
p.drawPicture(QtCore.QPointF(0, 0), self.picture)
#p.setPen(QtGui.QPen(QtGui.QColor(255,0,0)))
#p.drawLine(0, -100, 0, 100)
#p.drawLine(-100, 0, 100, 0)
#print "drawing Grid."
def generatePicture(self):
lvr = self.boundingRect()
device_transform = self.deviceTransform_()
if lvr.isNull() or device_transform is None:
return
self.picture = QtGui.QPicture()
p = QtGui.QPainter()
p.begin(self.picture)
vr = self.getViewWidget().rect()
unit = self.pixelWidth(), self.pixelHeight()
dim = [vr.width(), vr.height()]
ul = np.array([lvr.left(), lvr.top()])
br = np.array([lvr.right(), lvr.bottom()])
texts = []
if ul[1] > br[1]:
x = ul[1]
ul[1] = br[1]
br[1] = x
lastd = [None, None]
for i in range(self.grid_depth - 1, -1, -1):
dist = br-ul
nlTarget = 10.**i
d = 10. ** np.floor(np.log10(np.abs(dist/nlTarget))+0.5)
for ax in range(0,2):
ts = self.opts['tickSpacing'][ax]
try:
if ts[i] is not None:
d[ax] = ts[i]
except IndexError:
pass
lastd[ax] = d[ax]
ul1 = np.floor(ul / d) * d
br1 = np.ceil(br / d) * d
dist = br1-ul1
nl = (dist / d) + 0.5
for ax in range(0,2): ## Draw grid for both axes
if i >= len(self.opts['tickSpacing'][ax]):
continue
if d[ax] < lastd[ax]:
continue
ppl = dim[ax] / nl[ax]
c = int(fn.clip_scalar(5 * (ppl-3), 0, 50))
linePen = self.opts['pen']
lineColor = self.opts['pen'].color()
lineColor.setAlpha(c)
linePen.setColor(lineColor)
textPen = self.opts['textPen']
if textPen is not None:
textColor = self.opts['textPen'].color()
textColor.setAlpha(c * 2)
textPen.setColor(textColor)
bx = (ax+1) % 2
for x in range(0, int(nl[ax])):
linePen.setCosmetic(True)
p.setPen(linePen)
p1 = np.array([0.,0.])
p2 = np.array([0.,0.])
p1[ax] = ul1[ax] + x * d[ax]
p2[ax] = p1[ax]
p1[bx] = ul[bx]
p2[bx] = br[bx]
## don't draw lines that are out of bounds.
if p1[ax] < min(ul[ax], br[ax]) or p1[ax] > max(ul[ax], br[ax]):
continue
p.drawLine(QtCore.QPointF(p1[0], p1[1]), QtCore.QPointF(p2[0], p2[1]))
if i < 2 and textPen is not None:
if ax == 0:
x = p1[0] + unit[0]
y = ul[1] + unit[1] * 8.
else:
x = ul[0] + unit[0]*3
y = p1[1] + unit[1]
texts.append((QtCore.QPointF(x, y), "%g"%p1[ax]))
p.setWorldTransform(fn.invertQTransform(device_transform))
if textPen is not None and len(texts) > 0:
# if there is at least one text, then c is set
textColor.setAlpha(c * 2)
p.setPen(QtGui.QPen(textColor))
for t in texts:
x = device_transform.map(t[0]) + Point(0.5, 0.5)
p.drawText(x, t[1])
p.end()
| GridItem |
python | readthedocs__readthedocs.org | readthedocs/core/unresolver.py | {
"start": 873,
"end": 979
} | class ____(UnresolverError):
def __init__(self, scheme):
self.scheme = scheme
| InvalidSchemeError |
python | zarr-developers__zarr-python | src/zarr/codecs/numcodecs/_codecs.py | {
"start": 11622,
"end": 11701
} | class ____(_NumcodecsChecksumCodec, codec_name="fletcher32"):
pass
| Fletcher32 |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/metadata.py | {
"start": 4198,
"end": 4504
} | class ____(graphene.ObjectType):
jobName = graphene.NonNull(graphene.String)
repositoryName = graphene.Field(graphene.String)
locationName = graphene.NonNull(graphene.String)
class Meta:
interfaces = (GrapheneMetadataEntry,)
name = "JobMetadataEntry"
| GrapheneJobMetadataEntry |
python | spack__spack | lib/spack/spack/solver/requirements.py | {
"start": 452,
"end": 796
} | class ____(enum.Enum):
"""Purpose / provenance of a requirement"""
#: Default requirement expressed under the 'all' attribute of packages.yaml
DEFAULT = enum.auto()
#: Requirement expressed on a virtual package
VIRTUAL = enum.auto()
#: Requirement expressed on a specific package
PACKAGE = enum.auto()
| RequirementKind |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/layout/controls.py | {
"start": 16498,
"end": 17066
} | class ____(UIControl):
"""
A dummy control object that doesn't paint any content.
Useful for filling a :class:`~prompt_toolkit.layout.Window`. (The
`fragment` and `char` attributes of the `Window` class can be used to
define the filling.)
"""
def create_content(self, width: int, height: int) -> UIContent:
def get_line(i: int) -> StyleAndTextTuples:
return []
return UIContent(get_line=get_line, line_count=100**100) # Something very big.
def is_focusable(self) -> bool:
return False
| DummyControl |
python | sphinx-doc__sphinx | sphinx/search/fi.py | {
"start": 193,
"end": 596
} | class ____(SearchLanguage):
lang = 'fi'
language_name = 'Finnish'
js_stemmer_rawcode = 'finnish-stemmer.js'
stopwords = FINNISH_STOPWORDS
def __init__(self, options: dict[str, str]) -> None:
super().__init__(options)
self.stemmer = snowballstemmer.stemmer('finnish')
def stem(self, word: str) -> str:
return self.stemmer.stemWord(word.lower())
| SearchFinnish |
python | kamyu104__LeetCode-Solutions | Python/combinations.py | {
"start": 39,
"end": 696
} | class ____(object):
def combine(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[List[int]]
"""
if k > n:
return []
nums, idxs = range(1, n+1), range(k)
result = [[nums[i] for i in idxs]]
while True:
for i in reversed(xrange(k)):
if idxs[i] != i+n-k:
break
else:
break
idxs[i] += 1
for j in xrange(i+1, k):
idxs[j] = idxs[j-1]+1
result.append([nums[i] for i in idxs])
return result
# Time: O(k * C(n, k))
# Space: O(k)
| Solution |
python | django__django | django/db/migrations/serializer.py | {
"start": 2568,
"end": 2904
} | class ____(BaseSerializer):
"""For datetime.datetime."""
def serialize(self):
if self.value.tzinfo is not None and self.value.tzinfo != datetime.UTC:
self.value = self.value.astimezone(datetime.UTC)
imports = ["import datetime"]
return repr(self.value), set(imports)
| DatetimeDatetimeSerializer |
python | falconry__falcon | tests/test_validators.py | {
"start": 2268,
"end": 2373
} | class ____:
def __init__(self, valid=True):
self.media = _VALID_MEDIA if valid else {}
| _MockReq |
python | getsentry__sentry | tests/sentry/api/serializers/test_event.py | {
"start": 855,
"end": 11764
} | class ____(TestCase, OccurrenceTestMixin):
def test_simple(self) -> None:
event_id = "a" * 32
event = self.store_event(
data={"event_id": event_id, "timestamp": before_now(minutes=1).isoformat()},
project_id=self.project.id,
)
result = serialize(event)
assert result["id"] == event_id
assert result["eventID"] == event_id
assert result["occurrence"] is None
def test_eventerror(self) -> None:
event = self.store_event(
data={
"event_id": "a" * 32,
"timestamp": before_now(minutes=1).isoformat(),
"stacktrace": ["ü"],
},
project_id=self.project.id,
assert_no_errors=False,
)
result = serialize(event)
assert len(result["errors"]) == 1
assert "data" in result["errors"][0]
assert result["errors"][0]["type"] == EventError.INVALID_DATA
assert result["errors"][0]["data"] == {
"name": "stacktrace",
"reason": "expected rawstacktrace",
"value": ["\xfc"],
}
assert "startTimestamp" not in result
assert "timestamp" not in result
def test_hidden_eventerror(self) -> None:
event = self.store_event(
data={
"event_id": "a" * 32,
"timestamp": before_now(minutes=1).isoformat(),
"breadcrumbs": ["ü"],
},
project_id=self.project.id,
assert_no_errors=False,
)
result = serialize(event)
assert result["errors"] == []
def test_renamed_attributes(self) -> None:
# Only includes meta for simple top-level attributes
event = self.store_event(
data={
"event_id": "a" * 32,
"timestamp": before_now(minutes=1).isoformat(),
"extra": {"extra": True},
"modules": {"modules": "foobar"},
"_meta": {
"extra": {"": {"err": ["extra error"]}},
"modules": {"": {"err": ["modules error"]}},
},
},
project_id=self.project.id,
assert_no_errors=False,
)
result = serialize(event)
assert result["context"] == {"extra": True}
assert result["_meta"]["context"] == {"": {"err": ["extra error"]}}
assert result["packages"] == {"modules": "foobar"}
assert result["_meta"]["packages"] == {"": {"err": ["modules error"]}}
def test_message_interface(self) -> None:
event = self.store_event(
data={
"event_id": "a" * 32,
"timestamp": before_now(minutes=1).isoformat(),
"logentry": {"formatted": "bar"},
"_meta": {"logentry": {"formatted": {"": {"err": ["some error"]}}}},
},
project_id=self.project.id,
assert_no_errors=False,
)
result = serialize(event)
assert result["message"] == "bar"
assert result["_meta"]["message"] == {"": {"err": ["some error"]}}
def test_message_formatted(self) -> None:
event = self.store_event(
data={
"event_id": "a" * 32,
"timestamp": before_now(minutes=1).isoformat(),
"logentry": {"formatted": "baz"},
"_meta": {"logentry": {"formatted": {"": {"err": ["some error"]}}}},
},
project_id=self.project.id,
assert_no_errors=False,
)
result = serialize(event)
assert result["message"] == "baz"
assert result["_meta"]["message"] == {"": {"err": ["some error"]}}
def test_exception_interface(self) -> None:
event = self.store_event(
data={
"event_id": "a" * 32,
"timestamp": before_now(minutes=1).isoformat(),
"exception": {
"values": [
{
"type": "ValidationError",
"value": "Bad request",
"stacktrace": {
"frames": [
{
"filename": "foo.py",
"lineno": 100,
"in_app": True,
"vars": {"foo": "[Filtered]"},
}
]
},
}
]
},
"_meta": {
"exception": {
"values": {
"0": {
"stacktrace": {
"frames": {
"0": {
"lineno": 100,
"in_app": True,
"vars": {"foo": {"": {"err": ["some error"]}}},
}
}
}
}
}
}
},
},
project_id=self.project.id,
assert_no_errors=False,
)
result = serialize(event)
assert result["entries"][0]["type"] == "exception"
# Exception interface data should be preserved
assert (
result["entries"][0]["data"]["values"][0]["stacktrace"]["frames"][0]["vars"]["foo"]
== "[Filtered]"
)
# Exception meta should be preserved
assert result["_meta"]["entries"][0]["data"]["values"]["0"]["stacktrace"]["frames"]["0"][
"vars"
]["foo"] == {"": {"err": ["some error"]}}
def test_tags_tuples(self) -> None:
event = self.store_event(
data={
"event_id": "a" * 32,
"level": "error", # creates a derived tag.
"timestamp": before_now(minutes=1).isoformat(),
"tags": [["foo", "foo"], ["bar", "bar"], ["last", "tag"], None],
"_meta": {
"tags": {
"0": {"1": {"": {"err": ["foo error"]}}},
"1": {"0": {"": {"err": ["bar error"]}}},
"3": {"": {"err": ["full error"]}},
}
},
},
project_id=self.project.id,
assert_no_errors=False,
)
result = serialize(event)
# Expect 3 custom tags + derived "level". The ``None``` entry is removed
# by the serializer as it cannot be rendered. Such entries are generated
# by Relay normalization.
assert len(result["tags"]) == 4
assert result["tags"][0]["value"] == "bar"
assert result["tags"][1]["value"] == "foo"
assert result["_meta"]["tags"]["0"]["key"] == {"": {"err": ["bar error"]}}
assert result["_meta"]["tags"]["1"]["value"] == {"": {"err": ["foo error"]}}
assert result["_meta"]["tags"].get("2") is None
def test_tags_dict(self) -> None:
event = self.store_event(
data={
"event_id": "a" * 32,
"timestamp": before_now(minutes=1).isoformat(),
"tags": {"foo": "foo", "bar": "bar", "last": "tag"},
"_meta": {
"tags": {
"foo": {"": {"err": ["foo error"]}},
"bar": {"": {"err": ["bar error"]}},
}
},
},
project_id=self.project.id,
assert_no_errors=False,
)
result = serialize(event)
assert result["tags"][0]["value"] == "bar"
assert result["tags"][1]["value"] == "foo"
assert result["_meta"]["tags"]["0"]["value"] == {"": {"err": ["bar error"]}}
assert result["_meta"]["tags"]["1"]["value"] == {"": {"err": ["foo error"]}}
assert result["_meta"]["tags"].get("2") is None
def test_none_interfaces(self) -> None:
event = self.store_event(
data={
"event_id": "a" * 32,
"timestamp": before_now(minutes=1).isoformat(),
"breadcrumbs": None,
"exception": None,
"logentry": None,
"request": None,
"user": None,
"contexts": None,
"sdk": None,
"_meta": None,
},
project_id=self.project.id,
)
result = serialize(event)
assert not any(e["type"] == "breadcrumbs" for e in result["entries"])
assert not any(e["type"] == "exception" for e in result["entries"])
assert not any(e["type"] == "message" for e in result["entries"])
assert not any(e["type"] == "request" for e in result["entries"])
assert result["user"] is None
assert result["sdk"] is None
assert result["contexts"] == {}
assert "startTimestamp" not in result
def test_transaction_event(self) -> None:
event_data = load_data("transaction")
event = self.store_event(data=event_data, project_id=self.project.id)
result = serialize(event)
assert isinstance(result["endTimestamp"], float)
assert result["endTimestamp"] == event.data.get("timestamp")
assert isinstance(result["startTimestamp"], float)
assert result["startTimestamp"] == event.data.get("start_timestamp")
assert "dateCreated" not in result
assert "crashFile" not in result
assert "fingerprints" not in result
assert "measurements" in result
assert result["measurements"] == event_data["measurements"]
assert "breakdowns" in result
assert result["breakdowns"] == event_data["breakdowns"]
def test_transaction_event_empty_spans(self) -> None:
event_data = load_data("transaction")
event_data["spans"] = []
event = self.store_event(data=event_data, project_id=self.project.id)
result = serialize(event)
assert result["entries"][0]["type"] == "spans"
def test_event_with_occurrence(self) -> None:
event = self.store_event(
data={},
project_id=self.project.id,
)
assert event.group is not None
event_group = event.for_group(event.group)
event_group.occurrence = occurrence = self.build_occurrence()
result = serialize(event_group)
assert result["occurrence"] == convert_dict_key_case(
occurrence.to_dict(), snake_to_camel_case
)
| EventSerializerTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType14.py | {
"start": 385,
"end": 456
} | class ____(Registry[type[BaseType], BaseType]):
pass
| BaseTypeRegistry |
python | python-pillow__Pillow | src/PIL/ImageFont.py | {
"start": 1467,
"end": 2447
} | class ____(IntEnum):
BASIC = 0
RAQM = 1
MAX_STRING_LENGTH = 1_000_000
core: ModuleType | DeferredError
try:
from . import _imagingft as core
except ImportError as ex:
core = DeferredError.new(ex)
def _string_length_check(text: str | bytes | bytearray) -> None:
if MAX_STRING_LENGTH is not None and len(text) > MAX_STRING_LENGTH:
msg = "too many characters in string"
raise ValueError(msg)
# FIXME: add support for pilfont2 format (see FontFile.py)
# --------------------------------------------------------------------
# Font metrics format:
# "PILfont" LF
# fontdescriptor LF
# (optional) key=value... LF
# "DATA" LF
# binary data: 256*10*2 bytes (dx, dy, dstbox, srcbox)
#
# To place a character, cut out srcbox and paste at dstbox,
# relative to the character position. Then move the character
# position according to dx, dy.
# --------------------------------------------------------------------
| Layout |
python | ray-project__ray | doc/source/conf.py | {
"start": 6307,
"end": 22767
} | class ____(Version):
def __init__(self, version: str):
if isinstance(version, (str, bytes)):
super().__init__(version)
else:
super().__init__("0")
packaging_version.Version = MockVersion
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Ray"
copyright = str(datetime.now().year) + ", The Ray Team"
author = "The Ray Team"
# The version info for the project you're documenting acts as replacement for
# |version| and |release|, and is also used in various other places throughout the
# built documents. Retrieve the version using `find_version` rather than importing
# directly (from ray import __version__) because initializing ray will prevent
# mocking of certain external dependencies.
from setup import find_version # noqa
release = find_version("ray", "_version.py")
language = "en"
# autogen files are only used to auto-generate public API documentation.
# They are not included in the toctree to avoid warnings such as documents not included
# in any toctree.
autogen_files = [
"data/api/_autogen.rst",
]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# Also helps resolve warnings about documents not included in any toctree.
exclude_patterns = [
"templates/*",
"cluster/running-applications/doc/ray.*",
"data/api/ray.data.*.rst",
"ray-overview/examples/**/README.md", # Exclude .md files in examples subfolders
"train/examples/**/README.md",
"serve/tutorials/deployment-serve-llm/README.*",
"serve/tutorials/deployment-serve-llm/*/notebook.ipynb",
"data/examples/**/content/README.md",
"ray-overview/examples/llamafactory-llm-fine-tune/README.ipynb",
"ray-overview/examples/llamafactory-llm-fine-tune/**/*.ipynb",
] + autogen_files
# If "DOC_LIB" is found, only build that top-level navigation item.
build_one_lib = os.getenv("DOC_LIB")
all_toc_libs = [
f.path.strip("./") for f in os.scandir(".") if f.is_dir() and "ray-" in f.path
]
all_toc_libs += [
"cluster",
"tune",
"data",
"train",
"rllib",
"serve",
"llm",
"workflows",
]
if build_one_lib and build_one_lib in all_toc_libs:
all_toc_libs.remove(build_one_lib)
exclude_patterns += all_toc_libs
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# Do not check anchors for links because it produces many false positives
# and is slow (it needs to download the linked website).
linkcheck_anchors = False
if os.environ.get("LINKCHECK_ALL"):
# Only check external links, i.e. the ones starting with http:// or https://.
linkcheck_ignore = [
r"^((?!http).)*$", # exclude links not starting with http
"http://ala2017.it.nuigalway.ie/papers/ALA2017_Gupta.pdf", # broken
"https://mvnrepository.com/artifact/*", # working but somehow not with linkcheck
# This should be fixed -- is temporal the successor of cadence? Do the examples need to be updated?
"https://github.com/serverlessworkflow/specification/blob/main/comparisons/comparison-cadence.md",
"https://www.oracle.com/java/technologies/javase-jdk15-downloads.html", # forbidden for client
"https://speakerdeck.com/*", # forbidden for bots
r"https://huggingface.co/*", # seems to be flaky
r"https://www.meetup.com/*", # seems to be flaky
r"https://www.pettingzoo.ml/*", # seems to be flaky
r"http://localhost[:/].*", # Ignore localhost links
r"^http:/$", # Ignore incomplete links
# 403 Client Error: Forbidden for url.
# They ratelimit bots.
"https://www.datanami.com/2018/02/01/rays-new-library-targets-high-speed-reinforcement-learning/",
# 403 Client Error: Forbidden for url.
# They ratelimit bots.
"https://www.researchgate.net/publication/222573328_Stochastic_Gradient_Boosting",
"https://www.datanami.com/2019/11/05/why-every-python-developer-will-love-ray/",
"https://dev.mysql.com/doc/connector-python/en/",
# Returning 522s intermittently.
"https://lczero.org/",
# Returns 406 but remains accessible
"https://www.uber.com/blog/elastic-xgboost-ray/",
# Aggressive anti-bot checks
"https://archive.vn/*",
"https://archive.is/*",
# 429: Rate limited
"https://medium.com/*",
"https://towardsdatascience.com/*",
]
else:
# Only check links that point to the ray-project org on github, since those
# links are under our control and therefore much more likely to be real
# issues that we need to fix if they are broken.
linkcheck_ignore = [
r"^(?!https://(raw\.githubusercontent|github)\.com/ray-project/).*$"
]
# -- Options for HTML output ----------------------------------------------
def render_svg_logo(path):
with open(pathlib.Path(__file__).parent / path, "r") as f:
content = f.read()
return content
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "pydata_sphinx_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"use_edit_page_button": True,
"announcement": """Try Ray with $100 credit — <a target="_blank" href="https://console.anyscale.com/register/ha?render_flow=ray&utm_source=ray_docs&utm_medium=docs&utm_campaign=banner">Start now</a><button type="button" id="close-banner" aria-label="Close banner">×</button>""",
"logo": {
"svg": render_svg_logo("_static/img/ray_logo.svg"),
},
"navbar_start": ["navbar-ray-logo"],
"navbar_end": [
"theme-switcher",
"version-switcher",
"navbar-icon-links",
"navbar-anyscale",
],
"navbar_center": ["navbar-links"],
"navbar_align": "left",
"secondary_sidebar_items": [
"page-toc",
"edit-on-github",
],
"content_footer_items": [
"csat",
],
"navigation_depth": 4,
"pygment_light_style": "stata-dark",
"pygment_dark_style": "stata-dark",
"switcher": {
"json_url": "https://docs.ray.io/en/master/_static/versions.json",
"version_match": os.getenv("READTHEDOCS_VERSION", "master"),
},
}
html_context = {
"github_user": "ray-project",
"github_repo": "ray",
"github_version": "master",
"doc_path": "doc/source/",
}
html_sidebars = {
"**": [
(
"main-sidebar-readthedocs"
if os.getenv("READTHEDOCS") == "True"
else "main-sidebar"
)
],
"ray-overview/examples": [],
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = f"Ray {release}"
autodoc_typehints_format = "short"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Output file base name for HTML help builder.
htmlhelp_basename = "Raydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
latex_documents = [
(master_doc, "Ray.tex", "Ray Documentation", author, "manual"),
]
# -- Options for manual page output ---------------------------------------
man_pages = [(master_doc, "ray", "Ray Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
texinfo_documents = [
(
master_doc,
"Ray",
"Ray Documentation",
author,
"Ray",
"Ray provides a simple, universal API for building distributed applications.",
"Miscellaneous",
),
]
# Python methods should be presented in source code order
autodoc_member_order = "bysource"
# Better typehint formatting (see custom.css)
autodoc_typehints = "signature"
def filter_out_undoc_class_members(member_name, class_name, module_name):
module = import_module(module_name)
cls = getattr(module, class_name)
if getattr(cls, member_name).__doc__:
return f"~{class_name}.{member_name}"
else:
return ""
def has_public_constructor(class_name, module_name):
cls = getattr(import_module(module_name), class_name)
return _is_public_api(cls)
def get_api_groups(method_names, class_name, module_name):
api_groups = set()
cls = getattr(import_module(module_name), class_name)
for method_name in method_names:
method = getattr(cls, method_name)
if _is_public_api(method):
api_groups.add(
safe_getattr(method, "_annotated_api_group", DEFAULT_API_GROUP)
)
return sorted(api_groups)
def select_api_group(method_names, class_name, module_name, api_group):
cls = getattr(import_module(module_name), class_name)
return [
method_name
for method_name in method_names
if _is_public_api(getattr(cls, method_name))
and _is_api_group(getattr(cls, method_name), api_group)
]
def _is_public_api(obj):
api_type = safe_getattr(obj, "_annotated_type", None)
if not api_type:
return False
return api_type.value == "PublicAPI"
def _is_api_group(obj, group):
return safe_getattr(obj, "_annotated_api_group", DEFAULT_API_GROUP) == group
FILTERS["filter_out_undoc_class_members"] = filter_out_undoc_class_members
FILTERS["get_api_groups"] = get_api_groups
FILTERS["select_api_group"] = select_api_group
FILTERS["has_public_constructor"] = has_public_constructor
def add_custom_assets(
app: sphinx.application.Sphinx,
pagename: str,
templatename: str,
context: Dict[str, Any],
doctree: nodes.Node,
):
"""Add custom per-page assets.
See documentation on Sphinx Core Events for more information:
https://www.sphinx-doc.org/en/master/extdev/appapi.html#sphinx-core-events
"""
if pagename == "index":
app.add_css_file("css/index.css")
app.add_js_file("js/index.js")
return "index.html" # Use the special index.html template for this page
if pagename == "ray-overview/examples":
app.add_css_file("css/examples.css")
app.add_js_file("js/examples.js")
return "ray-overview/examples.html"
if pagename in [
"data/examples",
"train/examples",
"serve/examples",
]:
return "examples.html"
if pagename == "train/train":
app.add_css_file("css/ray-train.css")
elif pagename == "ray-overview/ray-libraries":
app.add_css_file("css/ray-libraries.css")
elif pagename == "ray-overview/use-cases":
app.add_css_file("css/use_cases.css")
def _autogen_apis(app: sphinx.application.Sphinx):
"""
Auto-generate public API documentation.
"""
generate.generate_autosummary_docs(
[os.path.join(app.srcdir, file) for file in autogen_files],
app=app,
)
def process_signature(app, what, name, obj, options, signature, return_annotation):
# Sphinx is unable to render dataclass with factory/`field`
# https://github.com/sphinx-doc/sphinx/issues/10893
if what == "class" and is_dataclass(obj):
return signature.replace("<factory>", "..."), return_annotation
def setup(app):
    """Sphinx extension entry point: wire up all custom build behavior.

    Registers event handlers, page context processors, static CSS/JS assets,
    ecosystem-doc preprocessing, linkcheck summarization, API autogeneration,
    and a logging filter. Called once by Sphinx at startup.
    """
    # Only generate versions JSON during RTD build
    if os.getenv("READTHEDOCS") == "True":
        generate_versions_json()

    pregenerate_example_rsts(app)

    # NOTE: 'MOCK' is a custom option we introduced to illustrate mock outputs. Since
    # `doctest` doesn't support this flag by default, `sphinx.ext.doctest` raises
    # warnings when we build the documentation.
    import doctest

    doctest.register_optionflag("MOCK")

    # Page-context hooks: run in registration order on every rendered page.
    app.connect("html-page-context", update_context)

    app.add_config_value("navbar_content_path", "navbar.yml", "env")
    app.connect("config-inited", parse_navbar_config)
    app.connect("html-page-context", setup_context)
    app.connect("html-page-context", add_custom_assets)

    # Site-wide static assets; JS is deferred so it doesn't block rendering.
    # https://github.com/ines/termynal
    app.add_js_file("js/termynal.js", defer="defer")
    app.add_css_file("css/termynal.css")
    app.add_js_file("js/custom.js", defer="defer")
    app.add_css_file("css/custom.css", priority=800)
    app.add_js_file("js/csat.js", defer="defer")
    app.add_css_file("css/csat.css")
    app.add_js_file("js/assistant.js", defer="defer")
    app.add_css_file("css/assistant.css")
    app.add_js_file("js/dismissable-banner.js", defer="defer")
    app.add_css_file("css/dismissable-banner.css")

    base_path = pathlib.Path(__file__).parent
    github_docs = DownloadAndPreprocessEcosystemDocs(base_path)
    # Download docs from ecosystem library repos
    app.connect("builder-inited", github_docs.write_new_docs)
    # Restore original file content after build
    app.connect("build-finished", github_docs.write_original_docs)

    # Hook into the logger used by linkcheck to display a summary at the end.
    linkcheck_summarizer = LinkcheckSummarizer()
    app.connect("builder-inited", linkcheck_summarizer.add_handler_to_linkcheck)
    app.connect("build-finished", linkcheck_summarizer.summarize)

    # Hook into the auto generation of public apis
    app.connect("builder-inited", _autogen_apis)

    app.connect("autodoc-process-signature", process_signature)

    # Logging filter that suppresses one known-benign duplicate-description
    # warning while letting every other Sphinx warning through.
    class DuplicateObjectFilter(logging.Filter):
        def filter(self, record):
            # Intentionally allow duplicate object description of ray.actor.ActorMethod.bind:
            # once in Ray Core API and once in Compiled Graph API
            if (
                "duplicate object description of ray.actor.ActorMethod.bind"
                in record.getMessage()
            ):
                return False  # Don't log this specific warning
            return True  # Log all other warnings

    logging.getLogger("sphinx").addFilter(DuplicateObjectFilter())
# ReDoc configuration: embed the Ray Jobs REST API, rendered from its
# OpenAPI spec, directly into the named docs page.
redoc = [
    {
        "name": "Ray Jobs API",
        "page": "cluster/running-applications/job-submission/api",
        "spec": "cluster/running-applications/job-submission/openapi.yml",
        "embed": True,
    },
]

# CDN location of the standalone ReDoc renderer bundle.
redoc_uri = "https://cdn.redoc.ly/redoc/latest/bundles/redoc.standalone.js"

# Rename the generated stub for the `deployment` decorator — presumably to
# avoid a filename clash with the `Deployment` class on case-insensitive
# filesystems (TODO confirm).
autosummary_filename_map = {
    "ray.serve.deployment": "ray.serve.deployment_decorator",
    "ray.serve.Deployment": "ray.serve.Deployment",
}
# Mock out external dependencies here: autodoc imports the documented modules,
# and mocking lets the build run without installing these heavy packages.
# Fix: "grpc" was listed twice; the duplicate has been removed.
autodoc_mock_imports = [
    "aiohttp",
    "async_timeout",
    "backoff",
    "cachetools",
    "composer",
    "cupy",
    "dask",
    "datasets",
    "fastapi",
    "filelock",
    "fsspec",
    "google",
    "grpc",
    "gymnasium",
    "horovod",
    "huggingface",
    "httpx",
    "joblib",
    "lightgbm",
    "lightgbm_ray",
    "nevergrad",
    "numpy",
    "pandas",
    "pyarrow",
    "pyarrow.compute",
    "pytorch_lightning",
    "scipy",
    "setproctitle",
    "skimage",
    "sklearn",
    "starlette",
    "tensorflow",
    "torch",
    "torchvision",
    "transformers",
    "tree",
    "typer",
    "uvicorn",
    "wandb",
    "watchfiles",
    "openai",
    "xgboost",
    "xgboost_ray",
    "psutil",
    "colorama",
    "vllm",
    # Internal compiled modules
    "ray._raylet",
    "ray.core.generated",
    "ray.serve.generated",
]
# Warn about mock targets imported before the build started:
# autodoc_mock_imports cannot mock a module already present in sys.modules.
for target in autodoc_mock_imports:
    if target not in sys.modules:
        continue
    logger.info(
        f"Potentially problematic mock target ({target}) found; "
        "autodoc_mock_imports cannot mock modules that have already "
        "been loaded into sys.modules when the sphinx build starts."
    )
| MockVersion |
python | google__jax | tests/lax_numpy_test.py | {
"start": 252560,
"end": 254194
} | class ____(jtu.JaxTestCase):
def test_lax_numpy_docstrings(self):
  """Check every public jnp API has a docstring with Args/Returns/Examples.

  Aliases only need an "Alias of" note; a few known exceptions are skipped.
  """
  unimplemented = ['fromfile', 'fromiter']
  aliases = ['abs', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atanh', 'atan2',
             'amax', 'amin', 'around', 'bitwise_invert', 'bitwise_left_shift',
             'bitwise_not','bitwise_right_shift', 'conj', 'degrees', 'divide',
             'get_printoptions', 'mod', 'pow', 'printoptions', 'radians', 'round_',
             'set_printoptions']
  skip_args_check = ['vsplit', 'hsplit', 'dsplit', 'array_split']

  for name in dir(jnp):
    if name.startswith('_') or name in unimplemented:
      continue
    value = getattr(jnp, name)
    # Skip docstring checks for non-functions.
    if isinstance(value, type) or not callable(value):
      continue
    # Some APIs are imported directly from NumPy; we don't check these.
    if hasattr(np, name) and value is getattr(np, name):
      continue
    if name in aliases:
      assert "Alias of" in value.__doc__
      continue
    if name in skip_args_check:
      continue
    # Other functions should have nontrivial docs including "Args" and "Returns".
    doc = value.__doc__
    self.assertNotEmpty(doc)
    self.assertIn("Args:", doc, msg=f"'Args:' not found in docstring of jnp.{name}")
    self.assertIn("Returns:", doc, msg=f"'Returns:' not found in docstring of jnp.{name}")
    if name not in ["frompyfunc", "isdtype", "promote_types"]:
      self.assertIn("Examples:", doc, msg=f"'Examples:' not found in docstring of jnp.{name}")
# Script entry point: run the tests with absl's runner using the custom
# JAX test loader.
if __name__ == "__main__":
  absltest.main(testLoader=jtu.JaxTestLoader())
| NumpyDocTests |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.