language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | airbytehq__airbyte | airbyte-ci/connectors/connectors_qa/src/connectors_qa/checks/metadata.py | {
"start": 4237,
"end": 6859
} | class ____(MetadataCheck):
name = "Python connectors must have a CDK tag in metadata"
description = f"Python connectors must have a CDK tag in their metadata. It must be set in the `tags` field in {consts.METADATA_FILE_NAME}. The values can be `cdk:low-code`, `cdk:python`, or `cdk:file`."
applies_to_connector_languages = [ConnectorLanguage.PYTHON, ConnectorLanguage.LOW_CODE]
class CDKTag:
LOW_CODE = "cdk:low-code"
PYTHON = "cdk:python"
FILE = "cdk:python-file-based"
def get_expected_cdk_tag(self, connector: Connector) -> str:
manifest_file = connector.code_directory / connector.technical_name.replace("-", "_") / consts.LOW_CODE_MANIFEST_FILE_NAME
pyproject_file = connector.code_directory / consts.PYPROJECT_FILE_NAME
setup_py_file = connector.code_directory / consts.SETUP_PY_FILE_NAME
if manifest_file.exists():
return self.CDKTag.LOW_CODE
if pyproject_file.exists():
pyproject = toml.load((connector.code_directory / consts.PYPROJECT_FILE_NAME))
cdk_deps = pyproject["tool"]["poetry"]["dependencies"].get("airbyte-cdk", None)
if cdk_deps and isinstance(cdk_deps, dict) and "file-based" in cdk_deps.get("extras", []):
return self.CDKTag.FILE
if setup_py_file.exists():
if "airbyte-cdk[file-based]" in (connector.code_directory / consts.SETUP_PY_FILE_NAME).read_text():
return self.CDKTag.FILE
return self.CDKTag.PYTHON
def _run(self, connector: Connector) -> CheckResult:
current_cdk_tags = [t for t in connector.metadata.get("tags", []) if t.startswith("cdk:")]
expected_cdk_tag = self.get_expected_cdk_tag(connector)
if not current_cdk_tags:
return self.fail(
connector=connector,
message="CDK tag is missing in the metadata file",
)
if len(current_cdk_tags) > 1:
return self.fail(
connector=connector,
message=f"Multiple CDK tags found in the metadata file: {current_cdk_tags}",
)
if current_cdk_tags[0] != expected_cdk_tag:
return self.fail(
connector=connector,
message=f"Expected CDK tag '{self.get_expected_cdk_tag(connector)}' in the {consts.METADATA_FILE_NAME} file, but found '{current_cdk_tags[0]}'",
)
return self.pass_(
connector=connector,
message=f"CDK tag {self.get_expected_cdk_tag(connector)} is present in the metadata file",
)
| CheckConnectorCDKTag |
python | huggingface__transformers | src/transformers/models/rt_detr_v2/modular_rt_detr_v2.py | {
"start": 28019,
"end": 29237
} | class ____(RTDetrForObjectDetection, RTDetrV2PreTrainedModel):
_tied_weights_keys = {
r"bbox_embed.(?![0])\d+": r"bbox_embed.0",
r"class_embed.(?![0])\d+": r"^class_embed.0",
"model.decoder.class_embed": "class_embed",
"model.decoder.bbox_embed": "bbox_embed",
}
def __init__(self, config: RTDetrV2Config):
RTDetrV2PreTrainedModel.__init__(self, config)
# RTDETR encoder-decoder model
self.model = RTDetrV2Model(config)
self.class_embed = nn.ModuleList(
[torch.nn.Linear(config.d_model, config.num_labels) for _ in range(config.decoder_layers)]
)
self.bbox_embed = nn.ModuleList(
[
RTDetrV2MLPPredictionHead(config, config.d_model, config.d_model, 4, num_layers=3)
for _ in range(config.decoder_layers)
]
)
self.model.decoder.class_embed = self.class_embed
self.model.decoder.bbox_embed = self.bbox_embed
# Initialize weights and apply final processing
self.post_init()
__all__ = [
"RTDetrV2Config",
"RTDetrV2Model",
"RTDetrV2PreTrainedModel",
"RTDetrV2ForObjectDetection",
]
| RTDetrV2ForObjectDetection |
python | walkccc__LeetCode | solutions/3259. Maximum Energy Boost From Two Drinks/3259.py | {
"start": 0,
"end": 379
} | class ____:
def maxEnergyBoost(
self,
energyDrinkA: list[int],
energyDrinkB: list[int]
) -> int:
dpA = 0 # the maximum energy boost if the last drink is A
dpB = 0 # the maximum energy boost if the last drink is B
for a, b in zip(energyDrinkA, energyDrinkB):
dpA, dpB = max(dpB, dpA + a), max(dpA, dpB + b)
return max(dpA, dpB)
| Solution |
python | getsentry__sentry | src/sentry/core/endpoints/project_keys.py | {
"start": 1312,
"end": 5615
} | class ____(ProjectEndpoint):
publish_status = {
"GET": ApiPublishStatus.PUBLIC,
"POST": ApiPublishStatus.PUBLIC,
}
rate_limits = RateLimitConfig(
limit_overrides={
"GET": {
RateLimitCategory.IP: RateLimit(limit=40, window=1),
RateLimitCategory.USER: RateLimit(limit=40, window=1),
RateLimitCategory.ORGANIZATION: RateLimit(limit=40, window=1),
},
"POST": {
RateLimitCategory.IP: RateLimit(limit=40, window=1),
RateLimitCategory.USER: RateLimit(limit=40, window=1),
RateLimitCategory.ORGANIZATION: RateLimit(limit=40, window=1),
},
},
)
@extend_schema(
operation_id="List a Project's Client Keys",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
GlobalParams.PROJECT_ID_OR_SLUG,
CursorQueryParam,
ProjectParams.STATUS,
],
responses={
200: inline_sentry_response_serializer(
"ListClientKeysResponse", list[ProjectKeySerializerResponse]
),
400: RESPONSE_BAD_REQUEST,
403: RESPONSE_FORBIDDEN,
},
examples=ProjectExamples.LIST_CLIENT_KEYS,
)
def get(self, request: Request, project) -> Response:
"""
Return a list of client keys bound to a project.
"""
queryset = ProjectKey.objects.for_request(request).filter(
project=project, roles=F("roles").bitor(ProjectKey.roles.store)
)
status = request.GET.get("status")
if status == "active":
queryset = queryset.filter(status=ProjectKeyStatus.ACTIVE)
elif status == "inactive":
queryset = queryset.filter(status=ProjectKeyStatus.INACTIVE)
elif status:
queryset = queryset.none()
return self.paginate(
request=request,
queryset=queryset,
order_by="-id",
default_per_page=10,
on_results=lambda x: serialize(x, request.user, request=request),
)
@extend_schema(
operation_id="Create a New Client Key",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
GlobalParams.PROJECT_ID_OR_SLUG,
],
request=ProjectKeyPostSerializer,
responses={
201: ProjectKeySerializer,
400: RESPONSE_BAD_REQUEST,
403: RESPONSE_FORBIDDEN,
},
examples=ProjectExamples.CLIENT_KEY_RESPONSE,
)
def post(self, request: Request, project) -> Response:
"""
Create a new client key bound to a project. The key's secret and public key
are generated by the server.
"""
serializer = ProjectKeyPostSerializer(data=request.data)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
result = serializer.validated_data
rate_limit_count = None
rate_limit_window = None
if features.has("projects:rate-limits", project):
ratelimit = result.get("rateLimit", -1)
if ratelimit != -1 and (ratelimit["count"] and ratelimit["window"]):
rate_limit_count = result["rateLimit"]["count"]
rate_limit_window = result["rateLimit"]["window"]
if is_active_superuser(request):
use_case = result.get("useCase", UseCase.USER.value)
else:
use_case = UseCase.USER.value
key = ProjectKey.objects.create(
project=project,
label=result.get("name"),
public_key=result.get("public"),
secret_key=result.get("secret"),
rate_limit_count=rate_limit_count,
rate_limit_window=rate_limit_window,
data=get_default_loader_data(project),
use_case=use_case,
)
self.create_audit_entry(
request=request,
organization=project.organization,
target_object=key.id,
event=audit_log.get_event_id("PROJECTKEY_ADD"),
data=key.get_audit_log_data(),
)
return Response(serialize(key, request.user, request=request), status=201)
| ProjectKeysEndpoint |
python | dagster-io__dagster | helm/dagster/schema/schema/charts/dagster/subschema/run_launcher.py | {
"start": 707,
"end": 1770
} | class ____(BaseModel):
image: kubernetes.Image
imagePullPolicy: Optional[kubernetes.PullPolicy] = None
nameOverride: str
configSource: dict
workerQueues: list[CeleryWorkerQueue] = Field(min_items=1)
env: dict[str, str]
envConfigMaps: list[kubernetes.ConfigMapEnvSource]
envSecrets: list[kubernetes.SecretEnvSource]
annotations: kubernetes.Annotations
nodeSelector: kubernetes.NodeSelector
affinity: kubernetes.Affinity
tolerations: kubernetes.Tolerations
podSecurityContext: kubernetes.PodSecurityContext
securityContext: kubernetes.SecurityContext
resources: kubernetes.Resources
checkDbReadyInitContainer: Optional[bool] = None
livenessProbe: kubernetes.LivenessProbe
volumeMounts: list[kubernetes.VolumeMount]
volumes: list[kubernetes.Volume]
labels: Optional[dict[str, str]] = None
failPodOnRunFailure: Optional[bool] = None
schedulerName: Optional[str] = None
jobNamespace: Optional[str] = None
model_config = ConfigDict(extra="forbid")
| CeleryK8sRunLauncherConfig |
python | numba__numba | numba/tests/test_comprehension.py | {
"start": 603,
"end": 7500
} | class ____(TestCase):
def test_comp_list(self):
pyfunc = comp_list
cfunc = njit((types.intp,))(pyfunc)
self.assertEqual(cfunc(5), pyfunc(5))
self.assertEqual(cfunc(0), pyfunc(0))
self.assertEqual(cfunc(-1), pyfunc(-1))
def test_bulk_use_cases(self):
""" Tests the large number of use cases defined below """
# jitted function used in some tests
@jit(nopython=True)
def fib3(n):
if n < 2:
return n
return fib3(n - 1) + fib3(n - 2)
def list1(x):
""" Test basic list comprehension """
return [i for i in range(1, len(x) - 1)]
def list2(x):
""" Test conditional list comprehension """
return [y for y in x if y < 2]
def list3(x):
""" Test ternary list comprehension """
return [y if y < 2 else -1 for y in x]
def list4(x):
""" Test list comprehension to np.array ctor """
return np.array([1, 2, 3])
# expected fail, unsupported type in sequence
def list5(x):
""" Test nested list comprehension to np.array ctor """
return np.array([np.array([z for z in x]) for y in x])
def list6(x):
""" Test use of inner function in list comprehension """
def inner(x):
return x + 1
return [inner(z) for z in x]
def list7(x):
""" Test use of closure in list comprehension """
y = 3
def inner(x):
return x + y
return [inner(z) for z in x]
def list8(x):
""" Test use of list comprehension as arg to inner function """
l = [z + 1 for z in x]
def inner(x):
return x[0] + 1
q = inner(l)
return q
def list9(x):
""" Test use of list comprehension access in closure """
l = [z + 1 for z in x]
def inner(x):
return x[0] + l[1]
return inner(x)
def list10(x):
""" Test use of list comprehension access in closure and as arg """
l = [z + 1 for z in x]
def inner(x):
return [y + l[0] for y in x]
return inner(l)
def list11(x):
""" Test scalar array construction in list comprehension """
l = [np.array(z) for z in x]
return l
def list12(x):
""" Test scalar type conversion construction in list comprehension """
l = [np.float64(z) for z in x]
return l
def list13(x):
""" Test use of explicit numpy scalar ctor reference in list comprehension """
l = [numpy.float64(z) for z in x]
return l
def list14(x):
""" Test use of python scalar ctor reference in list comprehension """
l = [float(z) for z in x]
return l
def list15(x):
""" Test use of python scalar ctor reference in list comprehension followed by np array construction from the list"""
l = [float(z) for z in x]
return np.array(l)
def list16(x):
""" Test type unification from np array ctors consuming list comprehension """
l1 = [float(z) for z in x]
l2 = [z for z in x]
ze = np.array(l1)
oe = np.array(l2)
return ze + oe
def list17(x):
""" Test complex list comprehension including math calls """
return [(a, b, c)
for a in x for b in x for c in x if np.sqrt(a**2 + b**2) == c]
_OUTER_SCOPE_VAR = 9
def list18(x):
""" Test loop list with outer scope var as conditional"""
z = []
for i in x:
if i < _OUTER_SCOPE_VAR:
z.append(i)
return z
_OUTER_SCOPE_VAR = 9
def list19(x):
""" Test list comprehension with outer scope as conditional"""
return [i for i in x if i < _OUTER_SCOPE_VAR]
def list20(x):
""" Test return empty list """
return [i for i in x if i == -1000]
def list21(x):
""" Test call a jitted function in a list comprehension """
return [fib3(i) for i in x]
def list22(x):
""" Test create two lists comprehensions and a third walking the first two """
a = [y - 1 for y in x]
b = [y + 1 for y in x]
return [x for x in a for y in b if x == y]
def list23(x):
""" Test operation on comprehension generated list """
z = [y for y in x]
z.append(1)
return z
def list24(x):
""" Test type promotion """
z = [float(y) if y > 3 else y for y in x]
return z
def list25(x):
# See issue #6260. Old style inline_closure_call uses get_ir_of_code
# for the closure->IR transform, without SSA there's multiply
# defined labels, the unary negation is self referent and DCE runs
# eliminating the duplicated labels.
included = np.array([1, 2, 6, 8])
not_included = [i for i in range(10) if i not in list(included)]
return not_included
# functions to test that are expected to pass
f = [list1, list2, list3, list4,
list6, list7, list8, list9, list10, list11,
list12, list13, list14, list15,
list16, list17, list18, list19, list20,
list21, list22, list23, list24, list25]
var = [1, 2, 3, 4, 5]
for ref in f:
try:
cfunc = jit(nopython=True)(ref)
self.assertEqual(cfunc(var), ref(var))
except ValueError: # likely np array returned
try:
np.testing.assert_allclose(cfunc(var), ref(var))
except Exception:
raise
# test functions that are expected to fail
with self.assertRaises(TypingError) as raises:
cfunc = jit(nopython=True)(list5)
cfunc(var)
# TODO: we can't really assert the error message for the above
# Also, test_nested_array is a similar case (but without list) that works.
if sys.maxsize > 2 ** 32:
bits = 64
else:
bits = 32
def test_objmode_inlining(self):
def objmode_func(y):
z = object()
inlined = [x for x in y]
return inlined
cfunc = jit(forceobj=True)(objmode_func)
t = [1, 2, 3]
expected = objmode_func(t)
got = cfunc(t)
self.assertPreciseEqual(expected, got)
| TestListComprehension |
python | getlogbook__logbook | src/logbook/compat.py | {
"start": 5066,
"end": 7739
} | class ____(logbook.Handler):
"""Does the opposite of the :class:`RedirectLoggingHandler`, it sends
messages from logbook to logging. Because of that, it's a very bad
idea to configure both.
This handler is for logbook and will pass stuff over to a logger
from the standard library.
Example usage::
from logbook.compat import LoggingHandler, warn
with LoggingHandler():
warn("This goes to logging")
"""
def __init__(self, logger=None, level=logbook.NOTSET, filter=None, bubble=False):
logbook.Handler.__init__(self, level, filter, bubble)
if logger is None:
logger = logging.getLogger()
elif isinstance(logger, str):
logger = logging.getLogger(logger)
self.logger = logger
def get_logger(self, record):
"""Returns the logger to use for this record. This implementation
always return :attr:`logger`.
"""
return self.logger
def convert_level(self, level):
"""Converts a logbook level into a logging level."""
if level >= logbook.CRITICAL:
return logging.CRITICAL
if level >= logbook.ERROR:
return logging.ERROR
if level >= logbook.WARNING:
return logging.WARNING
if level >= logbook.INFO:
return logging.INFO
return logging.DEBUG
def convert_time(self, dt):
"""Converts a datetime object into a timestamp."""
if dt.tzinfo is None:
# Logbook uses naive datetimes to represent UTC (utcnow)
return dt.replace(tzinfo=timezone.utc).timestamp()
return dt.timestamp()
def convert_record(self, old_record):
"""Converts a record from logbook to logging."""
record = logging.LogRecord(
old_record.channel,
self.convert_level(old_record.level),
old_record.filename,
old_record.lineno,
old_record.message,
(),
old_record.exc_info,
func=old_record.func_name,
)
for key, value in old_record.extra.items():
record.__dict__.setdefault(key, value)
record.created = self.convert_time(old_record.time)
return record
def emit(self, record):
self.get_logger(record).handle(self.convert_record(record))
def redirect_warnings():
"""Like :func:`redirected_warnings` but will redirect all warnings
to the shutdown of the interpreter:
.. code-block:: python
from logbook.compat import redirect_warnings
redirect_warnings()
"""
redirected_warnings().__enter__()
| LoggingHandler |
python | scrapy__scrapy | tests/test_http_response.py | {
"start": 31642,
"end": 33403
} | class ____(TestTextResponse):
response_class = HtmlResponse
def test_html_encoding(self):
body = b"""<html><head><title>Some page</title>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
</head><body>Price: \xa3100</body></html>'
"""
r1 = self.response_class("http://www.example.com", body=body)
self._assert_response_values(r1, "iso-8859-1", body)
body = b"""<?xml version="1.0" encoding="iso-8859-1"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
Price: \xa3100
"""
r2 = self.response_class("http://www.example.com", body=body)
self._assert_response_values(r2, "iso-8859-1", body)
# for conflicting declarations headers must take precedence
body = b"""<html><head><title>Some page</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head><body>Price: \xa3100</body></html>'
"""
r3 = self.response_class(
"http://www.example.com",
body=body,
headers={"Content-type": ["text/html; charset=iso-8859-1"]},
)
self._assert_response_values(r3, "iso-8859-1", body)
# make sure replace() preserves the encoding of the original response
body = b"New body \xa3"
r4 = r3.replace(body=body)
self._assert_response_values(r4, "iso-8859-1", body)
def test_html5_meta_charset(self):
body = b"""<html><head><meta charset="gb2312" /><title>Some page</title><body>bla bla</body>"""
r1 = self.response_class("http://www.example.com", body=body)
self._assert_response_values(r1, "gb2312", body)
| TestHtmlResponse |
python | mlflow__mlflow | mlflow/projects/backend/abstract_backend.py | {
"start": 115,
"end": 2113
} | class ____:
"""
Abstract plugin class defining the interface needed to execute MLflow projects. You can define
subclasses of ``AbstractBackend`` and expose them as third-party plugins to enable running
MLflow projects against custom execution backends (e.g. to run projects against your team's
in-house cluster or job scheduler). See `MLflow Plugins <../../plugins.html>`_ for more
information.
"""
__metaclass__ = ABCMeta
@abstractmethod
def run(
self,
project_uri,
entry_point,
params,
version,
backend_config,
tracking_uri,
experiment_id,
):
"""
Submit an entrypoint. It must return a SubmittedRun object to track the execution
Args:
project_uri: URI of the project to execute, e.g. a local filesystem path
or a Git repository URI like https://github.com/mlflow/mlflow-example
entry_point: Entry point to run within the project.
params: Dict of parameters to pass to the entry point
version: For git-based projects, either a commit hash or a branch name.
backend_config: A dictionary, or a path to a JSON file (must end in '.json'), which
will be passed as config to the backend. The exact content which
should be provided is different for each execution backend and is
documented at https://www.mlflow.org/docs/latest/projects.html.
tracking_uri: URI of tracking server against which to log run information related
to project execution.
experiment_id: ID of experiment under which to launch the run.
Returns:
A :py:class:`mlflow.projects.SubmittedRun`. This function is expected to run
the project asynchronously, i.e. it should trigger project execution and then
immediately return a `SubmittedRun` to track execution status.
"""
| AbstractBackend |
python | pandas-dev__pandas | pandas/tests/io/test_clipboard.py | {
"start": 4986,
"end": 12560
} | class ____:
# Test that default arguments copy as tab delimited
# Test that explicit delimiters are respected
@pytest.mark.parametrize("sep", [None, "\t", ",", "|"])
@pytest.mark.parametrize("encoding", [None, "UTF-8", "utf-8", "utf8"])
def test_round_trip_frame_sep(self, df, sep, encoding):
df.to_clipboard(excel=None, sep=sep, encoding=encoding)
result = read_clipboard(sep=sep or "\t", index_col=0, encoding=encoding)
tm.assert_frame_equal(df, result)
# Test white space separator
def test_round_trip_frame_string(self, df):
df.to_clipboard(excel=False, sep=None)
result = read_clipboard()
assert df.to_string() == result.to_string()
assert df.shape == result.shape
# Two character separator is not supported in to_clipboard
# Test that multi-character separators are not silently passed
def test_excel_sep_warning(self, df):
with tm.assert_produces_warning(
UserWarning,
match="to_clipboard in excel mode requires a single character separator.",
check_stacklevel=False,
):
df.to_clipboard(excel=True, sep=r"\t")
# Separator is ignored when excel=False and should produce a warning
def test_copy_delim_warning(self, df):
with tm.assert_produces_warning(UserWarning, match="ignores the sep argument"):
df.to_clipboard(excel=False, sep="\t")
# Tests that the default behavior of to_clipboard is tab
# delimited and excel="True"
@pytest.mark.parametrize("sep", ["\t", None, "default"])
@pytest.mark.parametrize("excel", [True, None, "default"])
def test_clipboard_copy_tabs_default(self, sep, excel, df, clipboard):
kwargs = build_kwargs(sep, excel)
df.to_clipboard(**kwargs)
assert clipboard.text() == df.to_csv(sep="\t")
# Tests reading of white space separated tables
@pytest.mark.parametrize("sep", [None, "default"])
def test_clipboard_copy_strings(self, sep, df):
kwargs = build_kwargs(sep, False)
df.to_clipboard(**kwargs)
result = read_clipboard(sep=r"\s+")
assert result.to_string() == df.to_string()
assert df.shape == result.shape
def test_read_clipboard_infer_excel(self, clipboard):
# gh-19010: avoid warnings
clip_kwargs = {"engine": "python"}
text = dedent(
"""
John James\tCharlie Mingus
1\t2
4\tHarry Carney
""".strip()
)
clipboard.setText(text)
df = read_clipboard(**clip_kwargs)
# excel data is parsed correctly
assert df.iloc[1, 1] == "Harry Carney"
# having diff tab counts doesn't trigger it
text = dedent(
"""
a\t b
1 2
3 4
""".strip()
)
clipboard.setText(text)
res = read_clipboard(**clip_kwargs)
text = dedent(
"""
a b
1 2
3 4
""".strip()
)
clipboard.setText(text)
exp = read_clipboard(**clip_kwargs)
tm.assert_frame_equal(res, exp)
def test_infer_excel_with_nulls(self, clipboard):
# GH41108
text = "col1\tcol2\n1\tred\n\tblue\n2\tgreen"
clipboard.setText(text)
df = read_clipboard()
df_expected = DataFrame(
data={"col1": [1, None, 2], "col2": ["red", "blue", "green"]}
)
# excel data is parsed correctly
tm.assert_frame_equal(df, df_expected)
@pytest.mark.parametrize(
"multiindex",
[
( # Can't use `dedent` here as it will remove the leading `\t`
"\n".join(
[
"\t\t\tcol1\tcol2",
"A\t0\tTrue\t1\tred",
"A\t1\tTrue\t\tblue",
"B\t0\tFalse\t2\tgreen",
]
),
[["A", "A", "B"], [0, 1, 0], [True, True, False]],
),
(
"\n".join(
["\t\tcol1\tcol2", "A\t0\t1\tred", "A\t1\t\tblue", "B\t0\t2\tgreen"]
),
[["A", "A", "B"], [0, 1, 0]],
),
],
)
def test_infer_excel_with_multiindex(self, clipboard, multiindex):
# GH41108
clipboard.setText(multiindex[0])
df = read_clipboard()
df_expected = DataFrame(
data={"col1": [1, None, 2], "col2": ["red", "blue", "green"]},
index=multiindex[1],
)
# excel data is parsed correctly
tm.assert_frame_equal(df, df_expected)
def test_invalid_encoding(self, df):
msg = "clipboard only supports utf-8 encoding"
# test case for testing invalid encoding
with pytest.raises(ValueError, match=msg):
df.to_clipboard(encoding="ascii")
with pytest.raises(NotImplementedError, match=msg):
read_clipboard(encoding="ascii")
@pytest.mark.parametrize("data", ["\U0001f44d...", "Ωœ∑`...", "abcd..."])
def test_raw_roundtrip(self, data):
# PR #25040 wide unicode wasn't copied correctly on PY3 on windows
df = DataFrame({"data": [data]})
df.to_clipboard()
result = read_clipboard()
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize("engine", ["c", "python"])
def test_read_clipboard_dtype_backend(
self, clipboard, string_storage, dtype_backend, engine, using_infer_string
):
# GH#50502
if dtype_backend == "pyarrow":
pa = pytest.importorskip("pyarrow")
string_dtype = pd.ArrowDtype(pa.string())
else:
string_dtype = pd.StringDtype(string_storage)
text = """a,b,c,d,e,f,g,h,i
x,1,4.0,x,2,4.0,,True,False
y,2,5.0,,,,,False,"""
clipboard.setText(text)
with pd.option_context("mode.string_storage", string_storage):
result = read_clipboard(sep=",", dtype_backend=dtype_backend, engine=engine)
expected = DataFrame(
{
"a": Series(["x", "y"], dtype=string_dtype),
"b": Series([1, 2], dtype="Int64"),
"c": Series([4.0, 5.0], dtype="Float64"),
"d": Series(["x", None], dtype=string_dtype),
"e": Series([2, NA], dtype="Int64"),
"f": Series([4.0, NA], dtype="Float64"),
"g": Series([NA, NA], dtype="Int64"),
"h": Series([True, False], dtype="boolean"),
"i": Series([False, NA], dtype="boolean"),
}
)
if dtype_backend == "pyarrow":
from pandas.arrays import ArrowExtensionArray
expected = DataFrame(
{
col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True))
for col in expected.columns
}
)
expected["g"] = ArrowExtensionArray(pa.array([None, None]))
if using_infer_string:
expected.columns = expected.columns.astype(
pd.StringDtype(string_storage, na_value=np.nan)
)
tm.assert_frame_equal(result, expected)
def test_invalid_dtype_backend(self):
msg = (
"dtype_backend numpy is invalid, only 'numpy_nullable' and "
"'pyarrow' are allowed."
)
with pytest.raises(ValueError, match=msg):
read_clipboard(dtype_backend="numpy")
| TestClipboard |
python | mlflow__mlflow | mlflow/genai/evaluation/entities.py | {
"start": 660,
"end": 5048
} | class ____:
"""Represents a row in the evaluation dataset."""
"""Unique identifier for the eval item."""
request_id: str
"""Raw input to the model/application when `evaluate` is called."""
inputs: dict[str, Any]
"""Raw output from the model/application."""
outputs: Any
"""Expectations from the eval item."""
expectations: dict[str, Any]
"""Tags from the eval item."""
tags: dict[str, str] | None = None
"""Trace of the model invocation."""
trace: Trace | None = None
"""Error message if the model invocation fails."""
error_message: str | None = None
"""Source information for the eval item (e.g., from which trace it was created)."""
source: DatasetRecordSource | None = None
@classmethod
def from_dataset_row(cls, row: dict[str, Any]) -> "EvalItem":
"""
Create an EvalItem from a row of input Pandas Dataframe row.
"""
if (inputs := row.get(InputDatasetColumn.INPUTS)) is not None:
inputs = cls._parse_inputs(inputs)
outputs = row.get(InputDatasetColumn.OUTPUTS)
# Extract trace column from the dataset.
trace = row.get(InputDatasetColumn.TRACE)
if is_none_or_nan(trace):
trace = None
else:
trace = trace if isinstance(trace, Trace) else Trace.from_json(trace)
# Extract expectations column from the dataset.
expectations = row.get(InputDatasetColumn.EXPECTATIONS, {})
# Extract tags column from the dataset.
tags = row.get(InputDatasetColumn.TAGS, {})
# Extract source column from the dataset.
source = row.get(InputDatasetColumn.SOURCE)
if is_none_or_nan(source):
source = None
# Get the request ID from the row, or generate a new unique ID if not present.
request_id = row.get(InputDatasetColumn.REQUEST_ID)
if is_none_or_nan(request_id):
hashable_strings = [
str(x) for x in [inputs, outputs, trace, expectations] if x is not None
]
# this should not happen, but added a check in case
if not hashable_strings:
raise MlflowException.invalid_parameter_value(
"Dataset row must contain at least one non-None value"
)
request_id = hashlib.sha256(str(hashable_strings[0]).encode()).hexdigest()
return cls(
request_id=request_id,
inputs=inputs,
outputs=outputs,
expectations=expectations,
tags=tags,
trace=trace,
source=source,
)
@classmethod
def _parse_inputs(cls, data: str | dict[str, Any]) -> Any:
# The inputs can be either a dictionary or JSON-serialized version of it.
if isinstance(data, dict):
return data
elif isinstance(data, str): # JSON-serialized string
try:
return json.loads(data)
except Exception:
pass
return data
def get_expectation_assessments(self) -> list[Expectation]:
"""Get the expectations as a list of Expectation objects."""
expectations = []
for name, value in self.expectations.items():
source_id = get_context().get_user_name()
expectations.append(
Expectation(
trace_id=self.trace.info.trace_id if self.trace else None,
name=name,
source=AssessmentSource(
source_type=AssessmentSourceType.HUMAN,
source_id=source_id or "unknown",
),
value=value,
)
)
return expectations
def to_dict(self) -> dict[str, Any]:
inputs = {
ResultDataFrameColumn.REQUEST_ID: self.request_id,
ResultDataFrameColumn.INPUTS: self.inputs,
ResultDataFrameColumn.OUTPUTS: self.outputs,
ResultDataFrameColumn.TRACE: self.trace.to_json() if self.trace else None,
ResultDataFrameColumn.EXPECTATIONS: self.expectations,
ResultDataFrameColumn.TAGS: self.tags,
ResultDataFrameColumn.ERROR_MESSAGE: self.error_message,
}
return {k: v for k, v in inputs.items() if v is not None}
@dataclass
| EvalItem |
python | numba__numba | numba/tests/test_annotations.py | {
"start": 4735,
"end": 7540
} | class ____(unittest.TestCase):
def findpatloc(self, lines, pat):
for i, ln in enumerate(lines):
if pat in ln:
return i
raise ValueError("can't find {!r}".format(pat))
def getlines(self, func):
strbuf = StringIO()
func.inspect_types(strbuf)
return strbuf.getvalue().splitlines()
def test_delete(self):
@numba.njit
def foo(appleorange, berrycherry):
return appleorange + berrycherry
foo(1, 2)
lines = self.getlines(foo)
# Ensure deletion show up after their use
sa = self.findpatloc(lines, 'appleorange = arg(0, name=appleorange)')
sb = self.findpatloc(lines, 'berrycherry = arg(1, name=berrycherry)')
ea = self.findpatloc(lines, 'del appleorange')
eb = self.findpatloc(lines, 'del berrycherry')
self.assertLess(sa, ea)
self.assertLess(sb, eb)
def _lifetimes_impl(self, extend):
with override_config('EXTEND_VARIABLE_LIFETIMES', extend):
@njit
def foo(a):
b = a
return b
x = 10
b = foo(x)
self.assertEqual(b, x)
lines = self.getlines(foo)
sa = self.findpatloc(lines, 'a = arg(0, name=a)')
sb = self.findpatloc(lines, 'b = a')
cast_ret = self.findpatloc(lines, 'cast(value=b)')
dela = self.findpatloc(lines, 'del a')
delb = self.findpatloc(lines, 'del b')
return sa, sb, cast_ret, dela, delb
def test_delete_standard_lifetimes(self):
# without extended lifetimes, dels occur as soon as dead
#
# label 0
# a = arg(0, name=a) :: int64
# b = a :: int64
# del a
# $8return_value.2 = cast(value=b) :: int64
# del b
# return $8return_value.2
sa, sb, cast_ret, dela, delb = self._lifetimes_impl(extend=0)
self.assertLess(sa, dela)
self.assertLess(sb, delb)
# del a is before cast and del b is after
self.assertLess(dela, cast_ret)
self.assertGreater(delb, cast_ret)
def test_delete_extended_lifetimes(self):
# with extended lifetimes, dels are last in block:
#
# label 0
# a = arg(0, name=a) :: int64
# b = a :: int64
# $8return_value.2 = cast(value=b) :: int64
# del a
# del b
# return $8return_value.2
sa, sb, cast_ret, dela, delb = self._lifetimes_impl(extend=1)
self.assertLess(sa, dela)
self.assertLess(sb, delb)
# dels are after the cast
self.assertGreater(dela, cast_ret)
self.assertGreater(delb, cast_ret)
if __name__ == '__main__':
unittest.main()
| TestTypeAnnotation |
python | sphinx-doc__sphinx | sphinx/search/zh.py | {
"start": 847,
"end": 2292
} | class ____(SearchLanguage):
"""Chinese search implementation"""
lang = 'zh'
language_name = 'Chinese'
js_stemmer_rawcode = 'english-stemmer.js'
stopwords = ENGLISH_STOPWORDS
latin1_letters = re.compile(r'[a-zA-Z0-9_]+')
def __init__(self, options: dict[str, str]) -> None:
super().__init__(options)
self.latin_terms: set[str] = set()
dict_path = options.get('dict', JIEBA_DEFAULT_DICT)
if dict_path and Path(dict_path).is_file():
jieba_load_userdict(str(dict_path))
self.stemmer = snowballstemmer.stemmer('english')
def split(self, input: str) -> list[str]:
chinese: list[str] = list(cut_for_search(input))
latin1 = [term.strip() for term in self.latin1_letters.findall(input)]
self.latin_terms.update(latin1)
return chinese + latin1
def word_filter(self, stemmed_word: str) -> bool:
return len(stemmed_word) > 1
def stem(self, word: str) -> str:
# Don't stem Latin words that are long enough to be relevant for search
# if not stemmed, but would be too short after being stemmed
# avoids some issues with acronyms
stemmed = self.stemmer.stemWord(word.lower())
should_not_be_stemmed = (
len(word) >= 3 > len(stemmed) and word in self.latin_terms
)
if should_not_be_stemmed:
return word.lower()
return stemmed
| SearchChinese |
python | numba__llvmlite | llvmlite/tests/test_binding.py | {
"start": 43643,
"end": 50935
} | class ____(BaseTest):
def jit(self, asm=asm_sum, func_name="sum", target_machine=None,
add_process=False, func_type=CFUNCTYPE(c_int, c_int, c_int),
suppress_errors=False):
lljit = llvm.create_lljit_compiler(target_machine,
use_jit_link=False,
suppress_errors=suppress_errors)
builder = llvm.JITLibraryBuilder()
if add_process:
builder.add_current_process()
rt = builder\
.add_ir(asm.format(triple=llvm.get_default_triple()))\
.export_symbol(func_name)\
.link(lljit, func_name)
cfptr = rt[func_name]
self.assertTrue(cfptr)
self.assertEqual(func_name, rt.name)
return lljit, rt, func_type(cfptr)
# From test_dylib_symbols
def test_define_symbol(self):
lljit = llvm.create_lljit_compiler()
rt = llvm.JITLibraryBuilder().import_symbol("__xyzzy", 1234)\
.export_symbol("__xyzzy").link(lljit, "foo")
self.assertEqual(rt["__xyzzy"], 1234)
def test_lookup_undefined_symbol_fails(self):
lljit = llvm.create_lljit_compiler()
with self.assertRaisesRegex(RuntimeError, 'No such library'):
lljit.lookup("foo", "__foobar")
rt = llvm.JITLibraryBuilder().import_symbol("__xyzzy", 1234)\
.export_symbol("__xyzzy").link(lljit, "foo")
self.assertNotEqual(rt["__xyzzy"], 0)
with self.assertRaisesRegex(RuntimeError,
'Symbols not found.*__foobar'):
lljit.lookup("foo", "__foobar")
def test_jit_link(self):
if sys.platform == "win32":
with self.assertRaisesRegex(RuntimeError,
'JITLink .* Windows'):
llvm.create_lljit_compiler(use_jit_link=True)
else:
self.assertIsNotNone(llvm.create_lljit_compiler(use_jit_link=True))
def test_run_code(self):
(lljit, rt, cfunc) = self.jit()
with lljit:
res = cfunc(2, -5)
self.assertEqual(-3, res)
def test_close(self):
(lljit, rt, cfunc) = self.jit()
lljit.close()
lljit.close()
with self.assertRaises(AssertionError):
lljit.lookup("foo", "fn")
def test_with(self):
(lljit, rt, cfunc) = self.jit()
with lljit:
pass
with self.assertRaises(RuntimeError):
with lljit:
pass
with self.assertRaises(AssertionError):
lljit.lookup("foo", "fn")
def test_add_ir_module(self):
(lljit, rt_sum, cfunc_sum) = self.jit()
rt_mul = llvm.JITLibraryBuilder() \
.add_ir(asm_mul.format(triple=llvm.get_default_triple())) \
.export_symbol("mul") \
.link(lljit, "mul")
res = CFUNCTYPE(c_int, c_int, c_int)(rt_mul["mul"])(2, -5)
self.assertEqual(-10, res)
self.assertNotEqual(lljit.lookup("sum", "sum")["sum"], 0)
self.assertNotEqual(lljit.lookup("mul", "mul")["mul"], 0)
with self.assertRaises(RuntimeError):
lljit.lookup("sum", "mul")
with self.assertRaises(RuntimeError):
lljit.lookup("mul", "sum")
def test_remove_module(self):
(lljit, rt_sum, _) = self.jit()
del rt_sum
gc.collect()
with self.assertRaises(RuntimeError):
lljit.lookup("sum", "sum")
lljit.close()
def test_lib_depends(self):
(lljit, rt_sum, cfunc_sum) = self.jit()
rt_mul = llvm.JITLibraryBuilder() \
.add_ir(asm_square_sum.format(triple=llvm.get_default_triple())) \
.export_symbol("square_sum") \
.add_jit_library("sum") \
.link(lljit, "square_sum")
res = CFUNCTYPE(c_int, c_int, c_int)(rt_mul["square_sum"])(2, -5)
self.assertEqual(9, res)
def test_target_data(self):
(lljit, rt, _) = self.jit()
td = lljit.target_data
# A singleton is returned
self.assertIs(lljit.target_data, td)
str(td)
del lljit
str(td)
def test_global_ctors_dtors(self):
# test issue #303
# (https://github.com/numba/llvmlite/issues/303)
shared_value = c_int32(0)
lljit = llvm.create_lljit_compiler()
builder = llvm.JITLibraryBuilder()
rt = builder \
.add_ir(asm_ext_ctors.format(triple=llvm.get_default_triple())) \
.import_symbol("A", ctypes.addressof(shared_value)) \
.export_symbol("foo") \
.link(lljit, "foo")
foo = rt["foo"]
self.assertTrue(foo)
self.assertEqual(CFUNCTYPE(c_int)(foo)(), 12)
del rt
self.assertNotEqual(shared_value.value, 20)
def test_lookup_current_process_symbol_fails(self):
# An attempt to lookup a symbol in the current process (Py_GetVersion,
# in this case) should fail with an appropriate error if we have not
# enabled searching the current process for symbols.
msg = 'Failed to materialize symbols:.*getversion'
with self.assertRaisesRegex(RuntimeError, msg):
self.jit(asm_getversion, "getversion", suppress_errors=True)
def test_lookup_current_process_symbol(self):
self.jit(asm_getversion, "getversion", None, True)
def test_thread_safe(self):
lljit = llvm.create_lljit_compiler()
llvm_ir = asm_sum.format(triple=llvm.get_default_triple())
def compile_many(i):
def do_work():
tracking = []
for c in range(50):
tracking.append(llvm.JITLibraryBuilder()
.add_ir(llvm_ir)
.export_symbol("sum")
.link(lljit, f"sum_{i}_{c}"))
return do_work
ths = [threading.Thread(target=compile_many(i))
for i in range(os.cpu_count())]
for th in ths:
th.start()
for th in ths:
th.join()
def test_add_object_file(self):
target_machine = self.target_machine(jit=False)
mod = self.module()
lljit = llvm.create_lljit_compiler(target_machine)
rt = llvm.JITLibraryBuilder()\
.add_object_img(target_machine.emit_object(mod))\
.export_symbol("sum")\
.link(lljit, "sum")
sum = CFUNCTYPE(c_int, c_int, c_int)(rt["sum"])
self.assertEqual(sum(2, 3), 5)
def test_add_object_file_from_filesystem(self):
target_machine = self.target_machine(jit=False)
mod = self.module()
obj_bin = target_machine.emit_object(mod)
temp_desc, temp_path = mkstemp()
try:
with os.fdopen(temp_desc, "wb") as f:
f.write(obj_bin)
lljit = llvm.create_lljit_compiler(target_machine)
rt = llvm.JITLibraryBuilder() \
.add_object_file(temp_path) \
.export_symbol("sum") \
.link(lljit, "sum")
sum = CFUNCTYPE(c_int, c_int, c_int)(rt["sum"])
self.assertEqual(sum(2, 3), 5)
finally:
os.unlink(temp_path)
| TestOrcLLJIT |
python | walkccc__LeetCode | solutions/335. Self Crossing/335.py | {
"start": 0,
"end": 476
} | class ____:
def isSelfCrossing(self, x: list[int]) -> bool:
if len(x) <= 3:
return False
for i in range(3, len(x)):
if x[i - 2] <= x[i] and x[i - 1] <= x[i - 3]:
return True
if i >= 4 and x[i - 1] == x[i - 3] and x[i - 2] <= x[i] + x[i - 4]:
return True
if i >= 5 and x[i - 4] <= x[i - 2] and x[i - 2] <= x[i] + x[i - 4] and x[i - 1] <= x[i - 3] and x[i - 3] <= x[i - 1] + x[i - 5]:
return True
return False
| Solution |
python | getsentry__sentry | src/sentry/api/analytics.py | {
"start": 263,
"end": 440
} | class ____(analytics.Event):
org_id: int
search_type: str
query: str
@analytics.eventclass("group_similar_issues_embeddings.count")
| OrganizationSavedSearchDeletedEvent |
python | sqlalchemy__sqlalchemy | test/orm/test_unitofwork.py | {
"start": 37921,
"end": 43278
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"data",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("a", String(50)),
Column("b", String(50)),
)
Table(
"subdata",
metadata,
Column("id", Integer, ForeignKey("data.id"), primary_key=True),
Column("c", String(50)),
)
@classmethod
def setup_mappers(cls):
class Data(cls.Basic):
pass
def test_refreshes(self):
Data, data = self.classes.Data, self.tables.data
self.mapper_registry.map_imperatively(
Data,
data,
properties={
"aplusb": column_property(
data.c.a + literal_column("' '") + data.c.b
)
},
)
self._test(True)
def test_no_refresh_ro_column_property_no_expire_on_flush(self):
Data, data = self.classes.Data, self.tables.data
self.mapper_registry.map_imperatively(
Data,
data,
properties={
"aplusb": column_property(
data.c.a + literal_column("' '") + data.c.b,
expire_on_flush=False,
)
},
)
self._test(False)
def test_no_refresh_ro_column_property_expire_on_flush(self):
Data, data = self.classes.Data, self.tables.data
self.mapper_registry.map_imperatively(
Data,
data,
properties={
"aplusb": column_property(
data.c.a + literal_column("' '") + data.c.b,
expire_on_flush=True,
)
},
)
self._test(True)
def test_no_refresh_ro_deferred_no_expire_on_flush(self):
Data, data = self.classes.Data, self.tables.data
self.mapper_registry.map_imperatively(
Data,
data,
properties={
"aplusb": column_property(
data.c.a + literal_column("' '") + data.c.b,
expire_on_flush=False,
deferred=True,
)
},
)
self._test(False, expect_deferred_load=True)
def test_no_refresh_ro_deferred_expire_on_flush(self):
Data, data = self.classes.Data, self.tables.data
self.mapper_registry.map_imperatively(
Data,
data,
properties={
"aplusb": column_property(
data.c.a + literal_column("' '") + data.c.b,
expire_on_flush=True,
deferred=True,
)
},
)
self._test(True, expect_deferred_load=True)
def test_refreshes_post_init(self):
Data, data = self.classes.Data, self.tables.data
m = self.mapper_registry.map_imperatively(Data, data)
m.add_property(
"aplusb",
column_property(data.c.a + literal_column("' '") + data.c.b),
)
self._test(True)
def test_with_inheritance(self):
subdata, data, Data = (
self.tables.subdata,
self.tables.data,
self.classes.Data,
)
class SubData(Data):
pass
self.mapper_registry.map_imperatively(
Data,
data,
properties={
"aplusb": column_property(
data.c.a + literal_column("' '") + data.c.b
)
},
)
self.mapper_registry.map_imperatively(SubData, subdata, inherits=Data)
sess = fixture_session()
sd1 = SubData(a="hello", b="there", c="hi")
sess.add(sd1)
sess.flush()
eq_(sd1.aplusb, "hello there")
def _test(self, expect_expiry, expect_deferred_load=False):
Data = self.classes.Data
with fixture_session() as sess:
d1 = Data(a="hello", b="there")
sess.add(d1)
sess.flush()
eq_(d1.aplusb, "hello there")
d1.b = "bye"
sess.flush()
if expect_expiry:
eq_(d1.aplusb, "hello bye")
else:
eq_(d1.aplusb, "hello there")
d1.b = "foobar"
d1.aplusb = "im setting this explicitly"
sess.flush()
eq_(d1.aplusb, "im setting this explicitly")
sess.commit()
# test issue #3984.
# NOTE: if we only expire_all() here rather than start with brand new
# 'd1', d1.aplusb since it was loaded moves into "expired" and stays
# "undeferred". this is questionable but not as severe as the never-
# loaded attribute being loaded during an unexpire.
with fixture_session() as sess:
d1 = sess.query(Data).first()
d1.b = "so long"
sess.flush()
sess.expire_all()
eq_(d1.b, "so long")
if expect_deferred_load:
eq_("aplusb" in d1.__dict__, False)
else:
eq_("aplusb" in d1.__dict__, True)
eq_(d1.aplusb, "hello so long")
| ColumnPropertyTest |
python | pydantic__pydantic | pydantic/v1/config.py | {
"start": 2479,
"end": 6532
} | class ____:
title: Optional[str] = None
anystr_lower: bool = False
anystr_upper: bool = False
anystr_strip_whitespace: bool = False
min_anystr_length: int = 0
max_anystr_length: Optional[int] = None
validate_all: bool = False
extra: Extra = Extra.ignore
allow_mutation: bool = True
frozen: bool = False
allow_population_by_field_name: bool = False
use_enum_values: bool = False
fields: Dict[str, Union[str, Dict[str, str]]] = {}
validate_assignment: bool = False
error_msg_templates: Dict[str, str] = {}
arbitrary_types_allowed: bool = False
orm_mode: bool = False
getter_dict: Type[GetterDict] = GetterDict
alias_generator: Optional[Callable[[str], str]] = None
keep_untouched: Tuple[type, ...] = ()
schema_extra: Union[Dict[str, Any], 'SchemaExtraCallable'] = {}
json_loads: Callable[[str], Any] = json.loads
json_dumps: Callable[..., str] = json.dumps
json_encoders: Dict[Union[Type[Any], str, ForwardRef], AnyCallable] = {}
underscore_attrs_are_private: bool = False
allow_inf_nan: bool = True
# whether inherited models as fields should be reconstructed as base model,
# and whether such a copy should be shallow or deep
copy_on_model_validation: Literal['none', 'deep', 'shallow'] = 'shallow'
# whether `Union` should check all allowed types before even trying to coerce
smart_union: bool = False
# whether dataclass `__post_init__` should be run before or after validation
post_init_call: Literal['before_validation', 'after_validation'] = 'before_validation'
@classmethod
def get_field_info(cls, name: str) -> Dict[str, Any]:
"""
Get properties of FieldInfo from the `fields` property of the config class.
"""
fields_value = cls.fields.get(name)
if isinstance(fields_value, str):
field_info: Dict[str, Any] = {'alias': fields_value}
elif isinstance(fields_value, dict):
field_info = fields_value
else:
field_info = {}
if 'alias' in field_info:
field_info.setdefault('alias_priority', 2)
if field_info.get('alias_priority', 0) <= 1 and cls.alias_generator:
alias = cls.alias_generator(name)
if not isinstance(alias, str):
raise TypeError(f'Config.alias_generator must return str, not {alias.__class__}')
field_info.update(alias=alias, alias_priority=1)
return field_info
@classmethod
def prepare_field(cls, field: 'ModelField') -> None:
"""
Optional hook to check or modify fields during model creation.
"""
pass
def get_config(config: Union[ConfigDict, Type[object], None]) -> Type[BaseConfig]:
if config is None:
return BaseConfig
else:
config_dict = (
config
if isinstance(config, dict)
else {k: getattr(config, k) for k in dir(config) if not k.startswith('__')}
)
class Config(BaseConfig):
...
for k, v in config_dict.items():
setattr(Config, k, v)
return Config
def inherit_config(self_config: 'ConfigType', parent_config: 'ConfigType', **namespace: Any) -> 'ConfigType':
if not self_config:
base_classes: Tuple['ConfigType', ...] = (parent_config,)
elif self_config == parent_config:
base_classes = (self_config,)
else:
base_classes = self_config, parent_config
namespace['json_encoders'] = {
**getattr(parent_config, 'json_encoders', {}),
**getattr(self_config, 'json_encoders', {}),
**namespace.get('json_encoders', {}),
}
return type('Config', base_classes, namespace)
def prepare_config(config: Type[BaseConfig], cls_name: str) -> None:
if not isinstance(config.extra, Extra):
try:
config.extra = Extra(config.extra)
except ValueError:
raise ValueError(f'"{cls_name}": {config.extra} is not a valid value for "extra"')
| BaseConfig |
python | sympy__sympy | sympy/matrices/expressions/factorizations.py | {
"start": 87,
"end": 233
} | class ____(MatrixExpr):
arg = property(lambda self: self.args[0])
shape = property(lambda self: self.arg.shape) # type: ignore
| Factorization |
python | getsentry__sentry | src/sentry/api/endpoints/rule_snooze.py | {
"start": 8998,
"end": 10357
} | class ____(BaseRuleSnoozeEndpoint[Rule]):
owner = ApiOwner.ISSUES
publish_status = {
"DELETE": ApiPublishStatus.PRIVATE,
"POST": ApiPublishStatus.PRIVATE,
}
rule_field = "rule"
def fetch_rule_list(self, project: Project) -> BaseQuerySet[Rule]:
queryset = Rule.objects.filter(project=project)
return queryset
def fetch_instance(self, rule: Rule, user_id: int | None, **kwargs: Any) -> RuleSnooze:
rule_snooze = RuleSnooze.objects.get(user_id=user_id, rule=rule, **kwargs)
return rule_snooze
def create_instance(self, rule: Rule, user_id: int | None, **kwargs: Any) -> RuleSnooze:
with transaction.atomic(router.db_for_write(RuleSnooze)):
rule_snooze = RuleSnooze.objects.create(user_id=user_id, rule=rule, **kwargs)
_update_workflow_engine_models(rule_snooze, is_enabled=False)
return rule_snooze
def record_audit_log_entry(
self, request: Request, organization: Organization, rule: Rule, **kwargs: Any
) -> None:
self.create_audit_entry(
request=request,
organization=organization,
target_object=rule.id,
event=audit_log.get_event_id("RULE_SNOOZE"),
data=rule.get_audit_log_data(),
**kwargs,
)
@region_silo_endpoint
| RuleSnoozeEndpoint |
python | Textualize__textual | src/textual/widgets/_selection_list.py | {
"start": 2501,
"end": 25078
} | class ____(Generic[SelectionType], OptionList):
"""A vertical selection list that allows making multiple selections."""
BINDINGS = [Binding("space", "select", "Toggle option", show=False)]
"""
| Key(s) | Description |
| :- | :- |
| space | Toggle the state of the highlighted selection. |
"""
COMPONENT_CLASSES: ClassVar[set[str]] = {
"selection-list--button",
"selection-list--button-selected",
"selection-list--button-highlighted",
"selection-list--button-selected-highlighted",
}
"""
| Class | Description |
| :- | :- |
| `selection-list--button` | Target the default button style. |
| `selection-list--button-selected` | Target a selected button style. |
| `selection-list--button-highlighted` | Target a highlighted button style. |
| `selection-list--button-selected-highlighted` | Target a highlighted selected button style. |
"""
DEFAULT_CSS = """
SelectionList {
height: auto;
text-wrap: nowrap;
text-overflow: ellipsis;
& > .selection-list--button {
color: $panel-darken-2;
background: $panel;
}
& > .selection-list--button-highlighted {
color: $panel-darken-2;
background: $panel;
}
& > .selection-list--button-selected {
color: $text-success;
background: $panel;
}
& > .selection-list--button-selected-highlighted {
color: $text-success;
background: $panel;
}
}
"""
class SelectionMessage(Generic[MessageSelectionType], Message):
"""Base class for all selection messages."""
def __init__(
self, selection_list: SelectionList[MessageSelectionType], index: int
) -> None:
"""Initialise the selection message.
Args:
selection_list: The selection list that owns the selection.
index: The index of the selection that the message relates to.
"""
super().__init__()
self.selection_list: SelectionList[MessageSelectionType] = selection_list
"""The selection list that sent the message."""
self.selection: Selection[MessageSelectionType] = (
selection_list.get_option_at_index(index)
)
"""The highlighted selection."""
self.selection_index: int = index
"""The index of the selection that the message relates to."""
@property
def control(self) -> OptionList:
"""The selection list that sent the message.
This is an alias for
[`SelectionMessage.selection_list`][textual.widgets.SelectionList.SelectionMessage.selection_list]
and is used by the [`on`][textual.on] decorator.
"""
return self.selection_list
def __rich_repr__(self) -> Result:
yield "selection_list", self.selection_list
yield "selection", self.selection
yield "selection_index", self.selection_index
class SelectionHighlighted(SelectionMessage[MessageSelectionType]):
"""Message sent when a selection is highlighted.
Can be handled using `on_selection_list_selection_highlighted` in a subclass of
[`SelectionList`][textual.widgets.SelectionList] or in a parent node in the DOM.
"""
class SelectionToggled(SelectionMessage[MessageSelectionType]):
"""Message sent when a selection is toggled.
This is only sent when the value is *explicitly* toggled e.g.
via `toggle` or `toggle_all`, or via user interaction.
If you programmatically set a value to be selected, this message will
not be sent, even if it happens to be the opposite of what was
originally selected (i.e. setting a True to a False or vice-versa).
Since this message indicates a toggle occurring at a per-option level,
a message will be sent for each option that is toggled, even when a
bulk action is performed (e.g. via `toggle_all`).
Can be handled using `on_selection_list_selection_toggled` in a subclass of
[`SelectionList`][textual.widgets.SelectionList] or in a parent node in the DOM.
"""
@dataclass
class SelectedChanged(Generic[MessageSelectionType], Message):
"""Message sent when the collection of selected values changes.
This is sent regardless of whether the change occurred via user interaction
or programmatically via the `SelectionList` API.
When a bulk change occurs, such as through `select_all` or `deselect_all`,
only a single `SelectedChanged` message will be sent (rather than one per
option).
Can be handled using `on_selection_list_selected_changed` in a subclass of
[`SelectionList`][textual.widgets.SelectionList] or in a parent node in the DOM.
"""
selection_list: SelectionList[MessageSelectionType]
"""The `SelectionList` that sent the message."""
@property
def control(self) -> SelectionList[MessageSelectionType]:
"""An alias for `selection_list`."""
return self.selection_list
def __init__(
self,
*selections: Selection[SelectionType]
| tuple[ContentText, SelectionType]
| tuple[ContentText, SelectionType, bool],
name: str | None = None,
id: str | None = None,
classes: str | None = None,
disabled: bool = False,
compact: bool = False,
):
"""Initialise the selection list.
Args:
*selections: The content for the selection list.
name: The name of the selection list.
id: The ID of the selection list in the DOM.
classes: The CSS classes of the selection list.
disabled: Whether the selection list is disabled or not.
compact: Enable a compact style?
"""
self._selected: dict[SelectionType, None] = {}
"""Tracking of which values are selected."""
self._send_messages = False
"""Keep track of when we're ready to start sending messages."""
options = [self._make_selection(selection) for selection in selections]
self._values: dict[SelectionType, int] = {
option.value: index for index, option in enumerate(options)
}
"""Keeps track of which value relates to which option."""
super().__init__(*options, name=name, id=id, classes=classes, disabled=disabled)
self.compact = compact
@property
def selected(self) -> list[SelectionType]:
"""The selected values.
This is a list of all of the
[values][textual.widgets.selection_list.Selection.value] associated
with selections in the list that are currently in the selected
state.
"""
return list(self._selected.keys())
def _on_mount(self, _event: events.Mount) -> None:
"""Configure the list once the DOM is ready."""
self._send_messages = True
def _message_changed(self) -> None:
"""Post a message that the selected collection has changed, where appropriate.
Note:
A message will only be sent if `_send_messages` is `True`. This
makes this safe to call before the widget is ready for posting
messages.
"""
if self._send_messages:
self.post_message(self.SelectedChanged(self).set_sender(self))
def _message_toggled(self, option_index: int) -> None:
"""Post a message that an option was toggled, where appropriate.
Note:
A message will only be sent if `_send_messages` is `True`. This
makes this safe to call before the widget is ready for posting
messages.
"""
if self._send_messages:
self.post_message(
self.SelectionToggled(self, option_index).set_sender(self)
)
def _apply_to_all(self, state_change: Callable[[SelectionType], bool]) -> Self:
"""Apply a selection state change to all selection options in the list.
Args:
state_change: The state change function to apply.
Returns:
The [`SelectionList`][textual.widgets.SelectionList] instance.
Note:
This method will post a single
[`SelectedChanged`][textual.widgets.OptionList.SelectedChanged]
message if a change is made in a call to this method.
"""
# Keep track of if anything changed.
changed = False
# Apply the state change function to all options.
# We don't send a SelectedChanged for each option, and instead
# send a single SelectedChanged afterwards if any values change.
with self.prevent(self.SelectedChanged):
for selection in self._options:
changed = (
state_change(cast(Selection[SelectionType], selection).value)
or changed
)
# If the above did make a change, *then* send a message.
if changed:
self._message_changed()
self.refresh()
return self
def _select(self, value: SelectionType) -> bool:
"""Mark the given value as selected.
Args:
value: The value to mark as selected.
Returns:
`True` if the value was selected, `False` if not.
"""
if value not in self._selected:
self._selected[value] = None
self._message_changed()
return True
return False
def select(self, selection: Selection[SelectionType] | SelectionType) -> Self:
"""Mark the given selection as selected.
Args:
selection: The selection to mark as selected.
Returns:
The [`SelectionList`][textual.widgets.SelectionList] instance.
"""
if self._select(
selection.value
if isinstance(selection, Selection)
else cast(SelectionType, selection)
):
self.refresh()
return self
def select_all(self) -> Self:
"""Select all items.
Returns:
The [`SelectionList`][textual.widgets.SelectionList] instance.
"""
return self._apply_to_all(self._select)
def _deselect(self, value: SelectionType) -> bool:
"""Mark the given selection as not selected.
Args:
value: The value to mark as not selected.
Returns:
`True` if the value was deselected, `False` if not.
"""
try:
del self._selected[value]
except KeyError:
return False
self._message_changed()
return True
def deselect(self, selection: Selection[SelectionType] | SelectionType) -> Self:
"""Mark the given selection as not selected.
Args:
selection: The selection to mark as not selected.
Returns:
The [`SelectionList`][textual.widgets.SelectionList] instance.
"""
if self._deselect(
selection.value
if isinstance(selection, Selection)
else cast(SelectionType, selection)
):
self.refresh()
return self
def deselect_all(self) -> Self:
"""Deselect all items.
Returns:
The [`SelectionList`][textual.widgets.SelectionList] instance.
"""
return self._apply_to_all(self._deselect)
def _toggle(self, value: SelectionType) -> bool:
"""Toggle the selection state of the given value.
Args:
value: The value to toggle.
Returns:
`True`.
"""
if value in self._selected:
self._deselect(value)
else:
self._select(value)
self._message_toggled(self._values[value])
return True
def toggle(self, selection: Selection[SelectionType] | SelectionType) -> Self:
"""Toggle the selected state of the given selection.
Args:
selection: The selection to toggle.
Returns:
The [`SelectionList`][textual.widgets.SelectionList] instance.
"""
self._toggle(
selection.value
if isinstance(selection, Selection)
else cast(SelectionType, selection)
)
self.refresh()
return self
def toggle_all(self) -> Self:
"""Toggle all items.
Returns:
The [`SelectionList`][textual.widgets.SelectionList] instance.
"""
return self._apply_to_all(self._toggle)
def _make_selection(
self,
selection: (
Selection[SelectionType]
| tuple[ContentText, SelectionType]
| tuple[ContentText, SelectionType, bool]
),
) -> Selection[SelectionType]:
"""Turn incoming selection data into a `Selection` instance.
Args:
selection: The selection data.
Returns:
An instance of a `Selection`.
Raises:
SelectionError: If the selection was badly-formed.
"""
# If we've been given a tuple of some sort, turn that into a proper
# Selection.
if isinstance(selection, tuple):
if len(selection) == 2:
selection = cast(
"tuple[ContentText, SelectionType, bool]", (*selection, False)
)
elif len(selection) != 3:
raise SelectionError(f"Expected 2 or 3 values, got {len(selection)}")
selection = Selection[SelectionType](*selection)
# At this point we should have a proper selection.
assert isinstance(selection, Selection)
# If the initial state for this is that it's selected, add it to the
# selected collection.
if selection.initial_state:
self._select(selection.value)
return selection
def _toggle_highlighted_selection(self) -> None:
"""Toggle the state of the highlighted selection.
If nothing is selected in the list this is a non-operation.
"""
if self.highlighted is not None:
self.toggle(self.get_option_at_index(self.highlighted))
def _get_left_gutter_width(self) -> int:
"""Returns the size of any left gutter that should be taken into account.
Returns:
The width of the left gutter.
"""
return len(
ToggleButton.BUTTON_LEFT
+ ToggleButton.BUTTON_INNER
+ ToggleButton.BUTTON_RIGHT
+ " "
)
def render_line(self, y: int) -> Strip:
"""Render a line in the display.
Args:
y: The line to render.
Returns:
A [`Strip`][textual.strip.Strip] that is the line to render.
"""
# TODO: This is rather crufty and hard to fathom. Candidate for a rewrite.
# First off, get the underlying prompt from OptionList.
line = super().render_line(y)
# We know the prompt we're going to display, what we're going to do
# is place a CheckBox-a-like button next to it. So to start with
# let's pull out the actual Selection we're looking at right now.
_, scroll_y = self.scroll_offset
selection_index = scroll_y + y
try:
selection = self.get_option_at_index(selection_index)
except OptionDoesNotExist:
return line
# Figure out which component style is relevant for a checkbox on
# this particular line.
component_style = "selection-list--button"
if selection.value in self._selected:
component_style += "-selected"
if self.highlighted == selection_index:
component_style += "-highlighted"
# # # Get the underlying style used for the prompt.
# TODO: This is not a reliable way of getting the base style
underlying_style = next(iter(line)).style or self.rich_style
assert underlying_style is not None
# Get the style for the button.
button_style = self.get_component_rich_style(component_style)
# Build the style for the side characters. Note that this is
# sensitive to the type of character used, so pay attention to
# BUTTON_LEFT and BUTTON_RIGHT.
side_style = Style.from_color(button_style.bgcolor, underlying_style.bgcolor)
# Add the option index to the style. This is used to determine which
# option to select when the button is clicked or hovered.
side_style += Style(meta={"option": selection_index})
button_style += Style(meta={"option": selection_index})
# At this point we should have everything we need to place a
# "button" before the option.
return Strip(
[
Segment(ToggleButton.BUTTON_LEFT, style=side_style),
Segment(ToggleButton.BUTTON_INNER, style=button_style),
Segment(ToggleButton.BUTTON_RIGHT, style=side_style),
Segment(" ", style=underlying_style),
*line,
]
)
def _on_option_list_option_highlighted(
self, event: OptionList.OptionHighlighted
) -> None:
"""Capture the `OptionList` highlight event and turn it into a [`SelectionList`][textual.widgets.SelectionList] event.
Args:
event: The event to capture and recreate.
"""
event.stop()
self.post_message(self.SelectionHighlighted(self, event.option_index))
def _on_option_list_option_selected(self, event: OptionList.OptionSelected) -> None:
"""Capture the `OptionList` selected event and turn it into a [`SelectionList`][textual.widgets.SelectionList] event.
Args:
event: The event to capture and recreate.
"""
event.stop()
self._toggle_highlighted_selection()
def get_option_at_index(self, index: int) -> Selection[SelectionType]:
"""Get the selection option at the given index.
Args:
index: The index of the selection option to get.
Returns:
The selection option at that index.
Raises:
OptionDoesNotExist: If there is no selection option with the index.
"""
return cast("Selection[SelectionType]", super().get_option_at_index(index))
def get_option(self, option_id: str) -> Selection[SelectionType]:
"""Get the selection option with the given ID.
Args:
option_id: The ID of the selection option to get.
Returns:
The selection option with the ID.
Raises:
OptionDoesNotExist: If no selection option has the given ID.
"""
return cast("Selection[SelectionType]", super().get_option(option_id))
def _pre_remove_option(self, option: Option, index: int) -> None:
"""Hook called prior to removing an option."""
assert isinstance(option, Selection)
self._deselect(option.value)
del self._values[option.value]
# Decrement index of options after the one we just removed.
self._values = {
option_value: option_index - 1 if option_index > index else option_index
for option_value, option_index in self._values.items()
}
def add_options(
self,
items: Iterable[
OptionListContent
| Selection[SelectionType]
| tuple[ContentText, SelectionType]
| tuple[ContentText, SelectionType, bool]
],
) -> Self:
"""Add new selection options to the end of the list.
Args:
items: The new items to add.
Returns:
The [`SelectionList`][textual.widgets.SelectionList] instance.
Raises:
DuplicateID: If there is an attempt to use a duplicate ID.
SelectionError: If one of the selection options is of the wrong form.
"""
# This... is sort of sub-optimal, but a natural consequence of
# inheriting from and narrowing down OptionList. Here we don't want
# things like a separator, or a base Option, being passed in. So we
# extend the types of accepted items to keep mypy and friends happy,
# but then we runtime check that we've been given sensible types (in
# this case the supported tuple values).
cleaned_options: list[Selection[SelectionType]] = []
for item in items:
if isinstance(item, tuple):
cleaned_options.append(
self._make_selection(
cast(
"tuple[ContentText, SelectionType] | tuple[ContentText, SelectionType, bool]",
item,
)
)
)
elif isinstance(item, Selection):
cleaned_options.append(self._make_selection(item))
else:
raise SelectionError(
"Only Selection or a prompt/value tuple is supported in SelectionList"
)
# Add the new items to the value mappings.
self._values.update(
{
option.value: index
for index, option in enumerate(cleaned_options, start=self.option_count)
}
)
return super().add_options(cleaned_options)
def add_option(
self,
item: (
OptionListContent
| Selection
| tuple[ContentText, SelectionType]
| tuple[ContentText, SelectionType, bool]
) = None,
) -> Self:
"""Add a new selection option to the end of the list.
Args:
item: The new item to add.
Returns:
The [`SelectionList`][textual.widgets.SelectionList] instance.
Raises:
DuplicateID: If there is an attempt to use a duplicate ID.
SelectionError: If the selection option is of the wrong form.
"""
return self.add_options([item])
def clear_options(self) -> Self:
"""Clear the content of the selection list.
Returns:
The `SelectionList` instance.
"""
self._selected.clear()
self._values.clear()
return super().clear_options()
| SelectionList |
python | opencv__opencv-python | tests/test.py | {
"start": 29,
"end": 370
} | class ____(unittest.TestCase):
""" Simple functionality tests. """
def test_import(self):
""" Test that the cv2 module can be imported. """
import cv2
def test_video_capture(self):
import cv2
cap = cv2.VideoCapture("SampleVideo_1280x720_1mb.mp4")
self.assertTrue(cap.isOpened())
| OpenCVTest |
python | astropy__astropy | astropy/modeling/projections.py | {
"start": 25845,
"end": 26194
} | class ____(Pix2SkyProjection, PseudoCylindrical):
r"""
Hammer-Aitoff projection - pixel to sky.
Corresponds to the ``AIT`` projection in FITS WCS.
.. math::
\phi &= 2 \arg \left(2Z^2 - 1, \frac{\pi}{180^\circ} \frac{Z}{2}x\right) \\
\theta &= \sin^{-1}\left(\frac{\pi}{180^\circ}yZ\right)
"""
| Pix2Sky_HammerAitoff |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/dataplex.py | {
"start": 3808,
"end": 4075
} | class ____(BaseGoogleLink):
"""Helper class for constructing Dataplex Catalog AspectType link."""
name = "Dataplex Catalog AspectType"
key = "dataplex_catalog_aspect_type_key"
format_str = DATAPLEX_CATALOG_ASPECT_TYPE_LINK
| DataplexCatalogAspectTypeLink |
python | encode__django-rest-framework | tests/test_viewsets.py | {
"start": 3521,
"end": 6301
} | class ____(TestCase):
def test_initialize_view_set_with_actions(self):
request = factory.get('/', '', content_type='application/json')
my_view = BasicViewSet.as_view(actions={
'get': 'list',
})
response = my_view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == {'ACTION': 'LIST'}
def test_head_request_against_viewset(self):
request = factory.head('/', '', content_type='application/json')
my_view = BasicViewSet.as_view(actions={
'get': 'list',
})
response = my_view(request)
assert response.status_code == status.HTTP_200_OK
def test_initialize_view_set_with_empty_actions(self):
with pytest.raises(TypeError) as excinfo:
BasicViewSet.as_view()
assert str(excinfo.value) == (
"The `actions` argument must be provided "
"when calling `.as_view()` on a ViewSet. "
"For example `.as_view({'get': 'list'})`")
def test_initialize_view_set_with_both_name_and_suffix(self):
with pytest.raises(TypeError) as excinfo:
BasicViewSet.as_view(name='', suffix='', actions={
'get': 'list',
})
assert str(excinfo.value) == (
"BasicViewSet() received both `name` and `suffix`, "
"which are mutually exclusive arguments.")
def test_args_kwargs_request_action_map_on_self(self):
"""
Test a view only has args, kwargs, request, action_map
once `as_view` has been called.
"""
bare_view = InstanceViewSet()
view = InstanceViewSet.as_view(actions={
'get': 'dummy',
})(factory.get('/')).data['view']
for attribute in ('args', 'kwargs', 'request', 'action_map'):
self.assertNotIn(attribute, dir(bare_view))
self.assertIn(attribute, dir(view))
def test_viewset_action_attr(self):
view = ActionViewSet.as_view(actions={'get': 'list'})
get = view(factory.get('/'))
head = view(factory.head('/'))
assert get.view.action == 'list'
assert head.view.action == 'list'
def test_viewset_action_attr_for_extra_action(self):
view = ActionViewSet.as_view(actions=dict(ActionViewSet.list_action.mapping))
get = view(factory.get('/'))
head = view(factory.head('/'))
assert get.view.action == 'list_action'
assert head.view.action == 'list_action'
@unittest.skipUnless(DJANGO_VERSION >= (5, 1), 'Only for Django 5.1+')
def test_login_required_middleware_compat(self):
view = ActionViewSet.as_view(actions={'get': 'list'})
assert view.login_required is False
| InitializeViewSetsTestCase |
python | euske__pdfminer | pdfminer/layout.py | {
"start": 12214,
"end": 12617
} | class ____(LTTextGroup):
def analyze(self, laparams):
LTTextGroup.analyze(self, laparams)
# reorder the objects from top-right to bottom-left.
self._objs = csort(self._objs, key=lambda obj:
-(1+laparams.boxes_flow)*(obj.x0+obj.x1)
- (1-laparams.boxes_flow)*(obj.y1))
return
## LTLayoutContainer
##
| LTTextGroupTBRL |
python | facebook__pyre-check | client/tests/coverage_data_tests.py | {
"start": 24033,
"end": 28241
} | class ____(testslide.TestCase):
ANNOTATION = cst.Annotation(cst.Name("Foo"))
def _parameter(self, name: str, annotated: bool) -> cst.Param:
return cst.Param(
name=cst.Name(name),
annotation=self.ANNOTATION if annotated else None,
)
def test_from_function_data(self) -> None:
self.assertEqual(
FunctionAnnotationStatus.from_function_data(
is_return_annotated=True,
is_non_static_method=False,
parameters=[
self._parameter("x0", annotated=True),
self._parameter("x1", annotated=True),
self._parameter("x2", annotated=True),
],
),
FunctionAnnotationStatus.FULLY_ANNOTATED,
)
self.assertEqual(
FunctionAnnotationStatus.from_function_data(
is_return_annotated=True,
is_non_static_method=False,
parameters=[
self._parameter("x0", annotated=False),
self._parameter("x1", annotated=False),
self._parameter("x2", annotated=False),
],
),
FunctionAnnotationStatus.PARTIALLY_ANNOTATED,
)
self.assertEqual(
FunctionAnnotationStatus.from_function_data(
is_return_annotated=False,
is_non_static_method=False,
parameters=[
self._parameter("x0", annotated=False),
self._parameter("x1", annotated=False),
self._parameter("x2", annotated=False),
],
),
FunctionAnnotationStatus.NOT_ANNOTATED,
)
self.assertEqual(
FunctionAnnotationStatus.from_function_data(
is_return_annotated=False,
is_non_static_method=False,
parameters=[
self._parameter("x0", annotated=True),
self._parameter("x1", annotated=False),
self._parameter("x2", annotated=False),
],
),
FunctionAnnotationStatus.PARTIALLY_ANNOTATED,
)
# An untyped `self` parameter of a method is not required, but it also
# does not count for partial annotation. As per PEP 484, we need an
# explicitly annotated parameter or return before we'll typecheck a method.
#
# Check several edge cases related to this.
self.assertEqual(
FunctionAnnotationStatus.from_function_data(
is_return_annotated=True,
is_non_static_method=True,
parameters=[
self._parameter("self", annotated=False),
self._parameter("x1", annotated=False),
],
),
FunctionAnnotationStatus.PARTIALLY_ANNOTATED,
)
self.assertEqual(
FunctionAnnotationStatus.from_function_data(
is_return_annotated=True,
is_non_static_method=True,
parameters=[
self._parameter("self", annotated=True),
self._parameter("x1", annotated=False),
],
),
FunctionAnnotationStatus.PARTIALLY_ANNOTATED,
)
self.assertEqual(
FunctionAnnotationStatus.from_function_data(
is_return_annotated=True,
is_non_static_method=True,
parameters=[
self._parameter("self", annotated=False),
],
),
FunctionAnnotationStatus.FULLY_ANNOTATED,
)
# An explicitly annotated `self` suffices to make Pyre typecheck the method.
self.assertEqual(
FunctionAnnotationStatus.from_function_data(
is_return_annotated=False,
is_non_static_method=True,
parameters=[
self._parameter("self", annotated=True),
],
),
FunctionAnnotationStatus.PARTIALLY_ANNOTATED,
)
| FunctionAnnotationStatusTest |
python | fastai__fastai | fastai/tabular/core.py | {
"start": 7077,
"end": 10134
} | class ____(CollBase, GetAttr, FilteredBase):
"A `DataFrame` wrapper that knows which cols are cont/cat/y, and returns rows in `__getitem__`"
_default,with_cont='procs',True
def __init__(self, df, procs=None, cat_names=None, cont_names=None, y_names=None, y_block=None, splits=None,
do_setup=True, device=None, inplace=False, reduce_memory=True):
if inplace and splits is not None and pd.options.mode.chained_assignment is not None:
warn("Using inplace with splits will trigger a pandas error. Set `pd.options.mode.chained_assignment=None` to avoid it.")
if not inplace: df = df.copy()
if reduce_memory: df = df_shrink(df)
if splits is not None: df = df.iloc[sum(splits, [])]
self.dataloaders = delegates(self._dl_type.__init__)(self.dataloaders)
super().__init__(df)
self.y_names,self.device = L(y_names),device
if y_block is None and self.y_names:
# Make ys categorical if they're not numeric
ys = df[self.y_names]
if len(ys.select_dtypes(include='number').columns)!=len(ys.columns): y_block = CategoryBlock()
else: y_block = RegressionBlock()
if y_block is not None and do_setup:
if callable(y_block): y_block = y_block()
procs = L(procs) + y_block.type_tfms
self.cat_names,self.cont_names,self.procs = L(cat_names),L(cont_names),Pipeline(procs)
self.split = len(df) if splits is None else len(splits[0])
if do_setup: self.setup()
def new(self, df, inplace=False):
return type(self)(df, do_setup=False, reduce_memory=False, y_block=TransformBlock(), inplace=inplace,
**attrdict(self, 'procs','cat_names','cont_names','y_names', 'device'))
def subset(self, i): return self.new(self.items[slice(0,self.split) if i==0 else slice(self.split,len(self))])
def copy(self): self.items = self.items.copy(); return self
def decode(self): return self.procs.decode(self)
def decode_row(self, row): return self.new(pd.DataFrame(row).T).decode().items.iloc[0]
def show(self, max_n=10, **kwargs): display_df(self.new(self.all_cols[:max_n]).decode().items)
def setup(self): self.procs.setup(self)
def process(self): self.procs(self)
def loc(self): return self.items.loc
def iloc(self): return _TabIloc(self)
def targ(self): return self.items[self.y_names]
def x_names (self): return self.cat_names + self.cont_names
def n_subsets(self): return 2
def y(self): return self[self.y_names[0]]
def new_empty(self): return self.new(pd.DataFrame({}, columns=self.items.columns))
def to_device(self, d=None):
self.device = d
return self
def all_col_names (self):
ys = [n for n in self.y_names if n in self.items.columns]
return self.x_names + self.y_names if len(ys) == len(self.y_names) else self.x_names
properties(Tabular,'loc','iloc','targ','all_col_names','n_subsets','x_names','y')
# %% ../../nbs/40_tabular.core.ipynb 51
| Tabular |
python | marshmallow-code__marshmallow | tests/test_options.py | {
"start": 2756,
"end": 4356
} | class ____:
class AddFieldsSchema(Schema):
name = fields.Str()
class Meta:
include = {"from": fields.Str()}
def test_fields_are_added(self):
s = self.AddFieldsSchema()
in_data = {"name": "Steve", "from": "Oskosh"}
result = s.load({"name": "Steve", "from": "Oskosh"})
assert result == in_data
def test_included_fields_ordered_after_declared_fields(self):
class AddFieldsOrdered(Schema):
name = fields.Str()
email = fields.Str()
class Meta:
include = {
"from": fields.Str(),
"in": fields.Str(),
"@at": fields.Str(),
}
s = AddFieldsOrdered()
in_data = {
"name": "Steve",
"from": "Oskosh",
"email": "steve@steve.steve",
"in": "VA",
"@at": "Charlottesville",
}
# declared fields, then "included" fields
expected_fields = ["name", "email", "from", "in", "@at"]
assert list(AddFieldsOrdered._declared_fields.keys()) == expected_fields
result = s.load(in_data)
assert list(result.keys()) == expected_fields
def test_added_fields_are_inherited(self):
class AddFieldsChild(self.AddFieldsSchema): # type: ignore[name-defined]
email = fields.Str()
s = AddFieldsChild()
assert "email" in s._declared_fields
assert "from" in s._declared_fields
assert isinstance(s._declared_fields["from"], fields.Str)
| TestIncludeOption |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/path_registry.py | {
"start": 12502,
"end": 13137
} | class ____(orm_base.InspectionAttr, HasCacheKey, str):
"""cacheable string token"""
_intern: Dict[str, PathToken] = {}
def _gen_cache_key(
self, anon_map: anon_map, bindparams: List[BindParameter[Any]]
) -> Tuple[Any, ...]:
return (str(self),)
@property
def _path_for_compare(self) -> Optional[_PathRepresentation]:
return None
@classmethod
def intern(cls, strvalue: str) -> PathToken:
if strvalue in cls._intern:
return cls._intern[strvalue]
else:
cls._intern[strvalue] = result = PathToken(strvalue)
return result
| PathToken |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefaultClass2.py | {
"start": 2150,
"end": 2380
} | class ____(ClassNChild): ...
n1 = ClassN()
reveal_type(n1.a, expected_text="str")
P1 = ParamSpec("P1", default=...)
P2 = ParamSpec("P2", default=P1)
P3 = ParamSpec("P3", default=P2)
P4 = ParamSpec("P4", default=[int, T1])
| ClassN |
python | spack__spack | lib/spack/spack/solver/asp.py | {
"start": 169624,
"end": 169730
} | class ____(spack.error.SpackError):
"""Raised when there is no possible compiler"""
| NoCompilerFoundError |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_os_login.py | {
"start": 1725,
"end": 2979
} | class ____:
def setup_method(self):
with mock.patch(
"airflow.providers.google.cloud.hooks.os_login.OSLoginHook.__init__",
new=mock_base_gcp_hook_default_project_id,
):
self.hook = OSLoginHook(gcp_conn_id="test")
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.os_login.OSLoginHook.get_conn")
def test_import_ssh_public_key(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
self.hook.import_ssh_public_key(
user=TEST_USER,
ssh_public_key=TEST_BODY,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.import_ssh_public_key.assert_called_once_with(
request=dict(
parent=TEST_PARENT,
ssh_public_key=TEST_BODY,
project_id=TEST_PROJECT_ID,
),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
| TestOSLoginHook |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 14952,
"end": 17040
} | class ____(TestCase):
def test_basic(self):
iterable = ['z', 'a', 'a', 'q', 'q', 'q', 'y']
actual = list(mi.distinct_permutations(iterable))
expected = set(permutations(iterable))
self.assertCountEqual(actual, expected)
def test_r(self):
for iterable, r in (
('mississippi', 0),
('mississippi', 1),
('mississippi', 6),
('mississippi', 7),
('mississippi', 12),
([0, 1, 1, 0], 0),
([0, 1, 1, 0], 1),
([0, 1, 1, 0], 2),
([0, 1, 1, 0], 3),
([0, 1, 1, 0], 4),
(['a'], 0),
(['a'], 1),
(['a'], 5),
([], 0),
([], 1),
([], 4),
):
with self.subTest(iterable=iterable, r=r):
expected = set(permutations(iterable, r))
actual = list(mi.distinct_permutations(iter(iterable), r))
self.assertCountEqual(actual, expected)
def test_unsortable(self):
iterable = ['1', 2, 2, 3, 3, 3]
actual = list(mi.distinct_permutations(iterable))
expected = set(permutations(iterable))
self.assertCountEqual(actual, expected)
def test_unsortable_r(self):
iterable = ['1', 2, 2, 3, 3, 3]
for r in range(len(iterable) + 1):
with self.subTest(iterable=iterable, r=r):
actual = list(mi.distinct_permutations(iterable, r=r))
expected = set(permutations(iterable, r=r))
self.assertCountEqual(actual, expected)
def test_unsorted_equivalent(self):
iterable = [1, True, '3']
actual = list(mi.distinct_permutations(iterable))
expected = set(permutations(iterable))
self.assertCountEqual(actual, expected)
def test_unhashable(self):
iterable = ([1], [1], 2)
actual = list(mi.distinct_permutations(iterable))
expected = list(mi.unique_everseen(permutations(iterable)))
self.assertCountEqual(actual, expected)
| DistinctPermutationsTests |
python | mwaskom__seaborn | tests/test_axisgrid.py | {
"start": 24063,
"end": 49574
} | class ____:
rs = np.random.RandomState(sum(map(ord, "PairGrid")))
df = pd.DataFrame(dict(x=rs.normal(size=60),
y=rs.randint(0, 4, size=(60)),
z=rs.gamma(3, size=60),
a=np.repeat(list("abc"), 20),
b=np.repeat(list("abcdefghijkl"), 5)))
def test_self_data(self):
g = ag.PairGrid(self.df)
assert g.data is self.df
def test_ignore_datelike_data(self):
df = self.df.copy()
df['date'] = pd.date_range('2010-01-01', periods=len(df), freq='d')
result = ag.PairGrid(self.df).data
expected = df.drop('date', axis=1)
tm.assert_frame_equal(result, expected)
def test_self_figure(self):
g = ag.PairGrid(self.df)
assert isinstance(g.figure, plt.Figure)
assert g.figure is g._figure
def test_self_axes(self):
g = ag.PairGrid(self.df)
for ax in g.axes.flat:
assert isinstance(ax, plt.Axes)
def test_default_axes(self):
g = ag.PairGrid(self.df)
assert g.axes.shape == (3, 3)
assert g.x_vars == ["x", "y", "z"]
assert g.y_vars == ["x", "y", "z"]
assert g.square_grid
@pytest.mark.parametrize("vars", [["z", "x"], np.array(["z", "x"])])
def test_specific_square_axes(self, vars):
g = ag.PairGrid(self.df, vars=vars)
assert g.axes.shape == (len(vars), len(vars))
assert g.x_vars == list(vars)
assert g.y_vars == list(vars)
assert g.square_grid
def test_remove_hue_from_default(self):
hue = "z"
g = ag.PairGrid(self.df, hue=hue)
assert hue not in g.x_vars
assert hue not in g.y_vars
vars = ["x", "y", "z"]
g = ag.PairGrid(self.df, hue=hue, vars=vars)
assert hue in g.x_vars
assert hue in g.y_vars
@pytest.mark.parametrize(
"x_vars, y_vars",
[
(["x", "y"], ["z", "y", "x"]),
(["x", "y"], "z"),
(np.array(["x", "y"]), np.array(["z", "y", "x"])),
],
)
def test_specific_nonsquare_axes(self, x_vars, y_vars):
g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)
assert g.axes.shape == (len(y_vars), len(x_vars))
assert g.x_vars == list(x_vars)
assert g.y_vars == list(y_vars)
assert not g.square_grid
def test_corner(self):
plot_vars = ["x", "y", "z"]
g = ag.PairGrid(self.df, vars=plot_vars, corner=True)
corner_size = sum(i + 1 for i in range(len(plot_vars)))
assert len(g.figure.axes) == corner_size
g.map_diag(plt.hist)
assert len(g.figure.axes) == (corner_size + len(plot_vars))
for ax in np.diag(g.axes):
assert not ax.yaxis.get_visible()
plot_vars = ["x", "y", "z"]
g = ag.PairGrid(self.df, vars=plot_vars, corner=True)
g.map(scatterplot)
assert len(g.figure.axes) == corner_size
assert g.axes[0, 0].get_ylabel() == "x"
def test_size(self):
g1 = ag.PairGrid(self.df, height=3)
npt.assert_array_equal(g1.fig.get_size_inches(), (9, 9))
g2 = ag.PairGrid(self.df, height=4, aspect=.5)
npt.assert_array_equal(g2.fig.get_size_inches(), (6, 12))
g3 = ag.PairGrid(self.df, y_vars=["z"], x_vars=["x", "y"],
height=2, aspect=2)
npt.assert_array_equal(g3.fig.get_size_inches(), (8, 2))
def test_empty_grid(self):
with pytest.raises(ValueError, match="No variables found"):
ag.PairGrid(self.df[["a", "b"]])
def test_map(self):
vars = ["x", "y", "z"]
g1 = ag.PairGrid(self.df)
g1.map(plt.scatter)
for i, axes_i in enumerate(g1.axes):
for j, ax in enumerate(axes_i):
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
g2 = ag.PairGrid(self.df, hue="a")
g2.map(plt.scatter)
for i, axes_i in enumerate(g2.axes):
for j, ax in enumerate(axes_i):
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
for k, k_level in enumerate(self.df.a.unique()):
x_in_k = x_in[self.df.a == k_level]
y_in_k = y_in[self.df.a == k_level]
x_out, y_out = ax.collections[k].get_offsets().T
npt.assert_array_equal(x_in_k, x_out)
npt.assert_array_equal(y_in_k, y_out)
def test_map_nonsquare(self):
x_vars = ["x"]
y_vars = ["y", "z"]
g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)
g.map(plt.scatter)
x_in = self.df.x
for i, i_var in enumerate(y_vars):
ax = g.axes[i, 0]
y_in = self.df[i_var]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
def test_map_lower(self):
vars = ["x", "y", "z"]
g = ag.PairGrid(self.df)
g.map_lower(plt.scatter)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.triu_indices_from(g.axes)):
ax = g.axes[i, j]
assert len(ax.collections) == 0
def test_map_upper(self):
vars = ["x", "y", "z"]
g = ag.PairGrid(self.df)
g.map_upper(plt.scatter)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.tril_indices_from(g.axes)):
ax = g.axes[i, j]
assert len(ax.collections) == 0
def test_map_mixed_funcsig(self):
vars = ["x", "y", "z"]
g = ag.PairGrid(self.df, vars=vars)
g.map_lower(scatterplot)
g.map_upper(plt.scatter)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
def test_map_diag(self):
g = ag.PairGrid(self.df)
g.map_diag(plt.hist)
for var, ax in zip(g.diag_vars, g.diag_axes):
assert len(ax.patches) == 10
assert pytest.approx(ax.patches[0].get_x()) == self.df[var].min()
g = ag.PairGrid(self.df, hue="a")
g.map_diag(plt.hist)
for ax in g.diag_axes:
assert len(ax.patches) == 30
g = ag.PairGrid(self.df, hue="a")
g.map_diag(plt.hist, histtype='step')
for ax in g.diag_axes:
for ptch in ax.patches:
assert not ptch.fill
def test_map_diag_rectangular(self):
x_vars = ["x", "y"]
y_vars = ["x", "z", "y"]
g1 = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)
g1.map_diag(plt.hist)
g1.map_offdiag(plt.scatter)
assert set(g1.diag_vars) == (set(x_vars) & set(y_vars))
for var, ax in zip(g1.diag_vars, g1.diag_axes):
assert len(ax.patches) == 10
assert pytest.approx(ax.patches[0].get_x()) == self.df[var].min()
for j, x_var in enumerate(x_vars):
for i, y_var in enumerate(y_vars):
ax = g1.axes[i, j]
if x_var == y_var:
diag_ax = g1.diag_axes[j] # because fewer x than y vars
assert ax.bbox.bounds == diag_ax.bbox.bounds
else:
x, y = ax.collections[0].get_offsets().T
assert_array_equal(x, self.df[x_var])
assert_array_equal(y, self.df[y_var])
g2 = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars, hue="a")
g2.map_diag(plt.hist)
g2.map_offdiag(plt.scatter)
assert set(g2.diag_vars) == (set(x_vars) & set(y_vars))
for ax in g2.diag_axes:
assert len(ax.patches) == 30
x_vars = ["x", "y", "z"]
y_vars = ["x", "z"]
g3 = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)
g3.map_diag(plt.hist)
g3.map_offdiag(plt.scatter)
assert set(g3.diag_vars) == (set(x_vars) & set(y_vars))
for var, ax in zip(g3.diag_vars, g3.diag_axes):
assert len(ax.patches) == 10
assert pytest.approx(ax.patches[0].get_x()) == self.df[var].min()
for j, x_var in enumerate(x_vars):
for i, y_var in enumerate(y_vars):
ax = g3.axes[i, j]
if x_var == y_var:
diag_ax = g3.diag_axes[i] # because fewer y than x vars
assert ax.bbox.bounds == diag_ax.bbox.bounds
else:
x, y = ax.collections[0].get_offsets().T
assert_array_equal(x, self.df[x_var])
assert_array_equal(y, self.df[y_var])
def test_map_diag_color(self):
color = "red"
g1 = ag.PairGrid(self.df)
g1.map_diag(plt.hist, color=color)
for ax in g1.diag_axes:
for patch in ax.patches:
assert_colors_equal(patch.get_facecolor(), color)
g2 = ag.PairGrid(self.df)
g2.map_diag(kdeplot, color='red')
for ax in g2.diag_axes:
for line in ax.lines:
assert_colors_equal(line.get_color(), color)
def test_map_diag_palette(self):
palette = "muted"
pal = color_palette(palette, n_colors=len(self.df.a.unique()))
g = ag.PairGrid(self.df, hue="a", palette=palette)
g.map_diag(kdeplot)
for ax in g.diag_axes:
for line, color in zip(ax.lines[::-1], pal):
assert_colors_equal(line.get_color(), color)
def test_map_diag_and_offdiag(self):
vars = ["x", "y", "z"]
g = ag.PairGrid(self.df)
g.map_offdiag(plt.scatter)
g.map_diag(plt.hist)
for ax in g.diag_axes:
assert len(ax.patches) == 10
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.diag_indices_from(g.axes)):
ax = g.axes[i, j]
assert len(ax.collections) == 0
def test_diag_sharey(self):
g = ag.PairGrid(self.df, diag_sharey=True)
g.map_diag(kdeplot)
for ax in g.diag_axes[1:]:
assert ax.get_ylim() == g.diag_axes[0].get_ylim()
def test_map_diag_matplotlib(self):
bins = 10
g = ag.PairGrid(self.df)
g.map_diag(plt.hist, bins=bins)
for ax in g.diag_axes:
assert len(ax.patches) == bins
levels = len(self.df["a"].unique())
g = ag.PairGrid(self.df, hue="a")
g.map_diag(plt.hist, bins=bins)
for ax in g.diag_axes:
assert len(ax.patches) == (bins * levels)
def test_palette(self):
rcmod.set()
g = ag.PairGrid(self.df, hue="a")
assert g.palette == color_palette(n_colors=len(self.df.a.unique()))
g = ag.PairGrid(self.df, hue="b")
assert g.palette == color_palette("husl", len(self.df.b.unique()))
g = ag.PairGrid(self.df, hue="a", palette="Set2")
assert g.palette == color_palette("Set2", len(self.df.a.unique()))
dict_pal = dict(a="red", b="green", c="blue")
list_pal = color_palette(["red", "green", "blue"])
g = ag.PairGrid(self.df, hue="a", palette=dict_pal)
assert g.palette == list_pal
list_pal = color_palette(["blue", "red", "green"])
g = ag.PairGrid(self.df, hue="a", hue_order=list("cab"),
palette=dict_pal)
assert g.palette == list_pal
def test_hue_kws(self):
kws = dict(marker=["o", "s", "d", "+"])
g = ag.PairGrid(self.df, hue="a", hue_kws=kws)
g.map(plt.plot)
for line, marker in zip(g.axes[0, 0].lines, kws["marker"]):
assert line.get_marker() == marker
g = ag.PairGrid(self.df, hue="a", hue_kws=kws,
hue_order=list("dcab"))
g.map(plt.plot)
for line, marker in zip(g.axes[0, 0].lines, kws["marker"]):
assert line.get_marker() == marker
def test_hue_order(self):
order = list("dcab")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map(plt.plot)
for line, level in zip(g.axes[1, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "y"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_diag(plt.plot)
for line, level in zip(g.axes[0, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "x"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_lower(plt.plot)
for line, level in zip(g.axes[1, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "y"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_upper(plt.plot)
for line, level in zip(g.axes[0, 1].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "y"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "x"])
plt.close("all")
def test_hue_order_missing_level(self):
order = list("dcaeb")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map(plt.plot)
for line, level in zip(g.axes[1, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "y"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_diag(plt.plot)
for line, level in zip(g.axes[0, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "x"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_lower(plt.plot)
for line, level in zip(g.axes[1, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "y"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_upper(plt.plot)
for line, level in zip(g.axes[0, 1].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "y"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "x"])
plt.close("all")
def test_hue_in_map(self, long_df):
g = ag.PairGrid(long_df, vars=["x", "y"])
g.map(scatterplot, hue=long_df["a"])
ax = g.axes.flat[0]
points = ax.collections[0]
assert len(set(map(tuple, points.get_facecolors()))) == 3
def test_nondefault_index(self):
df = self.df.copy().set_index("b")
plot_vars = ["x", "y", "z"]
g1 = ag.PairGrid(df)
g1.map(plt.scatter)
for i, axes_i in enumerate(g1.axes):
for j, ax in enumerate(axes_i):
x_in = self.df[plot_vars[j]]
y_in = self.df[plot_vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
g2 = ag.PairGrid(df, hue="a")
g2.map(plt.scatter)
for i, axes_i in enumerate(g2.axes):
for j, ax in enumerate(axes_i):
x_in = self.df[plot_vars[j]]
y_in = self.df[plot_vars[i]]
for k, k_level in enumerate(self.df.a.unique()):
x_in_k = x_in[self.df.a == k_level]
y_in_k = y_in[self.df.a == k_level]
x_out, y_out = ax.collections[k].get_offsets().T
npt.assert_array_equal(x_in_k, x_out)
npt.assert_array_equal(y_in_k, y_out)
@pytest.mark.parametrize("func", [scatterplot, plt.scatter])
def test_dropna(self, func):
df = self.df.copy()
n_null = 20
df.loc[np.arange(n_null), "x"] = np.nan
plot_vars = ["x", "y", "z"]
g1 = ag.PairGrid(df, vars=plot_vars, dropna=True)
g1.map(func)
for i, axes_i in enumerate(g1.axes):
for j, ax in enumerate(axes_i):
x_in = df[plot_vars[j]]
y_in = df[plot_vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
n_valid = (x_in * y_in).notnull().sum()
assert n_valid == len(x_out)
assert n_valid == len(y_out)
g1.map_diag(histplot)
for i, ax in enumerate(g1.diag_axes):
var = plot_vars[i]
count = sum(p.get_height() for p in ax.patches)
assert count == df[var].notna().sum()
def test_histplot_legend(self):
# Tests _extract_legend_handles
g = ag.PairGrid(self.df, vars=["x", "y"], hue="a")
g.map_offdiag(histplot)
g.add_legend()
assert len(get_legend_handles(g._legend)) == len(self.df["a"].unique())
def test_pairplot(self):
vars = ["x", "y", "z"]
g = ag.pairplot(self.df)
for ax in g.diag_axes:
assert len(ax.patches) > 1
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.diag_indices_from(g.axes)):
ax = g.axes[i, j]
assert len(ax.collections) == 0
g = ag.pairplot(self.df, hue="a")
n = len(self.df.a.unique())
for ax in g.diag_axes:
assert len(ax.collections) == n
def test_pairplot_reg(self):
vars = ["x", "y", "z"]
g = ag.pairplot(self.df, diag_kind="hist", kind="reg")
for ax in g.diag_axes:
assert len(ax.patches)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
assert len(ax.lines) == 1
assert len(ax.collections) == 2
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
assert len(ax.lines) == 1
assert len(ax.collections) == 2
for i, j in zip(*np.diag_indices_from(g.axes)):
ax = g.axes[i, j]
assert len(ax.collections) == 0
def test_pairplot_reg_hue(self):
markers = ["o", "s", "d"]
g = ag.pairplot(self.df, kind="reg", hue="a", markers=markers)
ax = g.axes[-1, 0]
c1 = ax.collections[0]
c2 = ax.collections[2]
assert not np.array_equal(c1.get_facecolor(), c2.get_facecolor())
assert not np.array_equal(
c1.get_paths()[0].vertices, c2.get_paths()[0].vertices,
)
def test_pairplot_diag_kde(self):
vars = ["x", "y", "z"]
g = ag.pairplot(self.df, diag_kind="kde")
for ax in g.diag_axes:
assert len(ax.collections) == 1
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.diag_indices_from(g.axes)):
ax = g.axes[i, j]
assert len(ax.collections) == 0
def test_pairplot_kde(self):
f, ax1 = plt.subplots()
kdeplot(data=self.df, x="x", y="y", ax=ax1)
g = ag.pairplot(self.df, kind="kde")
ax2 = g.axes[1, 0]
assert_plots_equal(ax1, ax2, labels=False)
def test_pairplot_hist(self):
f, ax1 = plt.subplots()
histplot(data=self.df, x="x", y="y", ax=ax1)
g = ag.pairplot(self.df, kind="hist")
ax2 = g.axes[1, 0]
assert_plots_equal(ax1, ax2, labels=False)
@pytest.mark.skipif(_version_predates(mpl, "3.7.0"), reason="Matplotlib bug")
def test_pairplot_markers(self):
vars = ["x", "y", "z"]
markers = ["o", "X", "s"]
g = ag.pairplot(self.df, hue="a", vars=vars, markers=markers)
m1 = get_legend_handles(g._legend)[0].get_marker()
m2 = get_legend_handles(g._legend)[1].get_marker()
assert m1 != m2
with pytest.warns(UserWarning):
g = ag.pairplot(self.df, hue="a", vars=vars, markers=markers[:-2])
def test_pairplot_column_multiindex(self):
cols = pd.MultiIndex.from_arrays([["x", "y"], [1, 2]])
df = self.df[["x", "y"]].set_axis(cols, axis=1)
g = ag.pairplot(df)
assert g.diag_vars == list(cols)
def test_corner_despine(self):
g = ag.PairGrid(self.df, corner=True, despine=False)
g.map_diag(histplot)
assert g.axes[0, 0].spines["top"].get_visible()
def test_corner_set(self):
g = ag.PairGrid(self.df, corner=True, despine=False)
g.set(xlim=(0, 10))
assert g.axes[-1, 0].get_xlim() == (0, 10)
def test_legend(self):
g1 = ag.pairplot(self.df, hue="a")
assert isinstance(g1.legend, mpl.legend.Legend)
g2 = ag.pairplot(self.df)
assert g2.legend is None
def test_tick_params(self):
g = ag.PairGrid(self.df)
color = "red"
pad = 3
g.tick_params(pad=pad, color=color)
for ax in g.axes.flat:
for axis in ["xaxis", "yaxis"]:
for tick in getattr(ax, axis).get_major_ticks():
assert mpl.colors.same_color(tick.tick1line.get_color(), color)
assert mpl.colors.same_color(tick.tick2line.get_color(), color)
assert tick.get_pad() == pad
@pytest.mark.skipif(
condition=not hasattr(pd.api, "interchange"),
reason="Tests behavior assuming support for dataframe interchange"
)
def test_data_interchange(self, mock_long_df, long_df):
g = ag.PairGrid(mock_long_df, vars=["x", "y", "z"], hue="a")
g.map(scatterplot)
assert g.axes.shape == (3, 3)
for ax in g.axes.flat:
pts = ax.collections[0].get_offsets()
assert len(pts) == len(long_df)
| TestPairGrid |
python | pypa__pip | src/pip/_vendor/urllib3/exceptions.py | {
"start": 3633,
"end": 3765
} | class ____(ValueError, HTTPError):
"""Raised when there is something wrong with a given URL input."""
pass
| LocationValueError |
python | donnemartin__interactive-coding-challenges | sorting_searching/anagrams/test_anagrams.py | {
"start": 18,
"end": 531
} | class ____(unittest.TestCase):
def test_group_anagrams(self):
anagram = Anagram()
self.assertRaises(TypeError, anagram.group_anagrams, None)
data = ['ram', 'act', 'arm', 'bat', 'cat', 'tab']
expected = ['ram', 'arm', 'act', 'cat', 'bat', 'tab']
self.assertEqual(anagram.group_anagrams(data), expected)
print('Success: test_group_anagrams')
def main():
test = TestAnagrams()
test.test_group_anagrams()
if __name__ == '__main__':
main()
| TestAnagrams |
python | ray-project__ray | python/ray/util/metrics.py | {
"start": 10869,
"end": 12497
} | class ____(Metric):
"""Gauges keep the last recorded value and drop everything before.
Unlike counters, gauges can go up or down over time.
This corresponds to Prometheus' gauge metric:
https://prometheus.io/docs/concepts/metric_types/#gauge
Args:
name: Name of the metric.
description: Description of the metric.
tag_keys: Tag keys of the metric.
"""
def __init__(
self,
name: str,
description: str = "",
tag_keys: Optional[Tuple[str, ...]] = None,
):
super().__init__(name, description, tag_keys)
if self._discard_metric:
self._metric = None
else:
self._metric = CythonGauge(self._name, self._description, self._tag_keys)
def set(self, value: Optional[Union[int, float]], tags: Dict[str, str] = None):
"""Set the gauge to the given `value`.
Tags passed in will take precedence over the metric's default tags.
Args:
value(int, float): Value to set the gauge to. If `None`, this method is a
no-op.
tags(Dict[str, str]): Tags to set or override for this gauge.
"""
if value is None:
return
if not isinstance(value, (int, float)):
raise TypeError(f"value must be int or float, got {type(value)}.")
self._record(value, tags)
def __reduce__(self):
deserializer = Gauge
serialized_data = (self._name, self._description, self._tag_keys)
return deserializer, serialized_data
__all__ = [
"Counter",
"Histogram",
"Gauge",
]
| Gauge |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-markitdown/llama_index/readers/markitdown/base.py | {
"start": 464,
"end": 3110
} | class ____(BaseModel):
file_path: Union[str, Path, List[str], List[Path]]
@model_validator(mode="after")
def validate_file_path(self) -> Self:
if isinstance(self.file_path, str):
if not Path(self.file_path).is_dir():
if not Path(self.file_path).is_file():
raise ValueError("File or directory path does not exist")
else:
dir_files = [self.file_path]
else:
dir_files = []
for root, _, files in os.walk(self.file_path):
for el in files:
dir_files.append(os.path.join(root, el))
self.file_path = dir_files
elif isinstance(self.file_path, Path):
if not self.file_path.is_dir():
if not self.file_path.is_file():
raise ValueError("File or directory path does not exist")
else:
dir_files = [self.file_path]
else:
dir_files = []
for root, _, files in os.walk(self.file_path):
for el in files:
dir_files.append(os.path.join(root, el))
self.file_path = dir_files
empty, fls = is_empty(self.file_path)
if empty:
raise ValueError("There is no file to parse!")
else:
files = []
if isinstance(fls[0], str):
for fl in fls:
if Path(fl).is_file() and os.path.splitext(fl)[1] in [
".docx",
".html",
".xml",
".csv",
".pdf",
".pptx",
".xlsx",
".json",
".zip",
".txt",
"",
".md",
]:
files.append(fl)
else:
for fl in fls:
if fl.is_file() and os.path.splitext(fl)[1] in [
".docx",
".html",
".xml",
".csv",
".pdf",
".pptx",
".xlsx",
".json",
".zip",
".txt",
"",
".md",
]:
files.append(fl.__str__())
self.file_path = files
return self
| ValidFilePath |
python | google__jax | tests/ffi_test.py | {
"start": 13990,
"end": 16782
} | class ____(jtu.JaxTestCase):
def setUp(self):
super().setUp()
# Register callbacks before checking the number of devices to make sure
# that we're testing the registration path, even if we can't run the tests.
for target_name in ["lapack_sgeqrf_ffi", "cusolver_geqrf_ffi",
"hipsolver_geqrf_ffi"]:
jax.ffi.register_ffi_target_as_batch_partitionable(target_name)
if jax.device_count() < 2:
self.skipTest("Requires multiple devices")
if jtu.test_device_matches(["cpu"]):
lapack._lapack.initialize()
@jtu.run_on_devices("gpu", "cpu")
def test_shard_map(self):
mesh = jtu.create_mesh((len(jax.devices()),), ("i",))
x = self.rng().randn(8, 4, 5).astype(np.float32)
@partial(shard_map, mesh=mesh, in_specs=P("i"), out_specs=P("i"),
check_vma=False)
def f(x):
return batch_partitionable_ffi_call(x)
f(x) # eager mode doesn't crash
jax.jit(f)(x) # neither does JIT
self.assertNotIn("all-gather", jax.jit(f).lower(x).compile().as_text())
@jtu.run_on_devices("gpu", "cpu")
def test_batch_partitioning(self):
def f(x):
return batch_partitionable_ffi_call(x)
mesh = jtu.create_mesh((len(jax.devices()),), ("i",))
x = self.rng().randn(8, 4, 5).astype(np.float32)
x_sharding = jax.NamedSharding(mesh, P("i"))
x = jax.device_put(x, x_sharding)
f_jit = jax.jit(f, out_shardings=x_sharding)
f(x) # eager mode doesn't crash
f_jit(x) # neither does JIT
self.assertNotIn("all-gather", f_jit.lower(x).compile().as_text())
def batch_partitionable_ffi_call(x):
return batch_partitionable_p.bind(x)
batch_partitionable_p = core.Primitive("batch_partitionable")
batch_partitionable_p.multiple_results = True
dispatch.simple_impl(batch_partitionable_p)
@batch_partitionable_p.def_abstract_eval
def _batch_partitionable_abstract_eval(x):
return x, core.ShapedArray(x.shape[:-1], x.dtype)
def _batch_partitionable_lowering(target_name, ctx, x):
x_aval, = ctx.avals_in
num_batch_dims = len(x_aval.shape) - 2
frontend_attrs = mlir.ir_attribute({"num_batch_dims": str(num_batch_dims)})
return jax.ffi.ffi_lowering(
target_name,
extra_attributes={"mhlo.frontend_attributes": frontend_attrs}
)(ctx, x)
mlir.register_lowering(
batch_partitionable_p,
partial(_batch_partitionable_lowering, "lapack_sgeqrf_ffi"),
platform="cpu",
)
mlir.register_lowering(
batch_partitionable_p,
partial(_batch_partitionable_lowering, "cusolver_geqrf_ffi"),
platform="cuda",
)
mlir.register_lowering(
batch_partitionable_p,
partial(_batch_partitionable_lowering, "hipsolver_geqrf_ffi"),
platform="rocm",
)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| BatchPartitioningTest |
python | getsentry__sentry | tests/sentry/notifications/test_notificationcontroller.py | {
"start": 1610,
"end": 31571
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
setting_option_1 = add_notification_setting_option(
scope_type=NotificationScopeEnum.USER,
scope_identifier=self.user.id,
type=NotificationSettingEnum.DEPLOY,
value=NotificationSettingsOptionEnum.ALWAYS,
user_id=self.user.id,
)
setting_option_2 = add_notification_setting_option(
scope_type=NotificationScopeEnum.PROJECT,
scope_identifier=self.project.id,
type=NotificationSettingEnum.ISSUE_ALERTS,
value=NotificationSettingsOptionEnum.ALWAYS,
user_id=self.user.id,
)
setting_option_3 = add_notification_setting_option(
scope_type=NotificationScopeEnum.ORGANIZATION,
scope_identifier=self.organization.id,
type=NotificationSettingEnum.ISSUE_ALERTS,
value=NotificationSettingsOptionEnum.NEVER,
user_id=self.user.id,
)
self.setting_options = [setting_option_1, setting_option_2, setting_option_3]
setting_provider_1 = add_notification_setting_provider(
scope_type=NotificationScopeEnum.USER,
scope_identifier=self.user.id,
provider=ExternalProviderEnum.SLACK,
type=NotificationSettingEnum.DEPLOY,
value=NotificationSettingsOptionEnum.ALWAYS,
user_id=self.user.id,
)
setting_provider_2 = add_notification_setting_provider(
scope_type=NotificationScopeEnum.PROJECT,
scope_identifier=self.project.id,
provider=ExternalProviderEnum.MSTEAMS,
type=NotificationSettingEnum.ISSUE_ALERTS,
value=NotificationSettingsOptionEnum.NEVER,
user_id=self.user.id,
)
setting_provider_3 = add_notification_setting_provider(
scope_type=NotificationScopeEnum.USER,
scope_identifier=self.user.id,
provider=ExternalProviderEnum.EMAIL,
type=NotificationSettingEnum.WORKFLOW,
value=NotificationSettingsOptionEnum.ALWAYS,
user_id=self.user.id,
)
self.setting_providers = [setting_provider_1, setting_provider_2, setting_provider_3]
def test_get_all_setting_options(self) -> None:
controller = NotificationController(
recipients=[self.user],
project_ids=[self.project.id],
organization_id=self.organization.id,
)
assert list(controller.get_all_setting_options) == self.setting_options
NotificationSettingOption.objects.all().delete()
assert list(controller.get_all_setting_options) == self.setting_options
controller = NotificationController(
recipients=[self.user],
project_ids=[self.project.id],
organization_id=self.organization.id,
)
assert list(controller.get_all_setting_options) == []
def test_get_all_setting_providers(self) -> None:
controller = NotificationController(
recipients=[self.user],
project_ids=[self.project.id],
organization_id=self.organization.id,
)
assert list(controller.get_all_setting_providers) == self.setting_providers
def test_without_settings(self) -> None:
rpc_user = Actor.from_object(self.user)
NotificationSettingOption.objects.all().delete()
NotificationSettingProvider.objects.all().delete()
controller = NotificationController(
recipients=[self.user],
project_ids=[self.project.id],
organization_id=self.organization.id,
)
assert controller.get_all_setting_options == []
assert controller.get_all_setting_providers == []
options = controller._get_layered_setting_options()
assert (
options[self.user][NotificationSettingEnum.ISSUE_ALERTS]
== NotificationSettingsOptionEnum.ALWAYS
)
providers = controller._get_layered_setting_providers()
assert (
providers[self.user][NotificationSettingEnum.ISSUE_ALERTS][
ExternalProviderEnum.MSTEAMS.value
]
== NotificationSettingsOptionEnum.NEVER
)
assert (
providers[self.user][NotificationSettingEnum.DEPLOY][ExternalProviderEnum.SLACK.value]
== NotificationSettingsOptionEnum.COMMITTED_ONLY
)
enabled_settings = controller.get_combined_settings()[self.user]
assert (
enabled_settings[NotificationSettingEnum.ISSUE_ALERTS][ExternalProviderEnum.SLACK.value]
== NotificationSettingsOptionEnum.ALWAYS
)
assert controller.get_notification_recipients(
type=NotificationSettingEnum.ISSUE_ALERTS
) == {ExternalProviders.EMAIL: {rpc_user}, ExternalProviders.SLACK: {rpc_user}}
assert not controller.user_has_any_provider_settings(provider=ExternalProviderEnum.SLACK)
assert not controller.user_has_any_provider_settings(provider=ExternalProviderEnum.MSTEAMS)
def test_filter_setting_options(self) -> None:
controller = NotificationController(
recipients=[self.user],
project_ids=[self.project.id],
organization_id=self.organization.id,
)
filtered_options = controller._filter_options(type=NotificationSettingEnum.DEPLOY.value)
assert filtered_options == [self.setting_options[0]]
filtered_options = controller._filter_options(
type=NotificationSettingEnum.ISSUE_ALERTS.value
)
assert filtered_options == self.setting_options[1:]
filtered_options = controller._filter_options(
type=NotificationSettingEnum.ISSUE_ALERTS.value,
scope_type=NotificationScopeEnum.PROJECT.value,
)
assert filtered_options == [self.setting_options[1]]
def test_filter_setting_providers(self) -> None:
controller = NotificationController(
recipients=[self.user],
project_ids=[self.project.id],
organization_id=self.organization.id,
)
filtered_providers = controller._filter_providers(type=NotificationSettingEnum.DEPLOY.value)
assert filtered_providers == [self.setting_providers[0]]
filtered_providers = controller._filter_providers(
value=NotificationSettingsOptionEnum.ALWAYS.value
)
assert filtered_providers == [self.setting_providers[0], self.setting_providers[2]]
filtered_providers = controller._filter_providers(
type=NotificationSettingEnum.DEPLOY.value,
value=NotificationSettingsOptionEnum.ALWAYS.value,
)
assert filtered_providers == [self.setting_providers[0]]
def test_layering(self) -> None:
NotificationSettingOption.objects.all().delete()
top_level_option = add_notification_setting_option(
scope_type=NotificationScopeEnum.PROJECT,
scope_identifier=self.project.id,
type=NotificationSettingEnum.WORKFLOW,
value=NotificationSettingsOptionEnum.NEVER,
user_id=self.user.id,
)
add_notification_setting_option(
scope_type=NotificationScopeEnum.USER,
scope_identifier=self.user.id,
type=NotificationSettingEnum.WORKFLOW,
value=NotificationSettingsOptionEnum.ALWAYS,
user_id=self.user.id,
)
add_notification_setting_option(
scope_type=NotificationScopeEnum.ORGANIZATION,
scope_identifier=self.organization.id,
type=NotificationSettingEnum.WORKFLOW,
value=NotificationSettingsOptionEnum.SUBSCRIBE_ONLY,
user_id=self.user.id,
)
controller = NotificationController(
recipients=[self.user],
project_ids=[self.project.id],
organization_id=self.organization.id,
)
options = controller._get_layered_setting_options()
assert options[self.user][NotificationSettingEnum.WORKFLOW].value == top_level_option.value
NotificationSettingProvider.objects.all().delete()
top_level_provider = add_notification_setting_provider(
scope_type=NotificationScopeEnum.PROJECT,
scope_identifier=self.project.id,
provider=ExternalProviderEnum.EMAIL,
type=NotificationSettingEnum.WORKFLOW,
value=NotificationSettingsOptionEnum.SUBSCRIBE_ONLY,
user_id=self.user.id,
)
add_notification_setting_provider(
scope_type=NotificationScopeEnum.USER,
scope_identifier=self.user.id,
provider=ExternalProviderEnum.EMAIL,
type=NotificationSettingEnum.WORKFLOW,
value=NotificationSettingsOptionEnum.ALWAYS,
user_id=self.user.id,
)
add_notification_setting_provider(
scope_type=NotificationScopeEnum.ORGANIZATION,
scope_identifier=self.organization.id,
provider=ExternalProviderEnum.EMAIL,
type=NotificationSettingEnum.WORKFLOW,
value=NotificationSettingsOptionEnum.SUBSCRIBE_ONLY,
user_id=self.user.id,
)
controller = NotificationController(
recipients=[self.user],
project_ids=[self.project.id],
organization_id=self.organization.id,
)
providers = controller._get_layered_setting_providers()
assert (
providers[self.user][NotificationSettingEnum.WORKFLOW][
ExternalProviderEnum.EMAIL.value
].value
== top_level_provider.value
)
assert (
providers[self.user][NotificationSettingEnum.DEPLOY][ExternalProviderEnum.EMAIL.value]
== NotificationSettingsOptionEnum.COMMITTED_ONLY
)
assert (
providers[self.user][NotificationSettingEnum.DEPLOY][ExternalProviderEnum.MSTEAMS.value]
== NotificationSettingsOptionEnum.NEVER
)
def test_get_layered_setting_options(self) -> None:
controller = NotificationController(
recipients=[self.user],
project_ids=[self.project.id],
organization_id=self.organization.id,
)
options = controller._get_layered_setting_options()
assert (
options[self.user][NotificationSettingEnum.DEPLOY].value
== self.setting_options[0].value
)
assert (
options[self.user][NotificationSettingEnum.ISSUE_ALERTS].value
== self.setting_options[1].value
)
options = controller._get_layered_setting_options(
type=NotificationSettingEnum.ISSUE_ALERTS.value
)
assert (
options[self.user][NotificationSettingEnum.ISSUE_ALERTS].value
== self.setting_options[1].value
)
def test_get_layered_setting_options_defaults(self) -> None:
new_user = self.create_user()
setting_option_1 = add_notification_setting_option(
scope_type=NotificationScopeEnum.ORGANIZATION,
scope_identifier=self.organization.id,
type=NotificationSettingEnum.ISSUE_ALERTS,
value=NotificationSettingsOptionEnum.NEVER,
user_id=new_user.id,
)
controller = NotificationController(
recipients=[new_user, self.user],
project_ids=[self.project.id],
organization_id=self.organization.id,
)
options = controller._get_layered_setting_options()
assert (
options[new_user][NotificationSettingEnum.ISSUE_ALERTS].value == setting_option_1.value
)
user_options = options[self.user]
assert (
user_options[NotificationSettingEnum.ISSUE_ALERTS].value
== self.setting_options[1].value
)
assert user_options[NotificationSettingEnum.DEPLOY].value == self.setting_options[0].value
assert (
user_options[NotificationSettingEnum.WORKFLOW]
== NotificationSettingsOptionEnum.SUBSCRIBE_ONLY
)
def test_get_layered_setting_providers_defaults(self) -> None:
controller = NotificationController(
recipients=[self.user],
project_ids=[self.project.id],
organization_id=self.organization.id,
)
options = controller._get_layered_setting_providers()
user_options = options[self.user]
assert (
user_options[NotificationSettingEnum.ISSUE_ALERTS][
ExternalProviderEnum.MSTEAMS.value
].value
== self.setting_providers[1].value
)
assert (
user_options[NotificationSettingEnum.DEPLOY][ExternalProviderEnum.SLACK.value].value
== self.setting_providers[0].value
)
assert (
user_options[NotificationSettingEnum.WORKFLOW][ExternalProviderEnum.EMAIL.value].value
== self.setting_providers[2].value
)
def test_get_setting_providers_with_defaults(self) -> None:
new_user = self.create_user()
setting_provider_1 = add_notification_setting_provider(
scope_type=NotificationScopeEnum.ORGANIZATION,
scope_identifier=self.organization.id,
provider=ExternalProviderEnum.MSTEAMS,
type=NotificationSettingEnum.ISSUE_ALERTS,
value=NotificationSettingsOptionEnum.NEVER,
user_id=new_user.id,
)
controller = NotificationController(
recipients=[self.user, new_user],
project_ids=[self.project.id],
organization_id=self.organization.id,
)
options = controller._get_layered_setting_providers()
assert (
options[new_user][NotificationSettingEnum.ISSUE_ALERTS][
ExternalProviderEnum.MSTEAMS.value
].value
== setting_provider_1.value
)
user_options = options[self.user]
assert (
user_options[NotificationSettingEnum.ISSUE_ALERTS][
ExternalProviderEnum.MSTEAMS.value
].value
== self.setting_providers[1].value
)
assert (
user_options[NotificationSettingEnum.DEPLOY][ExternalProviderEnum.SLACK.value].value
== self.setting_providers[0].value
)
assert (
user_options[NotificationSettingEnum.WORKFLOW][ExternalProviderEnum.EMAIL.value].value
== self.setting_providers[2].value
)
def test_get_combined_settings(self) -> None:
new_user = self.create_user()
self.create_member(
organization=self.organization, user=new_user, role="member", teams=[self.team]
)
_ = add_notification_setting_option(
scope_type=NotificationScopeEnum.PROJECT,
scope_identifier=self.project.id,
type=NotificationSettingEnum.ISSUE_ALERTS,
value=NotificationSettingsOptionEnum.ALWAYS,
user_id=new_user.id,
)
_ = add_notification_setting_provider(
scope_type=NotificationScopeEnum.USER,
scope_identifier=new_user.id,
provider=ExternalProviderEnum.MSTEAMS,
type=NotificationSettingEnum.ISSUE_ALERTS,
value=NotificationSettingsOptionEnum.ALWAYS,
user_id=new_user.id,
)
controller = NotificationController(
recipients=[self.user, new_user],
project_ids=[self.project.id],
organization_id=self.organization.id,
)
enabled_settings = controller.get_combined_settings()
# Settings for self.user
for type, expected_setting in [
(
NotificationSettingEnum.DEPLOY,
{
ExternalProviderEnum.EMAIL.value: NotificationSettingsOptionEnum.ALWAYS,
ExternalProviderEnum.SLACK.value: NotificationSettingsOptionEnum.ALWAYS,
},
),
(
NotificationSettingEnum.WORKFLOW,
{
ExternalProviderEnum.EMAIL.value: NotificationSettingsOptionEnum.SUBSCRIBE_ONLY,
ExternalProviderEnum.SLACK.value: NotificationSettingsOptionEnum.SUBSCRIBE_ONLY,
},
),
(
NotificationSettingEnum.ISSUE_ALERTS,
{
ExternalProviderEnum.EMAIL.value: NotificationSettingsOptionEnum.ALWAYS,
ExternalProviderEnum.SLACK.value: NotificationSettingsOptionEnum.ALWAYS,
},
),
(
NotificationSettingEnum.REPORTS,
{
ExternalProviderEnum.EMAIL.value: NotificationSettingsOptionEnum.ALWAYS,
},
),
(
NotificationSettingEnum.QUOTA,
{
ExternalProviderEnum.SLACK.value: NotificationSettingsOptionEnum.ALWAYS,
ExternalProviderEnum.EMAIL.value: NotificationSettingsOptionEnum.ALWAYS,
},
),
]:
provider_settings = enabled_settings[self.user][type]
assert provider_settings == expected_setting
# Settings for new_user
for type, expected_setting in [
(
NotificationSettingEnum.DEPLOY,
{
ExternalProviderEnum.EMAIL.value: NotificationSettingsOptionEnum.COMMITTED_ONLY,
ExternalProviderEnum.SLACK.value: NotificationSettingsOptionEnum.COMMITTED_ONLY,
},
),
(
NotificationSettingEnum.WORKFLOW,
{
ExternalProviderEnum.EMAIL.value: NotificationSettingsOptionEnum.SUBSCRIBE_ONLY,
ExternalProviderEnum.SLACK.value: NotificationSettingsOptionEnum.SUBSCRIBE_ONLY,
},
),
(
NotificationSettingEnum.ISSUE_ALERTS,
{
ExternalProviderEnum.EMAIL.value: NotificationSettingsOptionEnum.ALWAYS,
ExternalProviderEnum.SLACK.value: NotificationSettingsOptionEnum.ALWAYS,
ExternalProviderEnum.MSTEAMS.value: NotificationSettingsOptionEnum.ALWAYS,
},
),
(
NotificationSettingEnum.REPORTS,
{
ExternalProviderEnum.EMAIL.value: NotificationSettingsOptionEnum.ALWAYS,
},
),
(
NotificationSettingEnum.QUOTA,
{
ExternalProviderEnum.SLACK.value: NotificationSettingsOptionEnum.ALWAYS,
ExternalProviderEnum.EMAIL.value: NotificationSettingsOptionEnum.ALWAYS,
},
),
]:
provider_settings = enabled_settings[new_user][type]
assert provider_settings == expected_setting
def test_get_notification_recipients(self) -> None:
rpc_user = Actor.from_object(self.user)
new_user = self.create_user()
rpc_new_user = Actor.from_object(new_user)
self.create_member(
organization=self.organization, user=new_user, role="member", teams=[self.team]
)
_ = add_notification_setting_option(
scope_type=NotificationScopeEnum.PROJECT,
scope_identifier=self.project.id,
type=NotificationSettingEnum.ISSUE_ALERTS,
value=NotificationSettingsOptionEnum.ALWAYS,
user_id=new_user.id,
)
_ = add_notification_setting_provider(
scope_type=NotificationScopeEnum.USER,
scope_identifier=new_user.id,
provider=ExternalProviderEnum.MSTEAMS,
type=NotificationSettingEnum.ISSUE_ALERTS,
value=NotificationSettingsOptionEnum.ALWAYS,
user_id=new_user.id,
)
controller = NotificationController(
recipients=[self.user, new_user],
project_ids=[self.project.id],
organization_id=self.organization.id,
)
recipients = controller.get_notification_recipients(
type=NotificationSettingEnum.ISSUE_ALERTS,
actor_type=ActorType.USER,
)
assert recipients[ExternalProviders.SLACK] == {rpc_user, rpc_new_user}
assert recipients[ExternalProviders.EMAIL] == {rpc_user, rpc_new_user}
assert recipients[ExternalProviders.MSTEAMS] == {rpc_new_user}
def test_user_has_any_provider_settings(self) -> None:
controller = NotificationController(
recipients=[self.user],
organization_id=self.organization.id,
)
assert controller.user_has_any_provider_settings(provider=ExternalProviderEnum.SLACK)
assert controller.user_has_any_provider_settings(provider=ExternalProviderEnum.EMAIL)
assert not controller.user_has_any_provider_settings(provider=ExternalProviderEnum.MSTEAMS)
def test_get_subscriptions_status_for_projects(self) -> None:
add_notification_setting_option(
scope_type=NotificationScopeEnum.PROJECT,
scope_identifier=self.project.id,
type=NotificationSettingEnum.WORKFLOW,
value=NotificationSettingsOptionEnum.NEVER,
user_id=self.user.id,
)
controller = NotificationController(
recipients=[self.user],
project_ids=[self.project.id],
organization_id=self.organization.id,
)
assert controller.get_subscriptions_status_for_projects(
project_ids=[self.project.id],
user=self.user,
type=NotificationSettingEnum.DEPLOY,
) == {
self.project.id: GroupSubscriptionStatus(
is_disabled=False, is_active=True, has_only_inactive_subscriptions=False
)
}
assert controller.get_subscriptions_status_for_projects(
project_ids=[self.project.id],
user=self.user,
type=NotificationSettingEnum.WORKFLOW,
) == {
self.project.id: GroupSubscriptionStatus(
is_disabled=True, is_active=False, has_only_inactive_subscriptions=True
)
}
assert controller.get_subscriptions_status_for_projects(
project_ids=[self.project.id],
user=self.user,
type=NotificationSettingEnum.QUOTA,
) == {
self.project.id: GroupSubscriptionStatus(
is_disabled=False, is_active=True, has_only_inactive_subscriptions=False
)
}
def test_get_participants(self) -> None:
rpc_user = Actor.from_object(self.user)
controller = NotificationController(
recipients=[self.user],
project_ids=[self.project.id],
organization_id=self.organization.id,
type=NotificationSettingEnum.ISSUE_ALERTS,
)
assert controller.get_participants() == {
rpc_user: {
ExternalProviders.EMAIL: NotificationSettingsOptionEnum.ALWAYS,
ExternalProviders.SLACK: NotificationSettingsOptionEnum.ALWAYS,
}
}
controller = NotificationController(
recipients=[self.user],
project_ids=[self.project.id],
organization_id=self.organization.id,
type=NotificationSettingEnum.WORKFLOW,
)
assert controller.get_participants() == {
rpc_user: {
ExternalProviders.EMAIL: NotificationSettingsOptionEnum.SUBSCRIBE_ONLY,
ExternalProviders.SLACK: NotificationSettingsOptionEnum.SUBSCRIBE_ONLY,
}
}
def test_get_notification_value_for_recipient_and_type(self) -> None:
add_notification_setting_option(
scope_type=NotificationScopeEnum.USER,
scope_identifier=self.user.id,
type=NotificationSettingEnum.WORKFLOW,
value=NotificationSettingsOptionEnum.SUBSCRIBE_ONLY,
user_id=self.user.id,
)
add_notification_setting_option(
scope_type=NotificationScopeEnum.ORGANIZATION,
scope_identifier=self.organization.id,
type=NotificationSettingEnum.QUOTA_ERRORS,
value=NotificationSettingsOptionEnum.NEVER,
user_id=self.user.id,
)
controller = NotificationController(
recipients=[self.user],
project_ids=[self.project.id],
organization_id=self.organization.id,
)
assert (
controller.get_notification_value_for_recipient_and_type(
recipient=self.user,
type=NotificationSettingEnum.DEPLOY,
)
== NotificationSettingsOptionEnum.ALWAYS
)
assert (
controller.get_notification_value_for_recipient_and_type(
recipient=self.user,
type=NotificationSettingEnum.WORKFLOW,
)
== NotificationSettingsOptionEnum.SUBSCRIBE_ONLY
)
assert (
controller.get_notification_value_for_recipient_and_type(
recipient=self.user,
type=NotificationSettingEnum.QUOTA_ERRORS,
)
== NotificationSettingsOptionEnum.NEVER
)
def test_get_notification_provider_value_for_recipient_and_type(self) -> None:
controller = NotificationController(
recipients=[self.user],
project_ids=[self.project.id],
organization_id=self.organization.id,
)
assert (
controller.get_notification_provider_value_for_recipient_and_type(
recipient=self.user,
type=NotificationSettingEnum.DEPLOY,
provider=ExternalProviderEnum.SLACK,
)
== NotificationSettingsOptionEnum.ALWAYS
)
def test_get_notification_value_for_recipient_and_type_with_layering(self) -> None:
controller = NotificationController(
recipients=[self.user],
project_ids=[self.project.id],
organization_id=self.organization.id,
)
assert (
controller.get_notification_value_for_recipient_and_type(
recipient=self.user,
type=NotificationSettingEnum.DEPLOY,
)
== NotificationSettingsOptionEnum.ALWAYS
)
# overrides the user setting in setUp()
add_notification_setting_option(
scope_type=NotificationScopeEnum.ORGANIZATION,
scope_identifier=self.organization.id,
type=NotificationSettingEnum.DEPLOY,
value=NotificationSettingsOptionEnum.NEVER,
user_id=self.user.id,
)
controller = NotificationController(
recipients=[self.user],
project_ids=[self.project.id],
organization_id=self.organization.id,
)
assert (
controller.get_notification_value_for_recipient_and_type(
recipient=self.user,
type=NotificationSettingEnum.DEPLOY,
)
== NotificationSettingsOptionEnum.NEVER
)
def test_get_notification_provider_value_for_recipient_and_type_with_layering(self) -> None:
controller = NotificationController(
recipients=[self.user],
project_ids=[self.project.id],
organization_id=self.organization.id,
)
assert (
controller.get_notification_provider_value_for_recipient_and_type(
recipient=self.user,
type=NotificationSettingEnum.WORKFLOW,
provider=ExternalProviderEnum.EMAIL,
)
== NotificationSettingsOptionEnum.ALWAYS
)
# overrides the user setting in setUp()
add_notification_setting_provider(
scope_type=NotificationScopeEnum.ORGANIZATION,
scope_identifier=self.organization.id,
provider=ExternalProviderEnum.EMAIL,
type=NotificationSettingEnum.WORKFLOW,
value=NotificationSettingsOptionEnum.NEVER,
user_id=self.user.id,
)
controller = NotificationController(
recipients=[self.user],
project_ids=[self.project.id],
organization_id=self.organization.id,
)
assert (
controller.get_notification_provider_value_for_recipient_and_type(
recipient=self.user,
type=NotificationSettingEnum.WORKFLOW,
provider=ExternalProviderEnum.EMAIL,
)
== NotificationSettingsOptionEnum.NEVER
)
def test_get_users_for_weekly_reports(self) -> None:
controller = NotificationController(
recipients=[self.user],
organization_id=self.organization.id,
type=NotificationSettingEnum.REPORTS,
)
assert controller.get_users_for_weekly_reports() == [self.user.id]
add_notification_setting_option(
scope_type=NotificationScopeEnum.USER,
scope_identifier=self.user.id,
type=NotificationSettingEnum.REPORTS,
value=NotificationSettingsOptionEnum.NEVER,
user_id=self.user.id,
)
controller = NotificationController(
recipients=[self.user],
organization_id=self.organization.id,
type=NotificationSettingEnum.REPORTS,
)
assert controller.get_users_for_weekly_reports() == []
| NotificationControllerTest |
python | huggingface__transformers | src/transformers/models/deformable_detr/modeling_deformable_detr.py | {
"start": 19311,
"end": 21156
} | class ____(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one used by the Attention is all you
need paper, generalized to work on images.
"""
def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None):
super().__init__()
self.embedding_dim = embedding_dim
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, pixel_values, pixel_mask):
if pixel_mask is None:
raise ValueError("No pixel mask provided")
y_embed = pixel_mask.cumsum(1, dtype=pixel_values.dtype)
x_embed = pixel_mask.cumsum(2, dtype=pixel_values.dtype)
if self.normalize:
eps = 1e-6
y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.embedding_dim, dtype=pixel_values.dtype, device=pixel_values.device)
dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.embedding_dim)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
# Copied from transformers.models.detr.modeling_detr.DetrLearnedPositionEmbedding
| DeformableDetrSinePositionEmbedding |
python | getsentry__sentry | tests/sentry/conduit/test_tasks.py | {
"start": 568,
"end": 2108
} | class ____(TestCase):
@override_settings(
CONDUIT_PUBLISH_SECRET="test-secret",
CONDUIT_PUBLISH_JWT_ISSUER="test-issuer",
CONDUIT_PUBLISH_JWT_AUDIENCE="test-audience",
)
def test_generate_jwt_uses_settings(self):
"""Test that generate_jwt uses settings when parameters are not provided."""
token = generate_jwt(subject="test-subject")
claims = pyjwt.decode(token, options={"verify_signature": False})
assert claims["sub"] == "test-subject"
assert claims["iss"] == "test-issuer"
assert claims["aud"] == "test-audience"
assert "exp" in claims
def test_generate_jwt_uses_provided_parameters(self):
"""Test that generate_jwt uses parameters when provided."""
token = generate_jwt(
subject="test-subject",
issuer="custom-issuer",
audience="custom-audience",
secret="custom-secret",
)
claims = pyjwt.decode(token, options={"verify_signature": False})
assert claims["sub"] == "test-subject"
assert claims["iss"] == "custom-issuer"
assert claims["aud"] == "custom-audience"
assert "exp" in claims
def test_generate_jwt_raises_when_secret_missing(self):
"""Test that generate_jwt raises ValueError when secret is not configured."""
with pytest.raises(ValueError) as context:
generate_jwt(subject="test-subject")
assert "CONDUIT_PUBLISH_SECRET not configured" in str(context.value)
| GenerateJWTTest |
python | google__jax | tests/ann_test.py | {
"start": 1929,
"end": 7305
} | class ____(jtu.JaxTestCase):
# TODO(b/258315194) Investigate probability property when input is around
# few thousands.
@jtu.sample_product(
qy_shape=[(200, 128), (128, 128)],
db_shape=[(128, 500), (128, 3000)],
dtype=jtu.dtypes.all_floating,
k=[1, 10],
recall=[0.95],
)
def test_approx_max_k(self, qy_shape, db_shape, dtype, k, recall):
rng = jtu.rand_default(self.rng())
qy = rng(qy_shape, dtype)
db = rng(db_shape, dtype)
scores = lax.dot(qy, db)
_, gt_args = lax.top_k(scores, k)
_, ann_args = lax.approx_max_k(scores, k, recall_target=recall)
self.assertEqual(k, len(ann_args[0]))
ann_recall = compute_recall(np.asarray(ann_args), np.asarray(gt_args))
self.assertGreaterEqual(ann_recall, recall*0.9)
@jtu.sample_product(
qy_shape=[(200, 128), (128, 128)],
db_shape=[(128, 500), (128, 3000)],
dtype=jtu.dtypes.all_floating,
k=[1, 10],
recall=[0.95],
)
def test_approx_min_k(self, qy_shape, db_shape, dtype, k, recall):
rng = jtu.rand_default(self.rng())
qy = rng(qy_shape, dtype)
db = rng(db_shape, dtype)
scores = lax.dot(qy, db)
_, gt_args = lax.top_k(-scores, k)
_, ann_args = lax.approx_min_k(scores, k, recall_target=recall)
ann_recall = compute_recall(np.asarray(ann_args), np.asarray(gt_args))
self.assertGreaterEqual(ann_recall, recall*0.9)
@jtu.sample_product(
dtype=[np.float32],
shape=[(4,), (5, 5), (2, 1, 4)],
k=[1, 3],
is_max_k=[True, False],
)
def test_autodiff(self, shape, dtype, k, is_max_k):
vals = np.arange(math.prod(shape), dtype=dtype)
vals = self.rng().permutation(vals).reshape(shape)
if is_max_k:
fn = lambda vs: lax.approx_max_k(vs, k=k)[0]
else:
fn = lambda vs: lax.approx_min_k(vs, k=k)[0]
jtu.check_grads(fn, (vals,), 2, ["fwd", "rev"], eps=1e-2)
@jtu.sample_product(
qy_shape=[(200, 128), (128, 128)],
db_shape=[(2048, 128)],
dtype=jtu.dtypes.all_floating,
k=[1, 10],
recall=[0.9, 0.95],
)
def test_pmap(self, qy_shape, db_shape, dtype, k, recall):
num_devices = jax.device_count()
rng = jtu.rand_default(self.rng())
qy = rng(qy_shape, dtype)
db = rng(db_shape, dtype)
db_size = db.shape[0]
gt_scores = lax.dot_general(qy, db, (([1], [1]), ([], [])))
_, gt_args = lax.top_k(-gt_scores, k) # negate the score to get min-k
db_per_device = db_size//num_devices
sharded_db = db.reshape(num_devices, db_per_device, 128)
db_offsets = np.arange(num_devices, dtype=np.int32) * db_per_device
def parallel_topk(qy, db, db_offset):
scores = lax.dot_general(qy, db, (([1],[1]),([],[])))
ann_vals, ann_args = lax.approx_min_k(
scores,
k=k,
reduction_dimension=1,
recall_target=recall,
reduction_input_size_override=db_size,
aggregate_to_topk=False)
return (ann_vals, ann_args + db_offset)
# shape = qy_size, num_devices, approx_dp
ann_vals, ann_args = jax.pmap(
parallel_topk,
in_axes=(None, 0, 0),
out_axes=(1, 1))(qy, sharded_db, db_offsets)
# collapse num_devices and approx_dp
ann_vals = lax.collapse(ann_vals, 1, 3)
ann_args = lax.collapse(ann_args, 1, 3)
ann_vals, ann_args = lax.sort_key_val(ann_vals, ann_args, dimension=1)
ann_args = lax.slice_in_dim(ann_args, start_index=0, limit_index=k, axis=1)
ann_recall = compute_recall(np.asarray(ann_args), np.asarray(gt_args))
self.assertGreater(ann_recall, recall)
def test_vmap_before(self):
batch = 4
qy_size = 128
db_size = 1024
feature_dim = 32
k = 10
rng = jtu.rand_default(self.rng())
qy = rng([batch, qy_size, feature_dim], np.float32)
db = rng([batch, db_size, feature_dim], np.float32)
recall = 0.95
# Create ground truth
gt_scores = lax.dot_general(qy, db, (([2], [2]), ([0], [0])))
_, gt_args = lax.top_k(gt_scores, k)
gt_args = lax.reshape(gt_args, [qy_size * batch, k])
# test target
def approx_max_k(qy, db):
scores = qy @ db.transpose()
return lax.approx_max_k(scores, k)
_, ann_args = jax.vmap(approx_max_k, (0, 0))(qy, db)
ann_args = lax.reshape(ann_args, [qy_size * batch, k])
ann_recall = compute_recall(np.asarray(ann_args), np.asarray(gt_args))
self.assertGreater(ann_recall, recall)
def test_vmap_after(self):
batch = 8
qy_size = 128
db_size = 1024
feature_dim = 32
k = 10
rng = jtu.rand_default(self.rng())
qy = rng([qy_size, feature_dim, batch], np.float32)
db = rng([db_size, feature_dim, batch], np.float32)
recall = 0.95
# Create ground truth
gt_scores = lax.dot_general(qy, db, (([1], [1]), ([2], [2])))
_, gt_args = lax.top_k(gt_scores, k)
gt_args = lax.transpose(gt_args, [2, 0, 1])
gt_args = lax.reshape(gt_args, [qy_size * batch, k])
# test target
def approx_max_k(qy, db):
scores = qy @ db.transpose()
return lax.approx_max_k(scores, k)
_, ann_args = jax.vmap(approx_max_k, (2, 2))(qy, db)
ann_args = lax.transpose(ann_args, [2, 0, 1])
ann_args = lax.reshape(ann_args, [qy_size * batch, k])
ann_recall = compute_recall(np.asarray(ann_args), np.asarray(gt_args))
self.assertGreater(ann_recall, recall)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| AnnTest |
python | doocs__leetcode | solution/1400-1499/1456.Maximum Number of Vowels in a Substring of Given Length/Solution.py | {
"start": 0,
"end": 297
} | class ____:
def maxVowels(self, s: str, k: int) -> int:
vowels = set("aeiou")
ans = cnt = sum(c in vowels for c in s[:k])
for i in range(k, len(s)):
cnt += int(s[i] in vowels) - int(s[i - k] in vowels)
ans = max(ans, cnt)
return ans
| Solution |
python | huggingface__transformers | src/transformers/generation/continuous_batching/scheduler.py | {
"start": 9090,
"end": 13179
} | class ____(Scheduler):
"""This scheduler processes requests in the order they arrive, meaning decoding requests has priority over
prefilling requests. Additionally, it includes a safety margin mechanism to prevent cache exhaustion. By default,
when 80% of the cache is full, new requests will not be scheduled to prioritize decoding active requests."""
def __init__(self, cache: PagedAttentionCache, retain_cache_on_finish: bool = False, safety_margin: float = 0.2):
"""Initializes the FIFO scheduler. The safety margin is the percentage of free blocks under which we stop
scheduling new prefill requests, so safety_margin = 0.1 means that when there is less than 10% of free blocks,
or equivalently when more than 90% of blocks are already allocated, we stop scheduling new prefill requests.
"""
super().__init__(cache, retain_cache_on_finish)
self.safety_margin = safety_margin
@traced
def schedule_batch(self, token_budget: int) -> list[RequestState]:
priority_states: list[RequestState] = []
second_priority_states: list[RequestState] = []
scheduled_requests = []
for state in self.active_requests.values():
if state.status == RequestStatus.DECODING:
priority_states.append(state)
if state.status in [RequestStatus.SPLIT_PENDING_REMAINDER, RequestStatus.PREFILLING_SPLIT]:
second_priority_states.append(state)
# Add waiting requests to second priority
for req_id in self.waiting_requests_order:
second_priority_states.append(self.waiting_requests[req_id])
candidates = priority_states + second_priority_states
request_ids_to_remove_from_waiting = set()
safety_margins = self.safety_margin * self.cache.num_blocks
for state in candidates:
# If we are out the safety margin, we only accept decoding requests or the first prefill request
num_free_blocks = self.cache.get_num_free_blocks()
outside_safety_margin = num_free_blocks < safety_margins
if outside_safety_margin and scheduled_requests and state.status != RequestStatus.DECODING:
break
self._prepare_request_for_processing(state, token_budget, request_ids_to_remove_from_waiting)
request_len = len(state.tokens_to_process)
# If we can't allocate blocks, do not schedule the request and break if the cache is full
if not self._allocate_blocks_if_needed(state):
if self.cache.get_num_free_blocks() == 0:
break
continue
# Add the request to the scheduled requests
scheduled_requests.append(state)
# Update the token budget
token_budget -= request_len
# If using prefix sharing, we make note of the blocks that will be computed in the forward pass
if self.cache.use_prefix_sharing:
tokens_in_current_block = state.current_len() % self.cache.block_size
tokens_after_forward = tokens_in_current_block + request_len
complete_blocks = tokens_after_forward // self.cache.block_size
self.cache.blocks_to_complete[state.request_id] = complete_blocks
# Remove the request from the waiting queue and mark it as removed
req_id = state.request_id
was_waiting = self.waiting_requests.pop(req_id, None) is not None
if was_waiting:
request_ids_to_remove_from_waiting.add(req_id)
# Early exit of the loop if we have no token budget left
if token_budget == 0:
break
self.waiting_requests_order = deque(
[req_id for req_id in self.waiting_requests_order if req_id not in request_ids_to_remove_from_waiting]
)
return scheduled_requests
# FIXME: prioritize adding from waiting reqs before scheduling `RequestStatus.DECODING` when cache space allows it
@attach_tracer()
| FIFOScheduler |
python | pallets__click | src/click/exceptions.py | {
"start": 8480,
"end": 8740
} | class ____(UsageError):
"""Raised if an argument is generally supplied but the use of the argument
was incorrect. This is for instance raised if the number of values
for an argument is not correct.
.. versionadded:: 6.0
"""
| BadArgumentUsage |
python | google__pytype | pytype/tests/test_functions1.py | {
"start": 3297,
"end": 29437
} | class ____(test_base.BaseTest):
"""Tests for functions."""
def test_functions(self):
self.Check("""
def fn(a, b=17, c="Hello", d=[]):
d.append(99)
print(a, b, c, d)
fn(1)
fn(2, 3)
fn(3, c="Bye")
fn(4, d=["What?"])
fn(5, "b", "c")
""")
def test_function_locals(self):
self.Check("""
def f():
x = "Spite"
print(x)
def g():
x = "Malice"
print(x)
x = "Humility"
f()
print(x)
g()
print(x)
""")
def test_recursion(self):
self.Check("""
def fact(n):
if n <= 1:
return 1
else:
return n * fact(n-1)
f6 = fact(6)
print(f6)
assert f6 == 720
""")
def test_calling_functions_with_args_kwargs(self):
self.Check("""
def fn(a, b=17, c="Hello", d=[]):
d.append(99)
print(a, b, c, d)
fn(6, *[77, 88])
fn(**{'c': 23, 'a': 7})
fn(6, *[77], **{'c': 23, 'd': [123]})
""")
def test_calling_functions_with_generator_args(self):
self.Check("""
class A:
def next(self):
raise StopIteration()
def __iter__(self):
return A()
def f(*args):
pass
f(*A())
""")
def test_defining_functions_with_args_kwargs(self):
self.Check("""
def fn(*args):
print("args is %r" % (args,))
fn(1, 2)
""")
self.Check("""
def fn(**kwargs):
print("kwargs is %r" % (kwargs,))
fn(red=True, blue=False)
""")
self.Check("""
def fn(*args, **kwargs):
print("args is %r" % (args,))
print("kwargs is %r" % (kwargs,))
fn(1, 2, red=True, blue=False)
""")
self.Check("""
def fn(x, y, *args, **kwargs):
print("x is %r, y is %r" % (x, y))
print("args is %r" % (args,))
print("kwargs is %r" % (kwargs,))
fn('a', 'b', 1, 2, red=True, blue=False)
""")
def test_defining_functions_with_empty_args_kwargs(self):
self.Check("""
def fn(*args):
print("args is %r" % (args,))
fn()
""")
self.Check("""
def fn(**kwargs):
print("kwargs is %r" % (kwargs,))
fn()
""")
self.Check("""
def fn(*args, **kwargs):
print("args is %r, kwargs is %r" % (args, kwargs))
fn()
""")
def test_partial(self):
self.Check("""
from functools import partial
def f(a,b):
return a-b
f7 = partial(f, 7)
four = f7(3)
assert four == 4
""")
def test_partial_with_kwargs(self):
self.Check("""
from functools import partial
def f(a,b,c=0,d=0):
return (a,b,c,d)
f7 = partial(f, b=7, c=1)
them = f7(10)
assert them == (10,7,1,0)
""")
def test_wraps(self):
with test_utils.Tempdir() as d:
d.create_file(
"myfunctools.pyi",
"""
from typing import Any, Callable, Sequence
from typing import Any
_AnyCallable = Callable[..., Any]
def wraps(wrapped: _AnyCallable, assigned: Sequence[str] = ..., updated: Sequence[str] = ...) -> Callable[[_AnyCallable], _AnyCallable]: ...
""",
)
self.Check(
"""
from myfunctools import wraps
def my_decorator(f):
dec = wraps(f)
def wrapper(*args, **kwds):
print('Calling decorated function')
return f(*args, **kwds)
wrapper = dec(wrapper)
return wrapper
@my_decorator
def example():
'''Docstring'''
return 17
assert example() == 17
""",
pythonpath=[d.path],
)
def test_pass_through_args(self):
self.Check("""
def f(a, b):
return a * b
def g(*args, **kwargs):
return f(*args, **kwargs)
assert_type(g(1, 2), int)
""")
def test_pass_through_kwargs(self):
self.Check("""
def f(a, b):
return a * b
def g(*args, **kwargs):
return f(*args, **kwargs)
assert_type(g(a=1, b=2), int)
""")
def test_pass_through_named_args_and_kwargs(self):
self.CheckWithErrors("""
def f(a: int, b: str):
pass
def g(*args, **kwargs):
return f(*args, a='a', **kwargs) # wrong-arg-types
""")
def test_pass_through_partial_named_args_and_kwargs(self):
self.Check("""
class Foo:
def __init__(self, name, labels):
pass
def g(name, bar, **kwargs):
Foo(name=name, **kwargs)
def f(name, x, **args):
g(name=name, bar=x, **args)
f('a', 10, labels=None)
""")
def test_list_comprehension(self):
ty = self.Infer("""
def f(elements):
return "%s" % ",".join(t for t in elements)
""")
self.assertTypesMatchPytd(
ty,
"""
def f(elements) -> str: ...
""",
)
def test_named_arg_unsolvable_max_depth(self):
# Main test here is for this not to throw a KeyError exception upon hitting
# maximum depth.
_, errors = self.InferWithErrors(
"""
def f(x):
return max(foo=repr(__any_object__)) # wrong-keyword-args[e]
""",
maximum_depth=1,
)
self.assertErrorRegexes(errors, {"e": r"foo.*max"})
def test_multiple_signatures_with_type_parameter(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import List, TypeVar
T = TypeVar("T")
def f(x: T, y: int) -> List[T]: ...
def f(x: List[T], y: str) -> List[T]: ...
""",
)
ty = self.Infer(
"""
import foo
def f(x, y):
return foo.f(x, y)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Any
def f(x, y) -> list: ...
""",
)
def test_multiple_signatures_with_multiple_type_parameter(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import List, Tuple, TypeVar
T = TypeVar("T")
def f(arg1: int) -> List[T]: ...
def f(arg2: str) -> Tuple[T, T]: ...
""",
)
ty = self.Infer(
"""
import foo
def f(x):
return foo.f(x)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Any
def f(x) -> Any: ...
""",
)
def test_unknown_single_signature(self):
# Test that the right signature is picked in the presence of an unknown
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import List, TypeVar
T = TypeVar("T")
def f(x: T, y: int) -> List[T]: ...
def f(x: List[T], y: str) -> List[T]: ...
""",
)
ty = self.Infer(
"""
import foo
def f(y):
return foo.f("", y)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import List
def f(y) -> List[str]: ...
""",
)
def test_unknown_with_solved_type_parameter(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import List, TypeVar
T = TypeVar("T")
def f(x: T, y: T) -> List[T]: ...
def f(x: List[T], y: T) -> List[T]: ...
""",
)
ty = self.Infer(
"""
import foo
def f(x):
return foo.f(x, "")
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Any, Union
def f(x) -> list: ...
""",
)
def test_unknown_with_extra_information(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import List, TypeVar
T = TypeVar("T")
def f(x: T) -> List[T]: ...
def f(x: List[T]) -> List[T]: ...
""",
)
ty = self.Infer(
"""
import foo
def f(x):
return foo.f(x)[0].isnumeric()
def g(x):
return foo.f(x) + [""]
def h(x):
ret = foo.f(x)
x + ""
return ret
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Any, List, MutableSequence
def f(x) -> Any: ...
def g(x) -> list: ...
def h(x) -> list: ...
""",
)
def test_type_parameter_in_return(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Generic, TypeVar
T = TypeVar("T")
class MyPattern(Generic[T]):
def match(self, string: T) -> MyMatch[T]: ...
class MyMatch(Generic[T]):
pass
def compile() -> MyPattern[T]: ...
""",
)
ty = self.Infer(
"""
import foo
x = foo.compile().match("")
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
x = ... # type: foo.MyMatch[str]
""",
)
def test_multiple_signatures(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
def f(x: str) -> float: ...
def f(x: int, y: bool) -> int: ...
""",
)
ty = self.Infer(
"""
import foo
x = foo.f(0, True)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
x = ... # type: int
""",
)
def test_multiple_signatures_with_unknown(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
def f(arg1: str) -> float: ...
def f(arg2: int) -> bool: ...
""",
)
ty = self.Infer(
"""
import foo
def f(x):
return foo.f(x)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Any
def f(x) -> Any: ...
""",
)
def test_multiple_signatures_with_optional_arg(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
def f(x: str) -> int: ...
def f(*args) -> float: ...
""",
)
ty = self.Infer(
"""
import foo
def f(x):
return foo.f(x)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Any
def f(x) -> Any: ...
""",
)
def test_multiple_signatures_with_kwarg(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
def f(*, y: int) -> bool: ...
def f(y: str) -> float: ...
""",
)
ty = self.Infer(
"""
import foo
def f(x):
return foo.f(y=x)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Any
def f(x) -> Any: ...
""",
)
def test_isinstance(self):
ty = self.Infer("""
def f(isinstance=isinstance):
pass
def g():
f()
def h():
return isinstance
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Callable, Tuple, Union
def f(isinstance = ...) -> None: ...
def g() -> None: ...
def h() -> Callable[[Any, Union[Tuple[Union[Tuple[type, ...], type], ...], type]], bool]: ...
""",
)
def test_wrong_keyword(self):
errors = self.CheckWithErrors("""
def f(x):
pass
f("", y=42) # wrong-keyword-args[e]
""")
self.assertErrorRegexes(errors, {"e": r"y"})
def test_staticmethod_class(self):
ty = self.Infer("""
v1, = (object.__new__,)
v2 = type(object.__new__)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Callable, Type
v1 = ... # type: Callable
v2 = ... # type: Type[Callable]
""",
)
def test_function_class(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
def f() -> None: ...
""",
)
ty = self.Infer(
"""
import foo
def f(): pass
v1 = (foo.f,)
v2 = type(foo.f)
w1 = (f,)
w2 = type(f)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Any, Callable, Tuple
def f() -> None: ...
v1 = ... # type: Tuple[Callable[[], None]]
v2 = Callable
w1 = ... # type: Tuple[Callable[[], Any]]
w2 = Callable
""",
)
def test_type_parameter_visibility(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Tuple, TypeVar, Union
T = TypeVar("T")
def f(x: T) -> Tuple[Union[T, str], int]: ...
""",
)
ty = self.Infer(
"""
import foo
v1, v2 = foo.f(42j)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Union
v1 = ... # type: Union[str, complex]
v2 = ... # type: int
""",
)
def test_pytd_function_in_class(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
def bar(): ...
""",
)
self.Check(
"""
import foo
class A:
bar = foo.bar
def f(self):
self.bar()
""",
pythonpath=[d.path],
)
def test_interpreter_function_in_class(self):
errors = self.CheckWithErrors("""
class A:
bar = lambda x: x
def f(self):
self.bar(42) # wrong-arg-count[e]
""")
self.assertErrorRegexes(errors, {"e": r"1.*2"})
def test_lambda(self):
# Inspired by b/243664545
self.CheckWithErrors("""
def f():
a = lambda: 1 + "" # unsupported-operands
""")
def test_nested_lambda(self):
# Inspired by b/37869955
self.Check("""
def f(c):
return lambda c: f(c)
""")
def test_nested_lambda2(self):
self.Check("""
def f(d):
return lambda c: f(c)
""")
def test_nested_lambda3(self):
self.Check("""
def f(t):
lambda u=[t,1]: f(u)
""")
def test_set_defaults(self):
self.Check("""
import collections
X = collections.namedtuple("X", "a b c d")
X.__new__.__defaults__ = (3, 4)
a = X(1, 2)
b = X(1, 2, 3)
c = X(1, 2, 3, 4)
""")
def test_set_defaults_non_new(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
def b(x: int, y: int, z: int): ...
""",
)
ty = self.Infer(
"""
import a
a.b.__defaults__ = ('3',)
a.b(1, 2)
c = a.b
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
def c(x: int, y: int, z: int = ...): ...
""",
)
def test_bad_defaults(self):
self.InferWithErrors("""
import collections
X = collections.namedtuple("X", "a b c")
X.__new__.__defaults__ = (1) # bad-function-defaults
""")
def test_multiple_valid_defaults(self):
self.Check("""
import collections
X = collections.namedtuple("X", "a b c")
X.__new__.__defaults__ = (1,) if __random__ else (1,2)
X(0) # should not cause an error
""")
def test_set_defaults_to_expression(self):
# Test that get_atomic_python_constant fails but get_atomic_value pulls out
# a tuple Instance.
self.Check("""
import collections
X = collections.namedtuple("X", "a b c")
X.__new__.__defaults__ = (None,) * len(X._fields)
""")
def test_set_defaults_non_tuple_instance(self):
# Test that get_atomic_python_constant fails and get_atomic_value pulls out
# a non-tuple Instance.
self.InferWithErrors("""
import collections
X = collections.namedtuple("X", "a b c")
X.__new__.__defaults__ = (lambda x: x)(0) # bad-function-defaults
""")
def test_set_builtin_defaults(self):
self.assertNoCrash(
self.Check,
"""
import os
os.chdir.__defaults__ = ("/",)
os.chdir()
""",
)
def test_interpreter_function_defaults(self):
self.Check("""
def test(a, b, c = 4):
return a + b + c
x = test(1, 2)
test.__defaults__ = (3, 4)
y = test(1, 2)
y = test(1)
test.__defaults__ = (2, 3, 4)
z = test()
z = test(1)
z = test(1, 2)
z = test(1, 2, 3)
""")
self.InferWithErrors("""
def test(a, b, c):
return a + b + c
x = test(1, 2) # missing-parameter
test.__defaults__ = (3,)
x = test(1, 2)
x = test(1) # missing-parameter
""")
def test_interpreter_function_defaults_on_class(self):
self.InferWithErrors("""
class Foo:
def __init__(self, a, b, c):
self.a = a
self.b = b
self.c = c
a = Foo() # missing-parameter
Foo.__init__.__defaults__ = (1, 2)
b = Foo(0)
c = Foo() # missing-parameter
""")
def test_split_on_kwargs(self):
ty = self.Infer("""
def make_foo(**kwargs):
varargs = kwargs.pop("varargs", None)
if kwargs:
raise TypeError()
return varargs
Foo = make_foo(varargs=True)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Optional
def make_foo(**kwargs) -> Any: ...
Foo = ... # type: bool
""",
)
def test_pyi_starargs(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
def f(x: str, *args) -> None: ...
""",
)
self.CheckWithErrors(
"""
import foo
foo.f(True, False) # wrong-arg-types
""",
pythonpath=[d.path],
)
def test_starargs_matching_pyi_posargs(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
def f(x: int, y: int, z: int) -> None: ...
""",
)
self.CheckWithErrors(
"""
import foo
def g(x, *args):
foo.f(x, *args)
foo.f(x, 1, *args)
foo.f(x, 1) # missing-parameter
""",
pythonpath=[d.path],
)
def test_starargs_forwarding(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
def f(x: int) -> None: ...
""",
)
self.Check(
"""
import foo
def f(x, y, *args):
for i in args:
foo.f(i)
def g(*args):
f(1, 2, *args)
""",
pythonpath=[d.path],
)
def test_infer_bound_pytd_func(self):
ty = self.Infer("""
import struct
if __random__:
int2byte = struct.Struct(">B").pack
else:
int2byte = chr
""")
self.assertTypesMatchPytd(
ty,
"""
import struct
from typing import overload
@overload
def int2byte(*v) -> bytes: ...
@overload
def int2byte(i: int) -> str: ...
""",
)
def test_preserve_return_union(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Union
def f(x: int) -> Union[int, str]: ...
def f(x: float) -> Union[int, str]: ...
""",
)
ty = self.Infer(
"""
import foo
v = foo.f(__any_object__)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Union
v = ... # type: Union[int, str]
""",
)
def test_call_with_varargs_and_kwargs(self):
self.Check("""
def foo(an_arg):
pass
def bar(an_arg, *args, **kwargs):
foo(an_arg, *args, **kwargs)
""")
def test_functools_partial(self):
ty = self.Infer("""
import functools
def f(a, b):
pass
partial_f = functools.partial(f, 0)
""")
self.assertTypesMatchPytd(
ty,
"""
import functools
def f(a, b) -> None: ...
partial_f: functools.partial
""",
)
def test_functools_partial_star(self):
self.Check("""
from typing import Any
import functools
def f() -> int :
return 42
def test(*args):
assert_type(functools.partial(*args), functools.partial[Any])
# This is WAI in pytype, since *args currently overwrite *all* other
# arguments, including the ones bound positionally prior to *args.
assert_type(functools.partial(f, *args), functools.partial[Any])
""")
def test_functools_partial_kw(self):
self.Check("""
import functools
def f(a, b=None):
pass
partial_f = functools.partial(f, 0)
partial_f(0)
""")
def test_functools_partial_cls(self):
# This is a smoke test that **kwargs do not interfere with the receiver
# in partial.__new__.
self.Check("""
import functools
def f(cls):
return cls
partial_f = functools.partial(f, cls=int)
partial_f()
""")
def test_functools_partial_starstar(self):
self.Check("""
import functools
def f(*, a: str, b: int):
pass
def test(**kwargs):
partial_f = functools.partial(f, **kwargs)
partial_f()
""")
def test_functools_partial_called_with_starstar(self):
self.Check("""
import functools
def f(a: str, b: int, c: list):
pass
partial_f = functools.partial(f, "foo")
def test(**kwargs):
partial_f(42, **kwargs)
""")
def test_functools_star_everywhere(self):
self.Check("""
import functools
def f(a: str, b: int):
pass
def test(args, extra_args):
partial_f = functools.partial(f, *args)
partial_f(*extra_args)
""")
def test_functools_starstar_everywhere(self):
self.Check("""
import functools
def f(*, a: str, b: int):
pass
def test(**kwargs):
partial_f = functools.partial(f, **kwargs)
partial_f(**kwargs)
""")
def test_functools_partial_overloaded(self):
self.Check("""
import functools
from typing import overload
@overload
def f(x: int) -> int: ...
@overload
def f(x: str) -> str: ...
def f(x):
return x
partial_f = functools.partial(f)
# TODO(slebedev): This should be functools.partial[int | str].
assert_type(partial_f, functools.partial)
assert_type(partial_f(1), int)
assert_type(partial_f("s"), str)
""")
def test_functools_partial_overloaded_with_star(self):
self.Check("""
import functools
from typing import overload
@overload
def f(x: int, y: int) -> int: ...
@overload
def f(x: str, y: str) -> str: ...
def f(x, y):
return x
partial_f = functools.partial(f, 42)
def test(*args):
# TODO(slebedev): This should be functools.partial[int].
assert_type(partial_f, functools.partial)
assert_type(partial_f(*args), int)
""")
def test_functools_partial_class(self):
self.Check("""
import functools
class X:
def __init__(self, a, b):
pass
PartialX = functools.partial(X, 0)
PartialX(0)
""")
def test_functools_partial_class_kw(self):
self.Check("""
import functools
class X:
def __init__(self, a, b=None):
pass
PartialX = functools.partial(X, 0)
PartialX(0)
""")
def test_functools_partial_bad_call(self):
errors = self.CheckWithErrors("""
import functools
functools.partial() # wrong-arg-count
functools.partial(42) # wrong-arg-types[e]
""")
self.assertErrorRegexes(errors, {"e": r"Callable.*int"})
def test_bad_comprehensions(self):
# Test that we report errors in comprehensions and generators only once
# while still reporting errors in lambdas.
self.CheckWithErrors("""
[name_error1 for x in ()] # name-error
{name_error2 for x in ()} # name-error
(name_error3 for x in ()) # name-error
lambda x: name_error4 # name-error
""")
  def test_new_function(self):
    # Constructing a function at runtime via types.FunctionType should be
    # inferred as returning a plain Callable.
    ty = self.Infer("""
      import types
      def new_function(code, globals):
        return types.FunctionType(code, globals)
    """)
    self.assertTypesMatchPytd(
        ty,
        """
        import types
        from typing import Callable
        def new_function(code, globals) -> Callable: ...
        """,
    )
  def test_function_globals(self):
    # A function's __globals__ attribute should be typed Dict[str, Any].
    ty = self.Infer("""
      def f():
        def g():
          pass
        return g.__globals__
    """)
    self.assertTypesMatchPytd(
        ty,
        """
        from typing import Any, Dict
        def f() -> Dict[str, Any]: ...
        """,
    )
  def test_hashable(self):
    # Functions are hashable, so passing one where typing.Hashable is
    # expected must type-check.
    self.Check("""
      from typing import Hashable
      def f(x):
        # type: (Hashable) -> None
        pass
      def g():
        pass
      f(g)
    """)
# Allow running this test module directly as a script.
if __name__ == "__main__":
  test_base.main()
| TestFunctions |
python | scipy__scipy | scipy/stats/_hypotests.py | {
"start": 49510,
"end": 70626
} | class ____:
statistic: float
pvalue: float
@xp_capabilities(np_only=True)
def boschloo_exact(table, alternative="two-sided", n=32):
    r"""Perform Boschloo's exact test on a 2x2 contingency table.

    Parameters
    ----------
    table : array_like of ints
        A 2x2 contingency table.  Elements should be non-negative integers.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the null and alternative hypotheses. Default is 'two-sided'.
        Please see explanations in the Notes section below.
    n : int, optional
        Number of sampling points used in the construction of the sampling
        method. Note that this argument will automatically be converted to
        the next higher power of 2 since `scipy.stats.qmc.Sobol` is used to
        select sample points. Default is 32. Must be positive. In most cases,
        32 points is enough to reach good precision. More points comes at
        performance cost.

    Returns
    -------
    ber : BoschlooExactResult
        A result object with the following attributes.

        statistic : float
            The statistic used in Boschloo's test; that is, the p-value
            from Fisher's exact test.
        pvalue : float
            P-value, the probability of obtaining a distribution at least as
            extreme as the one that was actually observed, assuming that the
            null hypothesis is true.

    See Also
    --------
    chi2_contingency : Chi-square test of independence of variables in a
        contingency table.
    fisher_exact : Fisher exact test on a 2x2 contingency table.
    barnard_exact : Barnard's exact test, which is a more powerful alternative
        than Fisher's exact test for 2x2 contingency tables.

    Notes
    -----
    Boschloo's test is an exact test used in the analysis of contingency
    tables. It examines the association of two categorical variables, and
    is a uniformly more powerful alternative to Fisher's exact test
    for 2x2 contingency tables.

    Boschloo's exact test uses the p-value of Fisher's exact test as a
    statistic, and Boschloo's p-value is the probability under the null
    hypothesis of observing such an extreme value of this statistic.

    Let's define :math:`X_0` a 2x2 matrix representing the observed sample,
    where each column stores the binomial experiment, as in the example
    below. Let's also define :math:`p_1, p_2` the theoretical binomial
    probabilities for :math:`x_{11}` and :math:`x_{12}`. When using
    Boschloo exact test, we can assert three different alternative hypotheses:

    - :math:`H_0 : p_1=p_2` versus :math:`H_1 : p_1 < p_2`,
      with `alternative` = "less"

    - :math:`H_0 : p_1=p_2` versus :math:`H_1 : p_1 > p_2`,
      with `alternative` = "greater"

    - :math:`H_0 : p_1=p_2` versus :math:`H_1 : p_1 \neq p_2`,
      with `alternative` = "two-sided" (default)

    There are multiple conventions for computing a two-sided p-value when the
    null distribution is asymmetric. Here, we apply the convention that the
    p-value of a two-sided test is twice the minimum of the p-values of the
    one-sided tests (clipped to 1.0). Note that `fisher_exact` follows a
    different convention, so for a given `table`, the statistic reported by
    `boschloo_exact` may differ from the p-value reported by `fisher_exact`
    when ``alternative='two-sided'``.

    .. versionadded:: 1.7.0

    References
    ----------
    .. [1] R.D. Boschloo. "Raised conditional level of significance for the
       2 x 2-table when testing the equality of two probabilities",
       Statistica Neerlandica, 24(1), 1970

    .. [2] "Boschloo's test", Wikipedia,
       https://en.wikipedia.org/wiki/Boschloo%27s_test

    .. [3] Lise M. Saari et al. "Employee attitudes and job satisfaction",
       Human Resource Management, 43(4), 395-407, 2004,
       :doi:`10.1002/hrm.20032`.

    Examples
    --------
    In the following example, we consider the article "Employee
    attitudes and job satisfaction" [3]_
    which reports the results of a survey from 63 scientists and 117 college
    professors. Of the 63 scientists, 31 said they were very satisfied with
    their jobs, whereas 74 of the college professors were very satisfied
    with their work. Is this significant evidence that college
    professors are happier with their work than scientists?
    The following table summarizes the data mentioned above::

                         college professors   scientists
        Very Satisfied   74                   31
        Dissatisfied     43                   32

    When working with statistical hypothesis testing, we usually use a
    threshold probability or significance level upon which we decide
    to reject the null hypothesis :math:`H_0`. Suppose we choose the common
    significance level of 5%.

    Our alternative hypothesis is that college professors are truly more
    satisfied with their work than scientists. Therefore, we expect
    :math:`p_1` the proportion of very satisfied college professors to be
    greater than :math:`p_2`, the proportion of very satisfied scientists.
    We thus call `boschloo_exact` with the ``alternative="greater"`` option:

    >>> import scipy.stats as stats
    >>> res = stats.boschloo_exact([[74, 31], [43, 32]], alternative="greater")
    >>> res.statistic
    0.0483
    >>> res.pvalue
    0.0355

    Under the null hypothesis that scientists are happier in their work than
    college professors, the probability of obtaining test
    results at least as extreme as the observed data is approximately 3.55%.
    Since this p-value is less than our chosen significance level, we have
    evidence to reject :math:`H_0` in favor of the alternative hypothesis.
    """
    hypergeom = distributions.hypergeom

    # --- Input validation -------------------------------------------------
    if n <= 0:
        raise ValueError(
            "Number of points `n` must be strictly positive,"
            f" found {n!r}"
        )

    table = np.asarray(table, dtype=np.int64)

    if not table.shape == (2, 2):
        raise ValueError("The input `table` must be of shape (2, 2).")

    if np.any(table < 0):
        raise ValueError("All values in `table` must be nonnegative.")

    if 0 in table.sum(axis=0):
        # If both values in column are zero, the p-value is 1 and
        # the score's statistic is NaN.
        return BoschlooExactResult(np.nan, np.nan)

    # Enumerate every possible table with the same column totals:
    # x1 ranges over column 1 counts, x2 over column 2 counts.
    total_col_1, total_col_2 = table.sum(axis=0)
    total = total_col_1 + total_col_2
    x1 = np.arange(total_col_1 + 1, dtype=np.int64).reshape(1, -1)
    x2 = np.arange(total_col_2 + 1, dtype=np.int64).reshape(-1, 1)
    x1_sum_x2 = x1 + x2

    # Fisher one-sided p-values for every possible table; these serve as
    # Boschloo's test statistic.
    if alternative == 'less':
        pvalues = hypergeom.cdf(x1, total, x1_sum_x2, total_col_1).T
    elif alternative == 'greater':
        # Same formula as the 'less' case, but with the second column.
        pvalues = hypergeom.cdf(x2, total, x1_sum_x2, total_col_2).T
    elif alternative == 'two-sided':
        boschloo_less = boschloo_exact(table, alternative="less", n=n)
        boschloo_greater = boschloo_exact(table, alternative="greater", n=n)

        res = (
            boschloo_less if boschloo_less.pvalue < boschloo_greater.pvalue
            else boschloo_greater
        )

        # Two-sided p-value is defined as twice the minimum of the one-sided
        # p-values
        pvalue = np.clip(2 * res.pvalue, a_min=0, a_max=1)
        return BoschlooExactResult(res.statistic, pvalue)
    else:
        msg = (
            f"`alternative` should be one of {'two-sided', 'less', 'greater'},"
            f" found {alternative!r}"
        )
        raise ValueError(msg)

    # Boschloo's statistic: Fisher's p-value at the observed table.
    fisher_stat = pvalues[table[0, 0], table[0, 1]]

    # fisher_stat * (1+1e-13) guards us from small numerical error. It is
    # equivalent to np.isclose with relative tol of 1e-13 and absolute tol of 0
    # For more throughout explanations, see gh-14178
    index_arr = pvalues <= fisher_stat * (1+1e-13)

    x1, x2, x1_sum_x2 = x1.T, x2.T, x1_sum_x2.T
    x1_log_comb = _compute_log_combinations(total_col_1)
    x2_log_comb = _compute_log_combinations(total_col_2)
    x1_sum_x2_log_comb = x1_log_comb[x1] + x2_log_comb[x2]

    # Maximize the p-value over the nuisance parameter in [0, 1] by
    # minimizing the negative log p-value with shgo (Sobol sampling).
    result = shgo(
        _get_binomial_log_p_value_with_nuisance_param,
        args=(x1_sum_x2, x1_sum_x2_log_comb, index_arr),
        bounds=((0, 1),),
        n=n,
        sampling_method="sobol",
    )

    # result.fun is the negative log pvalue and therefore needs to be
    # changed before return
    p_value = np.clip(np.exp(-result.fun), a_min=0, a_max=1)
    return BoschlooExactResult(fisher_stat, p_value)
def _get_binomial_log_p_value_with_nuisance_param(
    nuisance_param, x1_sum_x2, x1_sum_x2_log_comb, index_arr
):
    r"""
    Compute the log pvalue in respect of a nuisance parameter considering
    a 2x2 sample space.

    Parameters
    ----------
    nuisance_param : float
        nuisance parameter used in the computation of the maximisation of
        the p-value. Must be between 0 and 1

    x1_sum_x2 : ndarray
        Sum of x1 and x2 inside barnard_exact

    x1_sum_x2_log_comb : ndarray
        sum of the log combination of x1 and x2

    index_arr : ndarray of boolean
        Mask selecting the tables at least as extreme as the observed one.

    Returns
    -------
    p_value : float
        Return the maximum p-value considering every nuisance parameter
        between 0 and 1

    Notes
    -----
    Both Barnard's test and Boschloo's test iterate over a nuisance parameter
    :math:`\pi \in [0, 1]` to find the maximum p-value. To search this
    maxima, this function return the negative log pvalue with respect to the
    nuisance parameter passed in params. This negative log p-value is then
    used in `shgo` to find the minimum negative pvalue which is our maximum
    pvalue.

    Also, to compute the different combination used in the
    p-values' computation formula, this function uses `gammaln` which is
    more tolerant for large value than `scipy.special.comb`. `gammaln` gives
    a log combination. For the little precision loss, performances are
    improved a lot.
    """
    t1, t2 = x1_sum_x2.shape
    n = t1 + t2 - 2
    with np.errstate(divide="ignore", invalid="ignore"):
        # log(pi) and log(1 - pi); out-of-domain entries are written as 0
        # and then explicitly zeroed below where the exponent is 0 anyway.
        log_nuisance = np.log(
            nuisance_param,
            out=np.zeros_like(nuisance_param),
            where=nuisance_param >= 0,
        )
        log_1_minus_nuisance = np.log(
            1 - nuisance_param,
            out=np.zeros_like(nuisance_param),
            where=1 - nuisance_param >= 0,
        )

        nuisance_power_x1_x2 = log_nuisance * x1_sum_x2
        # 0 * log(0) is defined as 0 here (limit convention).
        nuisance_power_x1_x2[(x1_sum_x2 == 0)[:, :]] = 0

        nuisance_power_n_minus_x1_x2 = log_1_minus_nuisance * (n - x1_sum_x2)
        nuisance_power_n_minus_x1_x2[(x1_sum_x2 == n)[:, :]] = 0

        tmp_log_values_arr = (
            x1_sum_x2_log_comb
            + nuisance_power_x1_x2
            + nuisance_power_n_minus_x1_x2
        )

    tmp_values_from_index = tmp_log_values_arr[index_arr]

    # To avoid dividing by zero in log function and getting inf value,
    # values are centered according to the max
    max_value = tmp_values_from_index.max()

    # To have better result's precision, the log pvalue is taken here.
    # Indeed, pvalue is included inside [0, 1] interval. Passing the
    # pvalue to log makes the interval a lot bigger ([-inf, 0]), and thus
    # help us to achieve better precision
    with np.errstate(divide="ignore", invalid="ignore"):
        log_probs = np.exp(tmp_values_from_index - max_value).sum()
        log_pvalue = max_value + np.log(
            log_probs,
            out=np.full_like(log_probs, -np.inf),
            where=log_probs > 0,
        )

    # Since shgo find the minima, minus log pvalue is returned
    return -log_pvalue
@np.vectorize(otypes=[np.float64])
def _pval_cvm_2samp_exact(s, m, n):
    """
    Compute the exact p-value of the Cramer-von Mises two-sample test
    for a given value s of the test statistic.
    m and n are the sizes of the samples.

    [1] Y. Xiao, A. Gordon, and A. Yakovlev, "A C++ Program for
    the Cramér-Von Mises Two-Sample Test", J. Stat. Soft.,
    vol. 17, no. 8, pp. 1-15, Dec. 2006.
    [2] T. W. Anderson "On the Distribution of the Two-Sample Cramer-von Mises
    Criterion," The Annals of Mathematical Statistics, Ann. Math. Statist.
    33(3), 1148-1159, (September, 1962)
    """
    # [1, p. 3]
    lcm = np.lcm(m, n)
    # [1, p. 4], below eq. 3
    a = lcm // m
    b = lcm // n
    # Combine Eq. 9 in [2] with Eq. 2 in [1] and solve for $\zeta$
    # Hint: `s` is $U$ in [2], and $T_2$ in [1] is $T$ in [2]
    mn = m * n
    zeta = lcm ** 2 * (m + n) * (6 * s - mn * (4 * mn - 1)) // (6 * mn ** 2)

    # bound maximum value that may appear in `gs` (remember both rows!)
    zeta_bound = lcm**2 * (m + n)  # bound elements in row 1
    combinations = math.comb(m + n, m)  # sum of row 2
    max_gs = max(zeta_bound, combinations)
    # Pick the smallest integer dtype that can hold every intermediate
    # value, to keep the frequency tables compact.
    dtype = np.min_scalar_type(max_gs)

    # the frequency table of $g_{u, v}^+$ defined in [1, p. 6]
    # gs[v] is a 2-row array: row 0 holds distinct statistic values,
    # row 1 their multiplicities.
    gs = ([np.array([[0], [1]], dtype=dtype)]
          + [np.empty((2, 0), dtype=dtype) for _ in range(m)])
    for u in range(n + 1):
        next_gs = []
        tmp = np.empty((2, 0), dtype=dtype)
        for v, g in enumerate(gs):
            # Calculate g recursively with eq. 11 in [1]. Even though it
            # doesn't look like it, this also does 12/13 (all of Algorithm 1).
            # Merge the running table with g, summing frequencies of
            # values common to both.
            vi, i0, i1 = np.intersect1d(tmp[0], g[0], return_indices=True)
            tmp = np.concatenate([
                np.stack([vi, tmp[1, i0] + g[1, i1]]),
                np.delete(tmp, i0, 1),
                np.delete(g, i1, 1)
            ], 1)
            res = (a * v - b * u) ** 2
            tmp[0] += res.astype(dtype)
            next_gs.append(tmp)
        gs = next_gs
    value, freq = gs[m]
    # p-value: fraction of orderings with statistic at least zeta.
    return np.float64(np.sum(freq[value >= zeta]) / combinations)
def _pval_cvm_2samp_asymptotic(t, N, nx, ny, k, *, xp):
    """Asymptotic p-value for the two-sample Cramér-von Mises statistic.

    Normalizes ``t`` using its null mean and variance (eq. 11, 14, 15 in
    Anderson 1962) and evaluates the limiting one-sample distribution.
    """
    # Expected value and variance of T under the null (eq. 11 and 14 in [2]).
    mean_t = (1 + 1 / N) / 6
    var_t = ((N + 1) * (4 * k * N - 3 * (nx ** 2 + ny ** 2) - 2 * k)
             / (45 * N ** 2 * 4 * k))
    # Normalized statistic (eq. 15 in [2]).
    tn = 1 / 6 + (t - mean_t) / math.sqrt(45 * var_t)
    # Approximate the distribution of tn with the limiting distribution of
    # the one-sample statistic.  For tn < 0.003, _cdf_cvm_inf(tn) is below
    # 1.28e-18, so the p-value is returned as 1.0 directly.
    return xpx.apply_where(tn >= 0.003,
                           (tn,),
                           lambda tn: xp.clip(1. - _cdf_cvm_inf(tn, xp=xp), 0.),
                           fill_value=1.)
@xp_capabilities(skip_backends=[('cupy', 'needs rankdata'),
                                ('dask.array', 'needs rankdata')],
                 cpu_only=True, jax_jit=False)
@_axis_nan_policy_factory(CramerVonMisesResult, n_samples=2, too_small=1,
                          result_to_tuple=_cvm_result_to_tuple)
def cramervonmises_2samp(x, y, method='auto', *, axis=0):
    r"""Perform the two-sample Cramér-von Mises test for goodness of fit.

    This is the two-sample version of the Cramér-von Mises test ([1]_):
    for two independent samples :math:`X_1, ..., X_n` and
    :math:`Y_1, ..., Y_m`, the null hypothesis is that the samples
    come from the same (unspecified) continuous distribution.

    The test statistic :math:`T` is defined as in [1]_:

    .. math::

        T = \frac{nm}{n+m}\omega^2 =
        \frac{U}{n m (n+m)} - \frac{4 m n - 1}{6(m+n)}

    where :math:`U` is defined as below, and :math:`\omega^2` is the Cramér-von
    Mises criterion. The function :math:`r(\cdot)` here denotes the rank of the
    observed values :math:`x_i` and :math:`y_j` within the pooled sample of size
    :math:`n + m`, with ties assigned mid-rank values:

    .. math::

        U = n \sum_{i=1}^n (r(x_i)-i)^2 + m \sum_{j=1}^m (r(y_j)-j)^2

    Parameters
    ----------
    x : array_like
        A 1-D array of observed values of the random variables :math:`X_i`.
        Must contain at least two observations.
    y : array_like
        A 1-D array of observed values of the random variables :math:`Y_i`.
        Must contain at least two observations.
    method : {'auto', 'asymptotic', 'exact'}, optional
        The method used to compute the p-value, see Notes for details.
        The default is 'auto'.
    axis : int or tuple of ints, default: 0
        If an int or tuple of ints, the axis or axes of the input along which
        to compute the statistic. The statistic of each axis-slice (e.g. row)
        of the input will appear in a corresponding element of the output.
        If ``None``, the input will be raveled before computing the statistic.

    Returns
    -------
    res : object with attributes
        statistic : float
            Cramér-von Mises statistic :math:`T`.
        pvalue : float
            The p-value.

    See Also
    --------
    cramervonmises, anderson_ksamp, epps_singleton_2samp, ks_2samp

    Notes
    -----
    .. versionadded:: 1.7.0

    The statistic is computed according to equation 9 in [2]_. The
    calculation of the p-value depends on the keyword `method`:

    - ``asymptotic``: The p-value is approximated by using the limiting
      distribution of the test statistic.
    - ``exact``: The exact p-value is computed by enumerating all
      possible combinations of the test statistic, see [2]_.

    If ``method='auto'``, the exact approach is used
    if both samples contain equal to or less than 20 observations,
    otherwise the asymptotic distribution is used.

    If the underlying distribution is not continuous, the p-value is likely to
    be conservative (Section 6.2 in [3]_). When ranking the data to compute
    the test statistic, midranks are used if there are ties.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Cramer-von_Mises_criterion
    .. [2] Anderson, T.W. (1962). On the distribution of the two-sample
           Cramer-von-Mises criterion. The Annals of Mathematical
           Statistics, pp. 1148-1159.
    .. [3] Conover, W.J., Practical Nonparametric Statistics, 1971.

    Examples
    --------
    Suppose we wish to test whether two samples generated by
    ``scipy.stats.norm.rvs`` have the same distribution. We choose a
    significance level of alpha=0.05.

    >>> import numpy as np
    >>> from scipy import stats
    >>> rng = np.random.default_rng()
    >>> x = stats.norm.rvs(size=100, random_state=rng)
    >>> y = stats.norm.rvs(size=70, random_state=rng)
    >>> res = stats.cramervonmises_2samp(x, y)
    >>> res.statistic, res.pvalue
    (0.29376470588235293, 0.1412873014573014)

    The p-value exceeds our chosen significance level, so we do not
    reject the null hypothesis that the observed samples are drawn from the
    same distribution.

    For small sample sizes, one can compute the exact p-values:

    >>> x = stats.norm.rvs(size=7, random_state=rng)
    >>> y = stats.t.rvs(df=2, size=6, random_state=rng)
    >>> res = stats.cramervonmises_2samp(x, y, method='exact')
    >>> res.statistic, res.pvalue
    (0.197802197802198, 0.31643356643356646)

    The p-value based on the asymptotic distribution is a good approximation
    even though the sample size is small.

    >>> res = stats.cramervonmises_2samp(x, y, method='asymptotic')
    >>> res.statistic, res.pvalue
    (0.197802197802198, 0.2966041181527128)

    Independent of the method, one would not reject the null hypothesis at the
    chosen significance level in this example.
    """
    xp = array_namespace(x, y)
    nx = x.shape[-1]
    ny = y.shape[-1]

    if nx <= 1 or ny <= 1:  # only needed for testing / `test_axis_nan_policy`
        raise ValueError('x and y must contain at least two observations.')
    if method not in ['auto', 'exact', 'asymptotic']:
        raise ValueError('method must be either auto, exact or asymptotic.')

    if method == 'auto':
        # Exact enumeration is only feasible for small samples.
        if max(nx, ny) > 20:
            method = 'asymptotic'
        else:
            method = 'exact'

    # axis=-1 is guaranteed by _axis_nan_policy decorator
    xa = xp.sort(x, axis=-1)
    ya = xp.sort(y, axis=-1)

    # get ranks of x and y in the pooled sample
    z = xp.concat([xa, ya], axis=-1)
    # in case of ties, use midrank (see [1])
    r = scipy.stats.rankdata(z, method='average', axis=-1)
    dtype = xp_result_type(x, y, force_floating=True, xp=xp)
    r = xp.astype(r, dtype, copy=False)
    rx = r[..., :nx]
    ry = r[..., nx:]

    # compute U (eq. 10 in [2])
    u = (nx * xp.sum((rx - xp.arange(1, nx+1, dtype=dtype))**2, axis=-1)
         + ny * xp.sum((ry - xp.arange(1, ny+1, dtype=dtype))**2, axis=-1))

    # compute T (eq. 9 in [2])
    k, N = nx*ny, nx + ny
    t = u / (k*N) - (4*k - 1)/(6*N)

    if method == 'exact':
        p = xp.asarray(_pval_cvm_2samp_exact(np.asarray(u), nx, ny), dtype=dtype)
    else:
        p = _pval_cvm_2samp_asymptotic(t, N, nx, ny, k, xp=xp)

    # Unwrap 0-d arrays to scalars for backward-compatible scalar output.
    t = t[()] if t.ndim == 0 else t
    p = p[()] if p.ndim == 0 else p
    return CramerVonMisesResult(statistic=t, pvalue=p)
| BoschlooExactResult |
python | celery__celery | celery/app/base.py | {
"start": 8380,
"end": 56699
} | class ____:
"""Celery application.
Arguments:
main (str): Name of the main module if running as `__main__`.
This is used as the prefix for auto-generated task names.
Keyword Arguments:
broker (str): URL of the default broker used.
backend (Union[str, Type[celery.backends.base.Backend]]):
The result store backend class, or the name of the backend
class to use.
Default is the value of the :setting:`result_backend` setting.
autofinalize (bool): If set to False a :exc:`RuntimeError`
will be raised if the task registry or tasks are used before
the app is finalized.
set_as_current (bool): Make this the global current app.
include (List[str]): List of modules every worker should import.
amqp (Union[str, Type[AMQP]]): AMQP object or class name.
events (Union[str, Type[celery.app.events.Events]]): Events object or
class name.
log (Union[str, Type[Logging]]): Log object or class name.
control (Union[str, Type[celery.app.control.Control]]): Control object
or class name.
tasks (Union[str, Type[TaskRegistry]]): A task registry, or the name of
a registry class.
fixups (List[str]): List of fix-up plug-ins (e.g., see
:mod:`celery.fixups.django`).
config_source (Union[str, class]): Take configuration from a class,
or object. Attributes may include any settings described in
the documentation.
task_cls (Union[str, Type[celery.app.task.Task]]): base task class to
use. See :ref:`this section <custom-task-cls-app-wide>` for usage.
"""
#: This is deprecated, use :meth:`reduce_keys` instead
Pickler = AppPickler
SYSTEM = platforms.SYSTEM
IS_macOS, IS_WINDOWS = platforms.IS_macOS, platforms.IS_WINDOWS
#: Name of the `__main__` module. Required for standalone scripts.
#:
#: If set this will be used instead of `__main__` when automatically
#: generating task names.
main = None
#: Custom options for command-line programs.
#: See :ref:`extending-commandoptions`
user_options = None
#: Custom bootsteps to extend and modify the worker.
#: See :ref:`extending-bootsteps`.
steps = None
builtin_fixups = BUILTIN_FIXUPS
amqp_cls = 'celery.app.amqp:AMQP'
backend_cls = None
events_cls = 'celery.app.events:Events'
loader_cls = None
log_cls = 'celery.app.log:Logging'
control_cls = 'celery.app.control:Control'
task_cls = 'celery.app.task:Task'
registry_cls = 'celery.app.registry:TaskRegistry'
#: Thread local storage.
_local = None
_fixups = None
_pool = None
_conf = None
_after_fork_registered = False
#: Signal sent when app is loading configuration.
on_configure = None
#: Signal sent after app has prepared the configuration.
on_after_configure = None
#: Signal sent after app has been finalized.
on_after_finalize = None
#: Signal sent by every new process after fork.
on_after_fork = None
    def __init__(self, main=None, loader=None, backend=None,
                 amqp=None, events=None, log=None, control=None,
                 set_as_current=True, tasks=None, broker=None, include=None,
                 changes=None, config_source=None, fixups=None, task_cls=None,
                 autofinalize=True, namespace=None, strict_typing=True,
                 **kwargs):
        """Initialize the application (see class docstring for arguments)."""
        self._local = threading.local()
        self._backend_cache = None

        self.clock = LamportClock()
        self.main = main
        # Constructor arguments override the class-level defaults.
        self.amqp_cls = amqp or self.amqp_cls
        self.events_cls = events or self.events_cls
        self.loader_cls = loader or self._get_default_loader()
        self.log_cls = log or self.log_cls
        self.control_cls = control or self.control_cls
        self._custom_task_cls_used = (
            # Custom task class provided as argument
            bool(task_cls)
            # subclass of Celery with a task_cls attribute
            or self.__class__ is not Celery and hasattr(self.__class__, 'task_cls')
        )
        self.task_cls = task_cls or self.task_cls
        self.set_as_current = set_as_current
        self.registry_cls = symbol_by_name(self.registry_cls)
        self.user_options = defaultdict(set)
        self.steps = defaultdict(set)
        self.autofinalize = autofinalize
        self.namespace = namespace
        self.strict_typing = strict_typing

        self.configured = False
        self._config_source = config_source
        self._pending_defaults = deque()
        self._pending_periodic_tasks = deque()

        self.finalized = False
        self._finalize_mutex = threading.RLock()
        self._pending = deque()
        self._tasks = tasks
        # Accept either a ready-made registry or a mapping of tasks.
        if not isinstance(self._tasks, TaskRegistry):
            self._tasks = self.registry_cls(self._tasks or {})

        # If the class defines a custom __reduce_args__ we need to use
        # the old way of pickling apps: pickling a list of
        # args instead of the new way that pickles a dict of keywords.
        self._using_v1_reduce = app_has_custom(self, '__reduce_args__')

        # these options are moved to the config to
        # simplify pickling of the app object.
        self._preconf = changes or {}
        self._preconf_set_by_auto = set()
        self.__autoset('broker_url', broker)
        self.__autoset('result_backend', backend)
        self.__autoset('include', include)

        for key, value in kwargs.items():
            self.__autoset(key, value)

        # Configuration stays lazy until first access (PendingConfiguration).
        self._conf = Settings(
            PendingConfiguration(
                self._preconf, self._finalize_pending_conf),
            prefix=self.namespace,
            keys=(_old_key_to_new, _new_key_to_old),
        )

        # - Apply fix-ups.
        self.fixups = set(self.builtin_fixups) if fixups is None else fixups
        # ...store fixup instances in _fixups to keep weakrefs alive.
        self._fixups = [symbol_by_name(fixup)(self) for fixup in self.fixups]

        if self.set_as_current:
            self.set_current()

        # Signals
        if self.on_configure is None:
            # used to be a method pre 4.0
            self.on_configure = Signal(name='app.on_configure')
        self.on_after_configure = Signal(
            name='app.on_after_configure',
            providing_args={'source'},
        )
        self.on_after_finalize = Signal(name='app.on_after_finalize')
        self.on_after_fork = Signal(name='app.on_after_fork')

        # Boolean signalling, whether fast_trace_task are enabled.
        # this attribute is set in celery.worker.trace and checked by celery.worker.request
        self.use_fast_trace_task = False

        self.on_init()
        _register_app(self)
def _get_default_loader(self):
# the --loader command-line argument sets the environment variable.
return (
os.environ.get('CELERY_LOADER') or
self.loader_cls or
'celery.loaders.app:AppLoader'
)
    def on_init(self):
        """Optional callback called at init.

        Subclasses may override this hook; the base implementation
        does nothing.
        """
def __autoset(self, key, value):
if value is not None:
self._preconf[key] = value
self._preconf_set_by_auto.add(key)
    def set_current(self):
        """Make this the current app for this thread."""
        # Delegates to the thread-local app registry.
        _set_current_app(self)
    def set_default(self):
        """Make this the default app for all threads."""
        # Unlike set_current(), this affects the process-wide default.
        set_default_app(self)
def _ensure_after_fork(self):
if not self._after_fork_registered:
self._after_fork_registered = True
if register_after_fork is not None:
register_after_fork(self, _after_fork_cleanup_app)
    def close(self):
        """Clean up after the application.

        Only necessary for dynamically created apps, and you should
        probably use the :keyword:`with` statement instead.

        Example:
            >>> with Celery(set_as_current=False) as app:
            ...     with app.connection_for_write() as conn:
            ...         pass
        """
        # Drop the connection pool and remove this app from the registry.
        self._pool = None
        _deregister_app(self)
def start(self, argv=None):
"""Run :program:`celery` using `argv`.
Uses :data:`sys.argv` if `argv` is not specified.
"""
from celery.bin.celery import celery
celery.params[0].default = self
if argv is None:
argv = sys.argv
try:
celery.main(args=argv, standalone_mode=False)
except Exit as e:
return e.exit_code
finally:
celery.params[0].default = None
def worker_main(self, argv=None):
"""Run :program:`celery worker` using `argv`.
Uses :data:`sys.argv` if `argv` is not specified.
"""
if argv is None:
argv = sys.argv
if 'worker' not in argv:
raise ValueError(
"The worker sub-command must be specified in argv.\n"
"Use app.start() to programmatically start other commands."
)
self.start(argv=argv)
    def task(self, *args, **opts):
        """Decorator to create a task class out of any callable.

        See :ref:`Task options<task-options>` for a list of the
        arguments that can be passed to this decorator.

        Examples:
            .. code-block:: python

                @app.task
                def refresh_feed(url):
                    store_feed(feedparser.parse(url))

            with setting extra options:

            .. code-block:: python

                @app.task(exchange='feeds')
                def refresh_feed(url):
                    return store_feed(feedparser.parse(url))

        Note:
            App Binding: For custom apps the task decorator will return
            a proxy object, so that the act of creating the task is not
            performed until the task is used or the task registry is accessed.

            If you're depending on binding to be deferred, then you must
            not access any attributes on the returned object until the
            application is fully set up (finalized).
        """
        if USING_EXECV and opts.get('lazy', True):
            # When using execv the task in the original module will point to a
            # different app, so doing things like 'add.request' will point to
            # a different task instance.  This makes sure it will always use
            # the task instance from the current app.
            # Really need a better solution for this :(
            from . import shared_task
            return shared_task(*args, lazy=False, **opts)

        def inner_create_task_cls(shared=True, filter=None, lazy=True, **opts):
            # `filter` shadows the builtin here, keep a private alias.
            _filt = filter

            def _create_task_cls(fun):
                if shared:
                    # Register a constructor so the task is re-created on
                    # every app when the app is finalized.
                    def cons(app):
                        return app._task_from_fun(fun, **opts)
                    cons.__name__ = fun.__name__
                    connect_on_app_finalize(cons)
                if not lazy or self.finalized:
                    ret = self._task_from_fun(fun, **opts)
                else:
                    # return a proxy object that evaluates on first use
                    ret = PromiseProxy(self._task_from_fun, (fun,), opts,
                                       __doc__=fun.__doc__)
                    self._pending.append(ret)
                if _filt:
                    return _filt(ret)
                return ret

            return _create_task_cls

        # Bare usage: @app.task (single positional callable, no options).
        if len(args) == 1:
            if callable(args[0]):
                return inner_create_task_cls(**opts)(*args)
            raise TypeError('argument 1 to @task() must be a callable')
        if args:
            raise TypeError(
                '@task() takes exactly 1 argument ({} given)'.format(
                    sum([len(args), len(opts)])))
        # Parametrized usage: @app.task(...) returns the real decorator.
        return inner_create_task_cls(**opts)
    def type_checker(self, fun, bound=False):
        # Build a signature-checking stub used as the task's __header__;
        # `bound` indicates whether `fun` expects `self` as first argument.
        return staticmethod(head_from_fun(fun, bound=bound))
    def _task_from_fun(
        self,
        fun,
        name=None,
        base=None,
        bind=False,
        pydantic: bool = False,
        pydantic_strict: bool = False,
        pydantic_context: typing.Optional[typing.Dict[str, typing.Any]] = None,
        pydantic_dump_kwargs: typing.Optional[typing.Dict[str, typing.Any]] = None,
        **options,
    ):
        """Create (or look up) a task class from a plain callable.

        The generated class subclasses `base` (default: ``self.Task``) and
        is registered in the app's task registry under `name`.  If a task
        with the same name already exists, the existing task is returned
        unchanged.
        """
        if not self.finalized and not self.autofinalize:
            raise RuntimeError('Contract breach: app not finalized')
        name = name or self.gen_task_name(fun.__name__, fun.__module__)
        base = base or self.Task

        if name not in self._tasks:
            if pydantic is True:
                # Wrap the callable so arguments/return values are validated
                # and (de)serialized through pydantic models.
                fun = pydantic_wrapper(self, fun, name, pydantic_strict, pydantic_context, pydantic_dump_kwargs)

            # For bound tasks the callable becomes a regular method
            # (receives the task instance); otherwise keep it static.
            run = fun if bind else staticmethod(fun)
            task = type(fun.__name__, (base,), dict({
                'app': self,
                'name': name,
                'run': run,
                '_decorated': True,
                '__doc__': fun.__doc__,
                '__module__': fun.__module__,
                '__annotations__': fun.__annotations__,
                '__header__': self.type_checker(fun, bound=bind),
                '__wrapped__': run}, **options))()
            # for some reason __qualname__ cannot be set in type()
            # so we have to set it here.
            try:
                task.__qualname__ = fun.__qualname__
            except AttributeError:
                pass
            self._tasks[task.name] = task
            task.bind(self)  # connects task to this app

            add_autoretry_behaviour(task, **options)
        else:
            task = self._tasks[name]
        return task
def register_task(self, task, **options):
"""Utility for registering a task-based class.
Note:
This is here for compatibility with old Celery 1.0
style task classes, you should not need to use this for
new projects.
"""
task = inspect.isclass(task) and task() or task
if not task.name:
task_cls = type(task)
task.name = self.gen_task_name(
task_cls.__name__, task_cls.__module__)
add_autoretry_behaviour(task, **options)
self.tasks[task.name] = task
task._app = self
task.bind(self)
return task
    def gen_task_name(self, name, module):
        # Delegate to the module-level helper; overridable per-app hook
        # for customizing automatic task naming.
        return gen_task_name(self, name, module)
    def finalize(self, auto=False):
        """Finalize the app.

        This loads built-in tasks, evaluates pending task decorators,
        reads configuration, etc.

        Arguments:
            auto (bool): True when called implicitly (e.g. by attribute
                access); raises if the app disallows auto-finalization.
        """
        with self._finalize_mutex:
            if not self.finalized:
                if auto and not self.autofinalize:
                    raise RuntimeError('Contract breach: app not finalized')
                self.finalized = True
                _announce_app_finalized(self)

                # Force evaluation of every pending PromiseProxy task.
                pending = self._pending
                while pending:
                    maybe_evaluate(pending.popleft())

                # Re-bind all registered tasks to this app.
                for task in self._tasks.values():
                    task.bind(self)

                self.on_after_finalize.send(sender=self)
    def add_defaults(self, fun):
        """Add default configuration from dict ``d``.

        If the argument is a callable function then it will be regarded
        as a promise, and it won't be loaded until the configuration is
        actually needed.

        This method can be compared to:

        .. code-block:: pycon

            >>> celery.conf.update(d)

        with a difference that 1) no copy will be made and 2) the dict will
        not be transferred when the worker spawns child processes, so
        it's important that the same configuration happens at import time
        when pickle restores the object on the other side.
        """
        if not callable(fun):
            # A plain dict was passed: wrap it in a lambda so the rest of
            # the method can treat both cases uniformly as promises.
            d, fun = fun, lambda: d
        if self.configured:
            return self._conf.add_defaults(fun())
        # Defer until configuration is first loaded.
        self._pending_defaults.append(fun)
    def config_from_object(self, obj,
                           silent=False, force=False, namespace=None):
        """Read configuration from object.

        Object is either an actual object or the name of a module to import.

        Example:
            >>> celery.config_from_object('myapp.celeryconfig')

            >>> from myapp import celeryconfig
            >>> celery.config_from_object(celeryconfig)

        Arguments:
            silent (bool): If true then import errors will be ignored.
            force (bool): Force reading configuration immediately.
                By default the configuration will be read only when required.
        """
        self._config_source = obj
        self.namespace = namespace or self.namespace
        if force or self.configured:
            # Drop the cached settings so they are rebuilt from the new
            # source; `self.conf` re-reads lazily on access.
            self._conf = None
            if self.loader.config_from_object(obj, silent=silent):
                return self.conf
def config_from_envvar(self, variable_name, silent=False, force=False):
    """Read configuration from environment variable.

    The value of the environment variable must be the name
    of a module to import.

    Example:
        >>> os.environ['CELERY_CONFIG_MODULE'] = 'myapp.celeryconfig'
        >>> celery.config_from_envvar('CELERY_CONFIG_MODULE')
    """
    module_name = os.environ.get(variable_name)
    if module_name:
        return self.config_from_object(module_name, silent=silent, force=force)
    # Variable missing or empty: either fail quietly or loudly.
    if silent:
        return False
    raise ImproperlyConfigured(ERR_ENVVAR_NOT_SET.strip().format(variable_name))
def config_from_cmdline(self, argv, namespace='celery'):
    """Apply configuration settings parsed from command-line arguments."""
    self._conf.update(
        self.loader.cmdline_config_parser(argv, namespace)
    )
def setup_security(self, allowed_serializers=None, key=None, key_password=None, cert=None,
                   store=None, digest=DEFAULT_SECURITY_DIGEST,
                   serializer='json'):
    """Setup the message-signing serializer.

    This will affect all application instances (a global operation).

    Disables untrusted serializers and if configured to use the ``auth``
    serializer will register the ``auth`` serializer with the provided
    settings into the Kombu serializer registry.

    Arguments:
        allowed_serializers (Set[str]): List of serializer names, or
            content_types that should be exempt from being disabled.
        key (str): Name of private key file to use.
            Defaults to the :setting:`security_key` setting.
        key_password (bytes): Password to decrypt the private key.
            Defaults to the :setting:`security_key_password` setting.
        cert (str): Name of certificate file to use.
            Defaults to the :setting:`security_certificate` setting.
        store (str): Directory containing certificates.
            Defaults to the :setting:`security_cert_store` setting.
        digest (str): Digest algorithm used when signing messages.
            Default is ``sha256``.
        serializer (str): Serializer used to encode messages after
            they've been signed.  See :setting:`task_serializer` for
            the serializers supported.  Default is ``json``.
    """
    # Imported lazily to avoid a hard dependency at module import time.
    from celery.security import setup_security
    return setup_security(allowed_serializers, key, key_password, cert,
                          store, digest, serializer, app=self)
def autodiscover_tasks(self, packages=None,
                       related_name='tasks', force=False):
    """Auto-discover task modules.

    Searches a list of packages for a "tasks.py" module (or use
    related_name argument).

    If the name is empty, this will be delegated to fix-ups (e.g., Django).

    For example if you have a directory layout like this:

    .. code-block:: text

        foo/__init__.py
           tasks.py
           models.py

        bar/__init__.py
            tasks.py
            models.py

        baz/__init__.py
            models.py

    Then calling ``app.autodiscover_tasks(['foo', 'bar', 'baz'])`` will
    result in the modules ``foo.tasks`` and ``bar.tasks`` being imported.

    Arguments:
        packages (List[str]): List of packages to search.
            This argument may also be a callable, in which case the
            value returned is used (for lazy evaluation).
        related_name (Optional[str]): The name of the module to find.  Defaults
            to "tasks": meaning "look for 'module.tasks' for every
            module in ``packages``.".  If ``None`` will only try to import
            the package, i.e. "look for 'module'".
        force (bool): By default this call is lazy so that the actual
            auto-discovery won't happen until an application imports
            the default modules.  Forcing will cause the auto-discovery
            to happen immediately.
    """
    if force:
        return self._autodiscover_tasks(packages, related_name)
    # Lazy path: defer discovery until the import_modules signal fires.
    signals.import_modules.connect(starpromise(
        self._autodiscover_tasks, packages, related_name,
    ), weak=False, sender=self)
def _autodiscover_tasks(self, packages, related_name, **kwargs):
if packages:
return self._autodiscover_tasks_from_names(packages, related_name)
return self._autodiscover_tasks_from_fixups(related_name)
def _autodiscover_tasks_from_names(self, packages, related_name):
    """Discover tasks in an explicit list of packages via the loader."""
    # packages argument can be lazy (a callable returning the list).
    return self.loader.autodiscover_tasks(
        packages() if callable(packages) else packages, related_name,
    )
def _autodiscover_tasks_from_fixups(self, related_name):
return self._autodiscover_tasks_from_names([
pkg for fixup in self._fixups
if hasattr(fixup, 'autodiscover_tasks')
for pkg in fixup.autodiscover_tasks()
], related_name=related_name)
def send_task(self, name, args=None, kwargs=None, countdown=None,
              eta=None, task_id=None, producer=None, connection=None,
              router=None, result_cls=None, expires=None,
              publisher=None, link=None, link_error=None,
              add_to_parent=True, group_id=None, group_index=None,
              retries=0, chord=None,
              reply_to=None, time_limit=None, soft_time_limit=None,
              root_id=None, parent_id=None, route_name=None,
              shadow=None, chain=None, task_type=None, replaced_task_nesting=0, **options):
    """Send task by name.

    Supports the same arguments as :meth:`@-Task.apply_async`.

    Arguments:
        name (str): Name of task to call (e.g., `"tasks.add"`).
        result_cls (AsyncResult): Specify custom result class.
    """
    parent = have_parent = None
    amqp = self.amqp
    task_id = task_id or uuid()
    producer = producer or publisher  # XXX compat
    router = router or amqp.router
    conf = self.conf
    if conf.task_always_eager:  # pragma: no cover
        # send_task cannot run the task eagerly (it only has the name),
        # so warn instead of silently diverging from apply_async.
        warnings.warn(AlwaysEagerIgnored(
            'task_always_eager has no effect on send_task',
        ), stacklevel=2)

    ignore_result = options.pop('ignore_result', False)
    options = router.route(
        options, route_name or name, args, kwargs, task_type)
    driver_type = self.producer_pool.connections.connection.transport.driver_type
    # Native delayed delivery: with quorum queues, ETA/countdown messages
    # are re-routed through a dedicated topic exchange instead of being
    # held by the worker.
    if (eta or countdown) and detect_quorum_queues(self, driver_type)[0]:
        queue = options.get("queue")
        exchange_type = queue.exchange.type if queue else options["exchange_type"]
        routing_key = queue.routing_key if queue else options["routing_key"]
        exchange_name = queue.exchange.name if queue else options["exchange"]
        if exchange_type != 'direct':
            if eta:
                if isinstance(eta, str):
                    eta = isoparse(eta)
                # Normalize an absolute ETA into a relative countdown.
                countdown = (maybe_make_aware(eta) - self.now()).total_seconds()
            if countdown:
                if countdown > 0:
                    routing_key = calculate_routing_key(int(countdown), routing_key)
                    exchange = Exchange(
                        'celery_delayed_27',
                        type='topic',
                    )
                    options.pop("queue", None)
                    options['routing_key'] = routing_key
                    options['exchange'] = exchange
        else:
            logger.warning(
                'Direct exchanges are not supported with native delayed delivery.\n'
                f'{exchange_name} is a direct exchange but should be a topic exchange or '
                'a fanout exchange in order for native delayed delivery to work properly.\n'
                'If quorum queues are used, this task may block the worker process until the ETA arrives.'
            )

    if expires is not None:
        # Normalize expires (datetime / ISO string / seconds) to seconds.
        if isinstance(expires, datetime):
            expires_s = (maybe_make_aware(
                expires) - self.now()).total_seconds()
        elif isinstance(expires, str):
            expires_s = (maybe_make_aware(
                isoparse(expires)) - self.now()).total_seconds()
        else:
            expires_s = expires

        if expires_s < 0:
            logger.warning(
                f"{task_id} has an expiration date in the past ({-expires_s}s ago).\n"
                "We assume this is intended and so we have set the "
                "expiration date to 0 instead.\n"
                "According to RabbitMQ's documentation:\n"
                "\"Setting the TTL to 0 causes messages to be expired upon "
                "reaching a queue unless they can be delivered to a "
                "consumer immediately.\"\n"
                "If this was unintended, please check the code which "
                "published this task."
            )
            expires_s = 0

        options["expiration"] = expires_s

    # Inherit root/parent ids (and optionally priority) from the task
    # currently executing in this worker, if any.
    if not root_id or not parent_id:
        parent = self.current_worker_task
        if parent:
            if not root_id:
                root_id = parent.request.root_id or parent.request.id
            if not parent_id:
                parent_id = parent.request.id

        if conf.task_inherit_parent_priority:
            options.setdefault('priority',
                               parent.request.delivery_info.get('priority'))

    # alias for 'task_as_v2'
    message = amqp.create_task_message(
        task_id, name, args, kwargs, countdown, eta, group_id, group_index,
        expires, retries, chord,
        maybe_list(link), maybe_list(link_error),
        reply_to or self.thread_oid, time_limit, soft_time_limit,
        self.conf.task_send_sent_event,
        root_id, parent_id, shadow, chain,
        ignore_result=ignore_result,
        replaced_task_nesting=replaced_task_nesting, **options
    )

    # Stamped headers were already folded into the message; drop them
    # from the transport options.
    stamped_headers = options.pop('stamped_headers', [])
    for stamp in stamped_headers:
        options.pop(stamp)

    if connection:
        producer = amqp.Producer(connection, auto_declare=False)

    with self.producer_or_acquire(producer) as P:
        with P.connection._reraise_as_library_errors():
            if not ignore_result:
                self.backend.on_task_call(P, task_id)
            amqp.send_task_message(P, name, message, **options)
    result = (result_cls or self.AsyncResult)(task_id)
    # We avoid using the constructor since a custom result class
    # can be used, in which case the constructor may still use
    # the old signature.
    result.ignored = ignore_result

    if add_to_parent:
        if not have_parent:
            parent, have_parent = self.current_worker_task, True
        if parent:
            parent.add_trail(result)
    return result
def connection_for_read(self, url=None, **kwargs):
    """Establish connection used for consuming.

    See Also:
        :meth:`connection` for supported arguments.
    """
    return self._connection(url or self.conf.broker_read_url, **kwargs)
def connection_for_write(self, url=None, **kwargs):
    """Establish connection used for producing.

    See Also:
        :meth:`connection` for supported arguments.
    """
    return self._connection(url or self.conf.broker_write_url, **kwargs)
def connection(self, hostname=None, userid=None, password=None,
               virtual_host=None, port=None, ssl=None,
               connect_timeout=None, transport=None,
               transport_options=None, heartbeat=None,
               login_method=None, failover_strategy=None, **kwargs):
    """Establish a connection to the message broker.

    Please use :meth:`connection_for_read` and
    :meth:`connection_for_write` instead, to convey the intent
    of use for this connection.

    Arguments:
        url: Either the URL or the hostname of the broker to use.
        hostname (str): URL, Hostname/IP-address of the broker.
            If a URL is used, then the other argument below will
            be taken from the URL instead.
        userid (str): Username to authenticate as.
        password (str): Password to authenticate with
        virtual_host (str): Virtual host to use (domain).
        port (int): Port to connect to.
        ssl (bool, Dict): Defaults to the :setting:`broker_use_ssl`
            setting.
        transport (str): defaults to the :setting:`broker_transport`
            setting.
        transport_options (Dict): Dictionary of transport specific options.
        heartbeat (int): AMQP Heartbeat in seconds (``pyamqp`` only).
        login_method (str): Custom login method to use (AMQP only).
        failover_strategy (str, Callable): Custom failover strategy.
        **kwargs: Additional arguments to :class:`kombu.Connection`.

    Returns:
        kombu.Connection: the lazy connection instance.
    """
    # Kept as a write connection for backwards compatibility.
    return self.connection_for_write(
        hostname or self.conf.broker_write_url,
        userid=userid, password=password,
        virtual_host=virtual_host, port=port, ssl=ssl,
        connect_timeout=connect_timeout, transport=transport,
        transport_options=transport_options, heartbeat=heartbeat,
        login_method=login_method, failover_strategy=failover_strategy,
        **kwargs
    )
def _connection(self, url, userid=None, password=None,
                virtual_host=None, port=None, ssl=None,
                connect_timeout=None, transport=None,
                transport_options=None, heartbeat=None,
                login_method=None, failover_strategy=None, **kwargs):
    """Build a broker connection, filling unset arguments from config."""
    conf = self.conf
    return self.amqp.Connection(
        url,
        userid or conf.broker_user,
        password or conf.broker_password,
        virtual_host or conf.broker_vhost,
        port or conf.broker_port,
        transport=transport or conf.broker_transport,
        ssl=self.either('broker_use_ssl', ssl),
        heartbeat=heartbeat,
        login_method=login_method or conf.broker_login_method,
        failover_strategy=(
            failover_strategy or conf.broker_failover_strategy
        ),
        # Explicit transport options are layered over the configured ones.
        transport_options=dict(
            conf.broker_transport_options, **transport_options or {}
        ),
        connect_timeout=self.either(
            'broker_connection_timeout', connect_timeout
        ),
    )
broker_connection = connection  # Deprecated alias kept for backwards compatibility.
def _acquire_connection(self, pool=True):
"""Helper for :meth:`connection_or_acquire`."""
if pool:
return self.pool.acquire(block=True)
return self.connection_for_write()
def connection_or_acquire(self, connection=None, pool=True, *_, **__):
    """Context used to acquire a connection from the pool.

    For use within a :keyword:`with` statement to get a connection
    from the pool if one is not already provided.

    Arguments:
        connection (kombu.Connection): If not provided, a connection
            will be acquired from the connection pool.
    """
    return FallbackContext(connection, self._acquire_connection, pool=pool)
default_connection = connection_or_acquire  # XXX compat
def producer_or_acquire(self, producer=None):
    """Context used to acquire a producer from the pool.

    For use within a :keyword:`with` statement to get a producer
    from the pool if one is not already provided.

    Arguments:
        producer (kombu.Producer): If not provided, a producer
            will be acquired from the producer pool.
    """
    return FallbackContext(
        producer, self.producer_pool.acquire, block=True,
    )
default_producer = producer_or_acquire  # XXX compat
def prepare_config(self, c):
    """Prepare configuration before it is merged with the defaults."""
    # Rewrites/warns about deprecated setting names.
    return find_deprecated_settings(c)
def now(self):
    """Return the current date and time in the app's configured timezone."""
    return to_utc(datetime.now(datetime_timezone.utc)).astimezone(self.timezone)
def select_queues(self, queues=None):
    """Select subset of queues.

    Arguments:
        queues (Sequence[str]): a list of queue names to keep.
    """
    return self.amqp.queues.select(queues)
def either(self, default_key, *defaults):
    """Get key from configuration or use default values.

    Fallback to the value of a configuration key if none of the
    `*values` are true.
    """
    # starpromise defers the config lookup until no default matched.
    return first(None, [
        first(None, defaults), starpromise(self.conf.get, default_key),
    ])
def bugreport(self):
    """Return information useful in bug reports."""
    return bugreport(self)
def _get_backend(self):
    """Instantiate the result backend selected by config or ``backend_cls``."""
    backend, url = backends.by_url(
        self.backend_cls or self.conf.result_backend,
        self.loader)
    return backend(app=self, url=url)
def _finalize_pending_conf(self):
    """Get config value by key and finalize loading the configuration.

    Note:
        This is used by PendingConfiguration:
        as soon as you access a key the configuration is read.
    """
    try:
        conf = self._conf = self._load_config()
    except AttributeError as err:
        # AttributeError is not propagated, it is "handled" by
        # PendingConfiguration parent class. This causes
        # confusing RecursionError.
        raise ModuleNotFoundError(*err.args) from err

    return conf
def _load_config(self):
    """Load and finalize the configuration, firing the configure signals."""
    if isinstance(self.on_configure, Signal):
        self.on_configure.send(sender=self)
    else:
        # used to be a method pre 4.0
        self.on_configure()
    if self._config_source:
        self.loader.config_from_object(self._config_source)
    self.configured = True
    settings = detect_settings(
        self.prepare_config(self.loader.conf), self._preconf,
        ignore_keys=self._preconf_set_by_auto, prefix=self.namespace,
    )
    if self._conf is not None:
        # replace in place, as someone may have referenced app.conf,
        # done some changes, accessed a key, and then try to make more
        # changes to the reference and not the finalized value.
        self._conf.swap_with(settings)
    else:
        self._conf = settings
    # load lazy config dict initializers.
    pending_def = self._pending_defaults
    while pending_def:
        self._conf.add_defaults(maybe_evaluate(pending_def.popleft()()))

    # load lazy periodic tasks
    pending_beat = self._pending_periodic_tasks
    while pending_beat:
        periodic_task_args, periodic_task_kwargs = pending_beat.popleft()
        self._add_periodic_task(*periodic_task_args, **periodic_task_kwargs)

    self.on_after_configure.send(sender=self, source=self._conf)
    return self._conf
def _after_fork(self):
    """Reset connection pools after fork so children don't reuse parent sockets."""
    self._pool = None
    try:
        # Only reset the producer pool if amqp was already instantiated.
        self.__dict__['amqp']._producer_pool = None
    except (AttributeError, KeyError):
        pass
    self.on_after_fork.send(sender=self)
def signature(self, *args, **kwargs):
    """Return a new :class:`~celery.Signature` bound to this app."""
    options = dict(kwargs, app=self)
    return self._canvas.signature(*args, **options)
def add_periodic_task(self, schedule, sig,
                      args=(), kwargs=(), name=None, **opts):
    """Add a periodic task to beat schedule.

    Celery beat store tasks based on `sig` or `name` if provided. Adding the
    same signature twice make the second task override the first one. To
    avoid the override, use distinct `name` for them.

    Returns:
        The beat-schedule key used for the entry.
    """
    key, entry = self._sig_to_periodic_task_entry(
        schedule, sig, args, kwargs, name, **opts)
    if self.configured:
        self._add_periodic_task(key, entry, name=name)
    else:
        # Config not loaded yet: queue the entry for _load_config().
        self._pending_periodic_tasks.append([(key, entry), {"name": name}])
    return key
def _sig_to_periodic_task_entry(self, schedule, sig,
                                args=(), kwargs=None, name=None, **opts):
    """Convert a signature (or task name) into a ``(key, entry)`` pair
    suitable for the beat schedule dict."""
    kwargs = {} if not kwargs else kwargs
    # Accept either a full signature or a bare task object/name.
    sig = (sig.clone(args, kwargs)
           if isinstance(sig, abstract.CallableSignature)
           else self.signature(sig.name, args, kwargs))
    return name or repr(sig), {
        'schedule': schedule,
        'task': sig.name,
        'args': sig.args,
        'kwargs': sig.kwargs,
        'options': dict(sig.options, **opts),
    }
def _add_periodic_task(self, key, entry, name=None):
    """Insert *entry* into the beat schedule, warning on silent overrides."""
    if name is None and key in self._conf.beat_schedule:
        logger.warning(
            f"Periodic task key='{key}' shadowed a previous unnamed periodic task."
            " Pass a name kwarg to add_periodic_task to silence this warning."
        )

    self._conf.beat_schedule[key] = entry
def create_task_cls(self):
    """Create a base task class bound to this app."""
    return self.subclass_with_self(
        self.task_cls, name='Task', attribute='_app',
        keep_reduce=True, abstract=True,
    )
def subclass_with_self(self, Class, name=None, attribute='app',
                       reverse=None, keep_reduce=False, **kw):
    """Subclass an app-compatible class.

    App-compatible means that the class has a class attribute that
    provides the default app it should use, for example:
    ``class Foo: app = None``.

    Arguments:
        Class (type): The app-compatible class to subclass.
        name (str): Custom name for the target class.
        attribute (str): Name of the attribute holding the app,
            Default is 'app'.
        reverse (str): Reverse path to this object used for pickling
            purposes. For example, to get ``app.AsyncResult``,
            use ``"AsyncResult"``.
        keep_reduce (bool): If enabled a custom ``__reduce__``
            implementation won't be provided.
    """
    # Class may be given as a dotted-path string.
    Class = symbol_by_name(Class)
    reverse = reverse if reverse else Class.__name__

    def __reduce__(self):
        # Pickle as (app, reverse-path) so the subclass is re-created
        # from the app on the other side.
        return _unpickle_appattr, (reverse, self.__reduce_args__())

    attrs = dict(
        {attribute: self},
        __module__=Class.__module__,
        __doc__=Class.__doc__,
        **kw)
    if not keep_reduce:
        attrs['__reduce__'] = __reduce__

    return type(name or Class.__name__, (Class,), attrs)
def _rgetattr(self, path):
return attrgetter(path)(self)
def __enter__(self):
    """Enter the app as a context manager; returns the app itself."""
    return self
def __exit__(self, *exc_info):
    """Close the app on context exit; exceptions propagate (no return value)."""
    self.close()
def __repr__(self):
    """Debug representation, e.g. ``<Celery main:0x...>``."""
    return f'<{type(self).__name__} {appstr(self)}>'
def __reduce__(self):
    """Pickle support: choose the legacy or v2 unpickling protocol."""
    if self._using_v1_reduce:
        return self.__reduce_v1__()
    return (_unpickle_app_v2, (self.__class__, self.__reduce_keys__()))
def __reduce_v1__(self):
    """Legacy (positional-args) pickle protocol."""
    # Reduce only pickles the configuration changes,
    # so the default configuration doesn't have to be passed
    # between processes.
    return (
        _unpickle_app,
        (self.__class__, self.Pickler) + self.__reduce_args__(),
    )
def __reduce_keys__(self):
    """Keyword arguments used to reconstruct the object when unpickling."""
    return {
        'main': self.main,
        # Only the changed settings travel; defaults are rebuilt remotely.
        'changes':
            self._conf.changes if self.configured else self._preconf,
        'loader': self.loader_cls,
        'backend': self.backend_cls,
        'amqp': self.amqp_cls,
        'events': self.events_cls,
        'log': self.log_cls,
        'control': self.control_cls,
        'fixups': self.fixups,
        'config_source': self._config_source,
        'task_cls': self.task_cls,
        'namespace': self.namespace,
    }
def __reduce_args__(self):
    """Deprecated method, please use :meth:`__reduce_keys__` instead."""
    return (self.main, self._conf.changes if self.configured else {},
            self.loader_cls, self.backend_cls, self.amqp_cls,
            self.events_cls, self.log_cls, self.control_cls,
            False, self._config_source)
@cached_property
def Worker(self):
    """Worker application.

    See Also:
        :class:`~@Worker`.
    """
    return self.subclass_with_self('celery.apps.worker:Worker')
@cached_property
def WorkController(self, **kwargs):
    """Embeddable worker.

    See Also:
        :class:`~@WorkController`.
    """
    # NOTE(review): **kwargs is never supplied to a cached_property
    # getter; the parameter is effectively dead.
    return self.subclass_with_self('celery.worker:WorkController')
@cached_property
def Beat(self, **kwargs):
    """:program:`celery beat` scheduler application.

    See Also:
        :class:`~@Beat`.
    """
    # NOTE(review): **kwargs is never supplied to a cached_property
    # getter; the parameter is effectively dead.
    return self.subclass_with_self('celery.apps.beat:Beat')
@cached_property
def Task(self):
    """Base task class for this app."""
    return self.create_task_cls()
@cached_property
def annotations(self):
    """Prepared task annotations from :setting:`task_annotations`."""
    return prepare_annotations(self.conf.task_annotations)
@cached_property
def AsyncResult(self):
    """Create new result instance.

    See Also:
        :class:`celery.result.AsyncResult`.
    """
    return self.subclass_with_self('celery.result:AsyncResult')
@cached_property
def ResultSet(self):
    """App-bound :class:`celery.result.ResultSet` subclass."""
    return self.subclass_with_self('celery.result:ResultSet')
@cached_property
def GroupResult(self):
    """Create new group result instance.

    See Also:
        :class:`celery.result.GroupResult`.
    """
    return self.subclass_with_self('celery.result:GroupResult')
@property
def pool(self):
    """Broker connection pool: :class:`~@pool`.

    Note:
        This attribute is not related to the workers concurrency pool.
    """
    if self._pool is None:
        # Created lazily on first access; _ensure_after_fork() (defined
        # elsewhere) is invoked first so forked children reset the pool.
        self._ensure_after_fork()
        limit = self.conf.broker_pool_limit
        pools.set_limit(limit)
        self._pool = pools.connections[self.connection_for_write()]
    return self._pool
@property
def current_task(self):
    """Instance of task being executed, or :const:`None`."""
    return _task_stack.top
@property
def current_worker_task(self):
    """The task currently being executed by a worker or :const:`None`.

    Differs from :data:`current_task` in that it's not affected
    by tasks calling other tasks directly, or eagerly.
    """
    return get_current_worker_task()
@cached_property
def oid(self):
    """Universally unique identifier for this app."""
    # since 4.0: thread.get_ident() is not included when
    # generating the process id.  This is due to how the RPC
    # backend now dedicates a single thread to receive results,
    # which would not work if each thread has a separate id.
    return oid_from(self, threads=False)
@property
def thread_oid(self):
    """Per-thread unique identifier for this app."""
    try:
        return self._local.oid
    except AttributeError:
        # First access on this thread: generate and cache the id.
        self._local.oid = new_oid = oid_from(self, threads=True)
        return new_oid
@cached_property
def amqp(self):
    """AMQP related functionality: :class:`~@amqp`."""
    return instantiate(self.amqp_cls, app=self)
@property
def _backend(self):
    """A reference to the backend object

    Uses self._backend_cache if it is thread safe.
    Otherwise, use self._local
    """
    if self._backend_cache is not None:
        return self._backend_cache
    return getattr(self._local, "backend", None)
@_backend.setter
def _backend(self, backend):
    """Set the backend object on the app"""
    if backend.thread_safe:
        # Thread-safe backends are shared app-wide.
        self._backend_cache = backend
    else:
        # Otherwise each thread gets its own instance.
        self._local.backend = backend
@property
def backend(self):
    """Current backend instance, created lazily on first access."""
    if self._backend is None:
        self._backend = self._get_backend()
    return self._backend
@property
def conf(self):
    """Current configuration, loaded lazily on first access."""
    if self._conf is None:
        self._conf = self._load_config()
    return self._conf
@conf.setter
def conf(self, d):
    # Replace the configuration object wholesale.
    self._conf = d
@cached_property
def control(self):
    """Remote control: :class:`~@control`."""
    return instantiate(self.control_cls, app=self)
@cached_property
def events(self):
    """Consuming and sending events: :class:`~@events`."""
    return instantiate(self.events_cls, app=self)
@cached_property
def loader(self):
    """Current loader instance."""
    return get_loader_cls(self.loader_cls)(app=self)
@cached_property
def log(self):
    """Logging: :class:`~@log`."""
    return instantiate(self.log_cls, app=self)
@cached_property
def _canvas(self):
    """The :mod:`celery.canvas` module, imported lazily to avoid cycles."""
    from celery import canvas
    return canvas
@cached_property
def tasks(self):
    """Task registry.

    Warning:
        Accessing this attribute will also auto-finalize the app.
    """
    self.finalize(auto=True)
    return self._tasks
@property
def producer_pool(self):
    """Producer pool provided by the amqp subsystem."""
    return self.amqp.producer_pool
def uses_utc_timezone(self):
    """Check if the application uses the UTC timezone."""
    return self.timezone == timezone.utc
@cached_property
def timezone(self):
    """Current timezone for this app.

    This is a cached property taking the time zone from the
    :setting:`timezone` setting.
    """
    conf = self.conf
    if not conf.timezone:
        # No explicit timezone configured: fall back on enable_utc.
        if conf.enable_utc:
            return timezone.utc
        else:
            return timezone.local
    return timezone.get_timezone(conf.timezone)
App = Celery # XXX compat
| Celery |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1463661,
"end": 1464056
} | class ____(sgqlc.types.Type, Node):
"""A repository rule."""
__schema__ = github_schema
__field_names__ = ("parameters", "type")
parameters = sgqlc.types.Field("RuleParameters", graphql_name="parameters")
"""The parameters for this rule."""
type = sgqlc.types.Field(sgqlc.types.non_null(RepositoryRuleType), graphql_name="type")
"""The type of rule."""
| RepositoryRule |
python | apache__thrift | test/py/TestClient.py | {
"start": 13888,
"end": 14405
} | class ____(MultiplexedOptionalTest):
def get_protocol(self, transport):
wrapped_proto = make_pedantic(TBinaryProtocol.TBinaryProtocolFactory().getProtocol(transport))
return TMultiplexedProtocol.TMultiplexedProtocol(wrapped_proto, "ThriftTest")
def get_protocol2(self, transport):
wrapped_proto = make_pedantic(TBinaryProtocol.TBinaryProtocolFactory().getProtocol(transport))
return TMultiplexedProtocol.TMultiplexedProtocol(wrapped_proto, "SecondService")
| MultiplexedBinaryTest |
python | google__pytype | pytype/rewrite/abstract/internal.py | {
"start": 184,
"end": 745
} | class ____(base.BaseValue):
"""Representation of a function arg tuple."""
def __init__(
self,
ctx: base.ContextType,
constant: tuple[_Var, ...] = (),
indefinite: bool = False,
):
super().__init__(ctx)
assert isinstance(constant, tuple), constant
self.constant = constant
self.indefinite = indefinite
def __repr__(self):
indef = "+" if self.indefinite else ""
return f"FunctionArgTuple({indef}{self.constant!r})"
@property
def _attrs(self):
return (self.constant, self.indefinite)
| FunctionArgTuple |
python | walkccc__LeetCode | solutions/1171. Remove Zero Sum Consecutive Nodes from Linked List/1171.py | {
"start": 0,
"end": 418
} | class ____:
def removeZeroSumSublists(self, head: ListNode) -> ListNode:
dummy = ListNode(0, head)
prefix = 0
prefixToNode = {0: dummy}
while head:
prefix += head.val
prefixToNode[prefix] = head
head = head.next
prefix = 0
head = dummy
while head:
prefix += head.val
head.next = prefixToNode[prefix].next
head = head.next
return dummy.next
| Solution |
python | allegroai__clearml | clearml/backend_api/services/v2_20/auth.py | {
"start": 22451,
"end": 23965
} | class ____(Response):
"""
Response of auth.revoke_credentials endpoint.
:param revoked: Number of credentials revoked
:type revoked: int
"""
_service = "auth"
_action = "revoke_credentials"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"revoked": {
"description": "Number of credentials revoked",
"enum": [0, 1],
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, revoked: Optional[int] = None, **kwargs: Any) -> None:
super(RevokeCredentialsResponse, self).__init__(**kwargs)
self.revoked = revoked
@schema_property("revoked")
def revoked(self) -> Optional[int]:
return self._property_revoked
@revoked.setter
def revoked(self, value: Optional[int]) -> None:
if value is None:
self._property_revoked = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "revoked", six.integer_types)
self._property_revoked = value
response_mapping = {
LoginRequest: LoginResponse,
CreateCredentialsRequest: CreateCredentialsResponse,
GetCredentialsRequest: GetCredentialsResponse,
EditCredentialsRequest: EditCredentialsResponse,
RevokeCredentialsRequest: RevokeCredentialsResponse,
EditUserRequest: EditUserResponse,
}
| RevokeCredentialsResponse |
python | pytest-dev__pytest | src/_pytest/capture.py | {
"start": 5810,
"end": 6323
} | class ____(io.TextIOWrapper):
__slots__ = ()
@property
def name(self) -> str:
# Ensure that file.name is a string. Workaround for a Python bug
# fixed in >=3.7.4: https://bugs.python.org/issue36015
return repr(self.buffer)
@property
def mode(self) -> str:
# TextIOWrapper doesn't expose a mode, but at least some of our
# tests check it.
assert hasattr(self.buffer, "mode")
return cast(str, self.buffer.mode.replace("b", ""))
| EncodedFile |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/inputs.py | {
"start": 1188,
"end": 3127
} | class ____(graphene.InputObjectType):
runIds = graphene.List(graphene.String)
pipelineName = graphene.InputField(graphene.String)
tags = graphene.List(graphene.NonNull(GrapheneExecutionTag))
statuses = graphene.List(graphene.NonNull(GrapheneRunStatus))
snapshotId = graphene.InputField(graphene.String)
updatedAfter = graphene.InputField(graphene.Float)
updatedBefore = graphene.InputField(graphene.Float)
createdBefore = graphene.InputField(graphene.Float)
createdAfter = graphene.InputField(graphene.Float)
mode = graphene.InputField(graphene.String)
class Meta:
description = """This type represents a filter on Dagster runs."""
name = "RunsFilter"
def to_selector(self):
if self.tags:
# We are wrapping self.tags in a list because graphene.List is not marked as iterable
tags = {tag["key"]: tag["value"] for tag in list(self.tags)}
else:
tags = None
if self.statuses:
statuses = [DagsterRunStatus[status.value] for status in self.statuses]
else:
statuses = None
updated_before = datetime_from_timestamp(self.updatedBefore) if self.updatedBefore else None
updated_after = datetime_from_timestamp(self.updatedAfter) if self.updatedAfter else None
created_before = datetime_from_timestamp(self.createdBefore) if self.createdBefore else None
created_after = datetime_from_timestamp(self.createdAfter) if self.createdAfter else None
return RunsFilter(
run_ids=self.runIds if self.runIds else None,
job_name=self.pipelineName,
tags=tags,
statuses=statuses,
snapshot_id=self.snapshotId,
updated_before=updated_before,
updated_after=updated_after,
created_before=created_before,
created_after=created_after,
)
| GrapheneRunsFilter |
python | jschneier__django-storages | storages/backends/s3.py | {
"start": 3218,
"end": 10881
class ____(CompressedFileMixin, File):
    """
    The default file object used by the S3Storage backend.
    This file implements file streaming using boto's multipart
    uploading functionality. The file can be opened in read or
    write mode.
    This class extends Django's File class. However, the contained
    data is only the data contained in the current buffer. So you
    should not access the contained file object directly. You should
    access the data via this class.
    Warning: This file *must* be closed using the close() method in
    order to properly write the file to S3. Be sure to close the file
    in your application.
    """

    def __init__(self, name, mode, storage, buffer_size=None):
        # Simultaneous read/write is unsupported by the multipart model below.
        if "r" in mode and "w" in mode:
            raise ValueError("Can't combine 'r' and 'w' in mode.")
        self._storage = storage
        # Store the key relative to the storage's configured location prefix.
        self.name = name[len(self._storage.location) :].lstrip("/")
        self._mode = mode
        self.obj = storage.bucket.Object(name)
        if "w" not in mode:
            # Force early RAII-style exception if object does not exist
            params = _filter_download_params(
                self._storage.get_object_parameters(self.name)
            )
            self.obj.load(**params)
        self._closed = False
        self._file = None
        self._parts = None
        # 5 MB is the minimum part size (if there is more than one part).
        # Amazon allows up to 10,000 parts. The default supports uploads
        # up to roughly 50 GB. Increase the part size to accommodate
        # for files larger than this.
        self.buffer_size = buffer_size or setting("AWS_S3_FILE_BUFFER_SIZE", 5242880)
        self._reset_file_properties()

    def _reset_file_properties(self):
        # Return all multipart-upload bookkeeping to a pristine state so the
        # same S3File instance can be reopened after close().
        self._multipart = None
        self._raw_bytes_written = 0
        self._write_counter = 0
        self._is_dirty = False

    def open(self, mode=None):
        """Reopen the file; changing the mode on reopen is an error."""
        if self._file is not None and not self.closed:
            self.seek(0)  # Mirror Django's behavior
        elif mode and mode != self._mode:
            raise ValueError("Cannot reopen file with a new mode.")
        # Accessing the file will functionally re-open it
        self.file  # noqa: B018
        return self

    @property
    def size(self):
        # Size reported by S3 for the remote object, not the local buffer.
        return self.obj.content_length

    @property
    def closed(self):
        return self._closed

    def _get_file(self):
        # Lazily materialize the local buffer; for read mode this downloads
        # the object, for write mode it is simply an empty spooled buffer.
        if self._file is None:
            self._file = tempfile.SpooledTemporaryFile(
                max_size=self._storage.max_memory_size,
                suffix=".S3File",
                dir=setting("FILE_UPLOAD_TEMP_DIR"),
            )
            if "r" in self._mode:
                self._is_dirty = False
                params = _filter_download_params(
                    self._storage.get_object_parameters(self.name)
                )
                self.obj.download_fileobj(
                    self._file, ExtraArgs=params, Config=self._storage.transfer_config
                )
                self._file.seek(0)
                if self._storage.gzip and self.obj.content_encoding == "gzip":
                    self._file = self._decompress_file(mode=self._mode, file=self._file)
                elif "b" not in self._mode:
                    if hasattr(self._file, "readable"):
                        # For versions > Python 3.10 compatibility
                        # See SpooledTemporaryFile changes in 3.11 (https://docs.python.org/3/library/tempfile.html) # noqa: E501
                        # Now fully implements the io.BufferedIOBase and io.TextIOBase abstract base classes allowing the file # noqa: E501
                        # to be readable in the mode that it was specified (without accessing the underlying _file object). # noqa: E501
                        # In this case, we need to wrap the file in a TextIOWrapper to ensure that the file is read as a text file. # noqa: E501
                        self._file = io.TextIOWrapper(self._file, encoding="utf-8")
                    else:
                        # For versions <= Python 3.10 compatibility
                        self._file = io.TextIOWrapper(
                            self._file._file, encoding="utf-8"
                        )
            self._closed = False
        return self._file

    def _set_file(self, value):
        self._file = value

    file = property(_get_file, _set_file)

    def read(self, *args, **kwargs):
        if "r" not in self._mode:
            raise AttributeError("File was not opened in read mode.")
        return super().read(*args, **kwargs)

    def readline(self, *args, **kwargs):
        if "r" not in self._mode:
            raise AttributeError("File was not opened in read mode.")
        return super().readline(*args, **kwargs)

    def readlines(self):
        # NOTE(review): unlike read()/readline(), this does not guard against
        # non-read modes; iteration delegates to the parent File protocol.
        return list(self)

    def write(self, content):
        """Buffer ``content`` locally; flush a multipart part when the buffer
        reaches ``buffer_size``. The upload is only finalized in close()."""
        if "w" not in self._mode:
            raise AttributeError("File was not opened in write mode.")
        self._is_dirty = True
        if self._multipart is None:
            # First write: start the multipart upload session lazily.
            self._multipart = self.obj.initiate_multipart_upload(
                **self._storage._get_write_parameters(self.obj.key)
            )
            self._parts = []
        if self.buffer_size <= self._buffer_file_size:
            self._flush_write_buffer()
        bstr = to_bytes(content)
        self._raw_bytes_written += len(bstr)
        return super().write(bstr)

    @property
    def _buffer_file_size(self):
        # Measure the buffer by seeking to the end, then restore the cursor.
        pos = self.file.tell()
        self.file.seek(0, os.SEEK_END)
        length = self.file.tell()
        self.file.seek(pos)
        return length

    def _flush_write_buffer(self):
        # Upload the current buffer contents as the next multipart part and
        # empty the buffer. Part numbers are 1-based and must be recorded with
        # their ETags for the final complete() call.
        if self._buffer_file_size:
            self._write_counter += 1
            self.file.seek(0)
            part = self._multipart.Part(self._write_counter)
            response = part.upload(Body=self.file.read())
            self._parts.append(
                {"ETag": response["ETag"], "PartNumber": self._write_counter}
            )
            self.file.seek(0)
            self.file.truncate()

    def _create_empty_on_close(self):
        """
        Attempt to create an empty file for this key when this File is closed if no
        bytes have been written and no object already exists on S3 for this key.
        This behavior is meant to mimic the behavior of Django's builtin
        FileSystemStorage, where files are always created after they are opened in
        write mode:
            f = storage.open('file.txt', mode='w')
            f.close()
        """
        assert "w" in self._mode
        assert self._raw_bytes_written == 0
        try:
            # Check if the object exists on the server; if so, don't do anything
            self.obj.load()
        except ClientError as err:
            if err.response["ResponseMetadata"]["HTTPStatusCode"] == 404:
                self.obj.put(
                    Body=b"", **self._storage._get_write_parameters(self.obj.key)
                )
            else:
                raise

    def close(self):
        """Finalize (or abort) any multipart upload and release the buffer."""
        if self._is_dirty:
            if self._multipart is not None:
                # Flush the tail of the buffer, then commit all parts.
                self._flush_write_buffer()
                self._multipart.complete(MultipartUpload={"Parts": self._parts})
        else:
            # Nothing was written since open: abort any dangling upload and
            # optionally create an empty object to mimic FileSystemStorage.
            if self._multipart is not None:
                self._multipart.abort()
            if "w" in self._mode and self._raw_bytes_written == 0:
                self._create_empty_on_close()
        if self._file is not None:
            self._file.close()
            self._file = None
        self._reset_file_properties()
        self._closed = True
@deconstructible
| S3File |
python | django__django | tests/auth_tests/test_models.py | {
"start": 13844,
"end": 19751
class ____(TestCase):
    """Tests for ``UserManager.with_perm()``.

    The fixture covers every interesting user category: direct permission,
    permission via (multiple) groups, no permission, superuser, and an
    inactive user holding the permission.
    """

    @classmethod
    def setUpTestData(cls):
        content_type = ContentType.objects.get_for_model(Group)
        cls.permission = Permission.objects.create(
            name="test",
            content_type=content_type,
            codename="test",
        )
        # User with permission.
        cls.user1 = User.objects.create_user("user 1", "foo@example.com")
        cls.user1.user_permissions.add(cls.permission)
        # User with group permission.
        group1 = Group.objects.create(name="group 1")
        group1.permissions.add(cls.permission)
        group2 = Group.objects.create(name="group 2")
        group2.permissions.add(cls.permission)
        cls.user2 = User.objects.create_user("user 2", "bar@example.com")
        cls.user2.groups.add(group1, group2)
        # Users without permissions.
        cls.user_charlie = User.objects.create_user("charlie", "charlie@example.com")
        cls.user_charlie_b = User.objects.create_user(
            "charliebrown", "charlie@brown.com"
        )
        # Superuser.
        cls.superuser = User.objects.create_superuser(
            "superuser",
            "superuser@example.com",
            "superpassword",
        )
        # Inactive user with permission.
        cls.inactive_user = User.objects.create_user(
            "inactive_user",
            "baz@example.com",
            is_active=False,
        )
        cls.inactive_user.user_permissions.add(cls.permission)

    def test_invalid_permission_name(self):
        # Permission strings must contain exactly one dot.
        msg = "Permission name should be in the form app_label.permission_codename."
        for perm in ("nodots", "too.many.dots", "...", ""):
            with self.subTest(perm), self.assertRaisesMessage(ValueError, msg):
                User.objects.with_perm(perm)

    def test_invalid_permission_type(self):
        msg = "The `perm` argument must be a string or a permission instance."
        for perm in (b"auth.test", object(), None):
            with self.subTest(perm), self.assertRaisesMessage(TypeError, msg):
                User.objects.with_perm(perm)

    def test_invalid_backend_type(self):
        # `backend` must be a dotted-path string, not bytes or an object.
        msg = "backend must be a dotted import path string (got %r)."
        for backend in (b"auth_tests.CustomModelBackend", object()):
            with self.subTest(backend):
                with self.assertRaisesMessage(TypeError, msg % backend):
                    User.objects.with_perm("auth.test", backend=backend)

    def test_basic(self):
        # Matrix of is_active / include_superusers combinations against both
        # the string form and the Permission-instance form of `perm`.
        active_users = [self.user1, self.user2]
        tests = [
            ({}, [*active_users, self.superuser]),
            ({"obj": self.user1}, []),
            # Only inactive users.
            ({"is_active": False}, [self.inactive_user]),
            # All users.
            ({"is_active": None}, [*active_users, self.superuser, self.inactive_user]),
            # Exclude superusers.
            ({"include_superusers": False}, active_users),
            (
                {"include_superusers": False, "is_active": False},
                [self.inactive_user],
            ),
            (
                {"include_superusers": False, "is_active": None},
                [*active_users, self.inactive_user],
            ),
        ]
        for kwargs, expected_users in tests:
            for perm in ("auth.test", self.permission):
                with self.subTest(perm=perm, **kwargs):
                    self.assertCountEqual(
                        User.objects.with_perm(perm, **kwargs),
                        expected_users,
                    )

    @override_settings(
        AUTHENTICATION_BACKENDS=["django.contrib.auth.backends.BaseBackend"]
    )
    def test_backend_without_with_perm(self):
        # A backend lacking with_perm() support yields no users.
        self.assertSequenceEqual(User.objects.with_perm("auth.test"), [])

    def test_nonexistent_permission(self):
        # Unknown permissions still match superusers (they have every perm).
        self.assertSequenceEqual(User.objects.with_perm("auth.perm"), [self.superuser])

    def test_nonexistent_backend(self):
        with self.assertRaises(ImportError):
            User.objects.with_perm(
                "auth.test",
                backend="invalid.backend.CustomModelBackend",
            )

    def test_invalid_backend_submodule(self):
        # An importable module that is not a backend class raises ImportError.
        with self.assertRaises(ImportError):
            User.objects.with_perm(
                "auth.test",
                backend="json.tool",
            )

    @override_settings(
        AUTHENTICATION_BACKENDS=["auth_tests.test_models.CustomModelBackend"]
    )
    def test_custom_backend(self):
        for perm in ("auth.test", self.permission):
            with self.subTest(perm):
                self.assertCountEqual(
                    User.objects.with_perm(perm),
                    [self.user_charlie, self.user_charlie_b],
                )

    @override_settings(
        AUTHENTICATION_BACKENDS=["auth_tests.test_models.CustomModelBackend"]
    )
    def test_custom_backend_pass_obj(self):
        for perm in ("auth.test", self.permission):
            with self.subTest(perm):
                self.assertSequenceEqual(
                    User.objects.with_perm(perm, obj=self.user_charlie_b),
                    [self.user_charlie_b],
                )

    @override_settings(
        AUTHENTICATION_BACKENDS=[
            "auth_tests.test_models.CustomModelBackend",
            "django.contrib.auth.backends.ModelBackend",
        ]
    )
    def test_multiple_backends(self):
        # With several backends configured, an explicit `backend` is required.
        msg = (
            "You have multiple authentication backends configured and "
            "therefore must provide the `backend` argument."
        )
        with self.assertRaisesMessage(ValueError, msg):
            User.objects.with_perm("auth.test")
        backend = "auth_tests.test_models.CustomModelBackend"
        self.assertCountEqual(
            User.objects.with_perm("auth.test", backend=backend),
            [self.user_charlie, self.user_charlie_b],
        )
| UserWithPermTestCase |
python | dateutil__dateutil | tests/_common.py | {
"start": 6004,
"end": 6589
class ____(object):
    """An object that compares as equal (never unequal) to anything.

    All equality-flavored comparisons (``==``, ``<=``, ``>=``) return True
    and all strict ones (``!=``, ``<``, ``>``) return False, regardless of
    the other operand.
    """

    def __eq__(self, other):
        # Equal to absolutely everything.
        return True

    def __le__(self, other):
        return True

    def __ge__(self, other):
        return True

    def __ne__(self, other):
        # Never unequal to anything.
        return False

    def __lt__(self, other):
        return False

    def __gt__(self, other):
        return False

    # Keep the "reflected" aliases in the class namespace, mirroring each
    # comparison method under its r-prefixed name.
    __req__, __rne__ = __eq__, __ne__
    __rle__, __rge__ = __le__, __ge__
    __rlt__, __rgt__ = __lt__, __gt__
ComparesEqual = ComparesEqualClass()
| ComparesEqualClass |
python | apache__airflow | providers/microsoft/azure/tests/unit/microsoft/azure/hooks/test_base_azure.py | {
"start": 1318,
"end": 8173
class ____:
    """Unit tests for ``AzureBaseHook`` connection and credential resolution.

    Each test injects a ``Connection`` via the ``mocked_connection`` fixture
    (indirect parametrization) and mocks the Azure SDK entry points to assert
    which authentication path the hook chooses.
    """

    @pytest.mark.parametrize(
        "mocked_connection",
        [Connection(conn_id="azure_default", extra={"key_path": "key_file.json"})],
        indirect=True,
    )
    @patch(f"{MODULE}.get_client_from_auth_file")
    def test_get_conn_with_key_path(self, mock_get_client_from_auth_file, mocked_connection):
        # `key_path` extra routes through get_client_from_auth_file.
        mock_get_client_from_auth_file.return_value = "foo-bar"
        mock_sdk_client = Mock()
        auth_sdk_client = AzureBaseHook(mock_sdk_client).get_conn()
        mock_get_client_from_auth_file.assert_called_once_with(
            client_class=mock_sdk_client, auth_path=mocked_connection.extra_dejson["key_path"]
        )
        assert auth_sdk_client == "foo-bar"

    @pytest.mark.parametrize(
        "mocked_connection",
        [Connection(conn_id="azure_default", extra={"key_json": {"test": "test"}})],
        indirect=True,
    )
    @patch(f"{MODULE}.get_client_from_json_dict")
    def test_get_conn_with_key_json(self, mock_get_client_from_json_dict, mocked_connection):
        # `key_json` extra routes through get_client_from_json_dict.
        mock_sdk_client = Mock()
        mock_get_client_from_json_dict.return_value = "foo-bar"
        auth_sdk_client = AzureBaseHook(mock_sdk_client).get_conn()
        mock_get_client_from_json_dict.assert_called_once_with(
            client_class=mock_sdk_client, config_dict=mocked_connection.extra_dejson["key_json"]
        )
        assert auth_sdk_client == "foo-bar"

    @patch(f"{MODULE}.ServicePrincipalCredentials")
    @pytest.mark.parametrize(
        "mocked_connection",
        [
            Connection(
                conn_id="azure_default",
                login="my_login",
                password="my_password",
                extra={"tenantId": "my_tenant", "subscriptionId": "my_subscription"},
            )
        ],
        indirect=True,
    )
    def test_get_conn_with_credentials(self, mock_spc, mocked_connection):
        # login/password + tenantId build ServicePrincipalCredentials and the
        # SDK client is instantiated with them.
        mock_sdk_client = Mock(return_value="spam-egg")
        mock_spc.return_value = "foo-bar"
        auth_sdk_client = AzureBaseHook(mock_sdk_client).get_conn()
        mock_spc.assert_called_once_with(
            client_id=mocked_connection.login,
            secret=mocked_connection.password,
            tenant=mocked_connection.extra_dejson["tenantId"],
        )
        mock_sdk_client.assert_called_once_with(
            credentials="foo-bar",
            subscription_id=mocked_connection.extra_dejson["subscriptionId"],
        )
        assert auth_sdk_client == "spam-egg"

    @pytest.mark.parametrize(
        "mocked_connection",
        [
            Connection(
                conn_id="azure_default",
                extra={
                    "managed_identity_client_id": "test_client_id",
                    "workload_identity_tenant_id": "test_tenant_id",
                    "subscriptionId": "subscription_id",
                },
            )
        ],
        indirect=True,
    )
    @patch("azure.common.credentials.ServicePrincipalCredentials")
    @patch(f"{MODULE}.AzureIdentityCredentialAdapter")
    def test_get_conn_fallback_to_azure_identity_credential_adapter(
        self,
        mock_credential_adapter,
        mock_service_pricipal_credential,
        mocked_connection,
    ):
        # Without login/password, the hook falls back to the managed/workload
        # identity adapter and never touches ServicePrincipalCredentials.
        # NOTE(review): "pricipal" in the parameter name is a typo of
        # "principal" (internal name only, not worth breaking patch order for).
        mock_credential = Mock()
        mock_credential_adapter.return_value = mock_credential
        mock_sdk_client = Mock()
        AzureBaseHook(mock_sdk_client).get_conn()
        mock_credential_adapter.assert_called_with(
            managed_identity_client_id="test_client_id",
            workload_identity_tenant_id="test_tenant_id",
        )
        assert not mock_service_pricipal_credential.called
        mock_sdk_client.assert_called_once_with(
            credentials=mock_credential,
            subscription_id="subscription_id",
        )

    @patch(f"{MODULE}.ClientSecretCredential")
    @pytest.mark.parametrize(
        "mocked_connection",
        [
            Connection(
                conn_id="azure_default",
                login="my_login",
                password="my_password",
                extra={"tenantId": "my_tenant", "use_azure_identity_object": True},
            ),
        ],
        indirect=True,
    )
    def test_get_credential_with_client_secret(self, mock_spc, mocked_connection):
        # get_credential() prefers ClientSecretCredential when login/password
        # and tenantId are all present.
        mock_spc.return_value = "foo-bar"
        cred = AzureBaseHook().get_credential()
        mock_spc.assert_called_once_with(
            client_id=mocked_connection.login,
            client_secret=mocked_connection.password,
            tenant_id=mocked_connection.extra_dejson["tenantId"],
        )
        assert cred == "foo-bar"

    @patch(f"{UTILS}.DefaultAzureCredential")
    @pytest.mark.parametrize(
        "mocked_connection",
        [
            Connection(
                conn_id="azure_default",
                extra={"use_azure_identity_object": True},
            ),
        ],
        indirect=True,
    )
    def test_get_credential_with_azure_default_credential(self, mock_spc, mocked_connection):
        # With no extras, DefaultAzureCredential is built with no arguments.
        mock_spc.return_value = "foo-bar"
        cred = AzureBaseHook().get_credential()
        mock_spc.assert_called_once_with()
        assert cred == "foo-bar"

    @patch(f"{UTILS}.DefaultAzureCredential")
    @pytest.mark.parametrize(
        "mocked_connection",
        [
            Connection(
                conn_id="azure_default",
                extra={
                    "managed_identity_client_id": "test_client_id",
                    "workload_identity_tenant_id": "test_tenant_id",
                    "use_azure_identity_object": True,
                },
            ),
        ],
        indirect=True,
    )
    def test_get_credential_with_azure_default_credential_with_extra(self, mock_spc, mocked_connection):
        # Managed/workload identity extras are forwarded to
        # DefaultAzureCredential, including the additionally-allowed tenant.
        mock_spc.return_value = "foo-bar"
        cred = AzureBaseHook().get_credential()
        mock_spc.assert_called_once_with(
            managed_identity_client_id=mocked_connection.extra_dejson.get("managed_identity_client_id"),
            workload_identity_tenant_id=mocked_connection.extra_dejson.get("workload_identity_tenant_id"),
            additionally_allowed_tenants=[mocked_connection.extra_dejson.get("workload_identity_tenant_id")],
        )
        assert cred == "foo-bar"

    @patch(f"{UTILS}.DefaultAzureCredential")
    @pytest.mark.parametrize(
        "mocked_connection",
        [
            Connection(
                conn_id="azure_default",
                extra={"use_azure_identity_object": True},
            ),
        ],
        indirect=True,
    )
    def test_get_token_with_azure_default_credential(self, mock_spc, mocked_connection):
        # get_token() delegates to the credential's get_token for the scope.
        mock_spc.return_value.get_token.return_value = "new-token"
        scope = "custom_scope"
        token = AzureBaseHook().get_token(scope)
        mock_spc.assert_called_once_with()
        assert token == "new-token"
| TestBaseAzureHook |
python | sqlalchemy__sqlalchemy | test/orm/test_defaults.py | {
"start": 6706,
"end": 7437
class ____(fixtures.MappedTest):
    """Check that a column-level Python default still fires when the column
    is excluded from the imperative mapping."""

    @classmethod
    def define_tables(cls, metadata):
        # One table: autoincrement PK plus a column with a Python-side default.
        Table(
            "dt",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("col1", String(20), default="hello"),
        )

    def test_exclude(self):
        table = self.tables.dt

        class Foo(BasicEntity):
            pass

        # Map Foo against the table while leaving "col1" unmapped.
        self.mapper_registry.map_imperatively(
            Foo, table, exclude_properties=("col1",)
        )

        session = fixture_session()
        session.add(Foo())
        session.flush()
        # The default value is applied on INSERT even though the attribute
        # never existed on the mapped class.
        eq_(
            session.connection().execute(table.select()).fetchall(),
            [(1, "hello")],
        )
| ExcludedDefaultsTest |
python | huggingface__transformers | src/transformers/models/lightglue/modeling_lightglue.py | {
"start": 8464,
"end": 11619
class ____(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: LightGlueConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        # Fall back to hidden_size // num_heads when the config does not pin a head dim.
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True
        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )
        # Hook for applying rotary embeddings; stored as an attribute so
        # subclasses can substitute a different implementation.
        self.rotary_fn = apply_rotary_pos_emb

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Run self- or cross-attention over ``hidden_states``.

        When ``encoder_hidden_states`` is provided, keys and values are
        projected from it (cross-attention) and ``encoder_attention_mask``
        is used; otherwise both come from ``hidden_states`` with
        ``attention_mask``.

        Returns:
            Tuple of the attention output and the attention weights (the
            latter may be ``None`` depending on the attention backend).
        """
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)
        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        is_cross_attention = encoder_hidden_states is not None
        current_states = encoder_hidden_states if is_cross_attention else hidden_states
        current_attention_mask = encoder_attention_mask if is_cross_attention else attention_mask
        key_states = self.k_proj(current_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(current_states).view(hidden_shape).transpose(1, 2)
        if position_embeddings is not None:
            cos, sin = position_embeddings
            # Fix: go through the ``rotary_fn`` hook assigned in __init__ rather
            # than calling the module-level function directly, so that subclass
            # overrides of ``rotary_fn`` actually take effect. Behavior is
            # unchanged for the default assignment.
            query_states, key_states = self.rotary_fn(query_states, key_states, cos, sin)
        # Select the attention backend configured on the model; "eager" uses
        # the reference implementation defined in this module.
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            current_attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
| LightGlueAttention |
python | joke2k__faker | faker/providers/bank/__init__.py | {
"start": 190,
"end": 6547
class ____(BaseProvider):
    """Implement default bank provider for Faker.
    .. important::
        Bank codes, account numbers, and other ID's generated by this provider
        are only valid in form, i.e. they conform to some standard/format, are
        of the expected lengths, and have valid checksums (where applicable).
        Results generated that turn out to be valid in real life are purely
        coincidental.
    Sources:
    - https://en.wikipedia.org/wiki/International_Bank_Account_Number
    - https://www.theswiftcodes.com/swift-code-checker/
    """

    # Maps 'A'..'Z' to '10'..'35' (ord('A') == 65; 65 % 55 == 10), the digit
    # substitution used by the IBAN mod-97 checksum.
    ALPHA: Dict[str, str] = {c: str(ord(c) % 55) for c in string.ascii_uppercase}
    # '?' placeholders become random uppercase letters, '#' random digits.
    bban_format: str = "????#############"
    country_code: str = "GB"

    def aba(self) -> str:
        """Generate an ABA routing transit number."""
        # First two digits encode a Federal Reserve district (01-12).
        fed_num = self.random_int(min=1, max=12)
        rand = self.numerify("######")
        aba = f"{fed_num:02}{rand}"
        # calculate check digit using the 3-7-1 weighting over the 8 digits,
        # then round the weighted sum up to the next multiple of ten.
        d = [int(n) for n in aba]
        chk_digit = 3 * (d[0] + d[3] + d[6]) + 7 * (d[1] + d[4] + d[7]) + d[2] + d[5]
        chk_digit = ceil(chk_digit / 10) * 10 - chk_digit
        return f"{aba}{chk_digit}"

    def bank_country(self) -> str:
        """Generate the bank provider's ISO 3166-1 alpha-2 country code."""
        return self.country_code

    def bank(self) -> str:
        """Generate a bank name."""
        # Localized subclasses supply the `banks` tuple; the base class has none.
        if not hasattr(self, "banks"):
            raise AttributeError(
                f"The {self.__class__.__name__} provider does not have a 'banks' "
                "attribute. Consider contributing to the project and "
                " adding a 'banks' tuple to enable bank name generation."
            )
        return self.random_element(self.banks)

    def bban(self) -> str:
        """Generate a Basic Bank Account Number (BBAN)."""
        # Replace each '?' with a random letter, then each '#' with a digit.
        temp = re.sub(r"\?", lambda x: self.random_element(ascii_uppercase), self.bban_format)
        return self.numerify(temp)

    def iban(self) -> str:
        """Generate an International Bank Account Number (IBAN)."""
        bban = self.bban()
        # Compute the two check digits: move country code + "00" to the end,
        # substitute letters with digits, and apply 98 - (value mod 97).
        check = bban + self.country_code + "00"
        check_ = int("".join(self.ALPHA.get(c, c) for c in check))
        check_ = 98 - (check_ % 97)
        check = str(check_).zfill(2)
        return self.country_code + check + bban

    def swift8(self, use_dataset: bool = False) -> str:
        """Generate an 8-digit SWIFT code.
        This method uses |swift| under the hood with the ``length`` argument set
        to ``8`` and with the ``primary`` argument omitted. All 8-digit SWIFT
        codes already refer to the primary branch/office.
        :sample:
        :sample: use_dataset=True
        """
        return self.swift(length=8, use_dataset=use_dataset)

    def swift11(self, primary: bool = False, use_dataset: bool = False) -> str:
        """Generate an 11-digit SWIFT code.
        This method uses |swift| under the hood with the ``length`` argument set
        to ``11``. If ``primary`` is set to ``True``, the SWIFT code will always
        end with ``'XXX'``. All 11-digit SWIFT codes use this convention to
        refer to the primary branch/office.
        :sample:
        :sample: use_dataset=True
        """
        return self.swift(length=11, primary=primary, use_dataset=use_dataset)

    def swift(
        self,
        length: Optional[int] = None,
        primary: bool = False,
        use_dataset: bool = False,
    ) -> str:
        """Generate a SWIFT code.
        SWIFT codes, reading from left to right, are composed of a 4 alphabet
        character bank code, a 2 alphabet character country code, a 2
        alphanumeric location code, and an optional 3 alphanumeric branch code.
        This means SWIFT codes can only have 8 or 11 characters, so the value of
        ``length`` can only be ``None`` or the integers ``8`` or ``11``. If the
        value is ``None``, then a value of ``8`` or ``11`` will randomly be
        assigned.
        Because all 8-digit SWIFT codes already refer to the primary branch or
        office, the ``primary`` argument only has an effect if the value of
        ``length`` is ``11``. If ``primary`` is ``True`` and ``length`` is
        ``11``, the 11-digit SWIFT codes generated will always end in ``'XXX'``
        to denote that they belong to primary branches/offices.
        For extra authenticity, localized providers may opt to include SWIFT
        bank codes, location codes, and branch codes used in their respective
        locales. If ``use_dataset`` is ``True``, this method will generate SWIFT
        codes based on those locale-specific codes if included. If those codes
        were not included, then it will behave as if ``use_dataset`` were
        ``False``, and in that mode, all those codes will just be randomly
        generated as per the specification.
        :sample:
        :sample: length=8
        :sample: length=8, use_dataset=True
        :sample: length=11
        :sample: length=11, primary=True
        :sample: length=11, use_dataset=True
        :sample: length=11, primary=True, use_dataset=True
        """
        if length is None:
            length = self.random_element((8, 11))
        if length not in (8, 11):
            raise AssertionError("length can only be 8 or 11")
        # Bank code: locale dataset if available and requested, else random.
        if use_dataset and hasattr(self, "swift_bank_codes"):
            bank_code: str = self.random_element(self.swift_bank_codes)  # type: ignore[attr-defined]
        else:
            bank_code = self.lexify("????", letters=string.ascii_uppercase)
        if use_dataset and hasattr(self, "swift_location_codes"):
            location_code: str = self.random_element(self.swift_location_codes)  # type: ignore[attr-defined]
        else:
            location_code = self.lexify("??", letters=string.ascii_uppercase + string.digits)
        if length == 8:
            return bank_code + self.country_code + location_code
        # 11-character form: append a branch code ('XXX' marks the primary office).
        if primary:
            branch_code = "XXX"
        elif use_dataset and hasattr(self, "swift_branch_codes"):
            branch_code = self.random_element(self.swift_branch_codes)  # type: ignore[attr-defined]
        else:
            branch_code = self.lexify("???", letters=string.ascii_uppercase + string.digits)
        return bank_code + self.country_code + location_code + branch_code
| Provider |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 30946,
"end": 37191
class ____(Expr):
    """Expression that augments each partition with overlap rows taken from
    its neighbours (``before`` rows/time from the previous partition,
    ``after`` rows/time from the next one).

    ``before``/``after`` may be integers (row counts) or timedeltas (index
    windows, which assumes a monotonic datetime-like index).
    """

    _parameters = ["frame", "before", "after"]

    @functools.cached_property
    def _meta(self):
        # Overlapping does not change the schema of the frame.
        return self.frame._meta

    def _divisions(self):
        # Keep divisions alive, MapPartitions will handle the actual division logic
        return self.frame.divisions

    def _layer(self) -> dict:
        """Build the task graph: per partition, a "prepend" task selecting the
        tail of preceding partition(s), an "append" task selecting the head of
        the following partition, and a combining task that stitches all three
        together via ``_combined_parts``."""
        dsk, prevs, nexts = {}, [], []  # type: ignore[var-annotated]
        name_prepend = f"overlap-prepend-{self._name}"
        if self.before:
            # The first partition has nothing before it.
            prevs.append(None)
            if isinstance(self.before, numbers.Integral):
                before = self.before
                for i in range(self.frame.npartitions - 1):
                    dsk[(name_prepend, i)] = (M.tail, (self.frame._name, i), before)
                    prevs.append((name_prepend, i))
            elif isinstance(self.before, datetime.timedelta):
                # Assumes monotonic (increasing?) index
                divs = pd.Series(self.frame.divisions)
                deltas = divs.diff().iloc[1:-1]
                # In the first case window-size is larger than at least one partition, thus it is
                # necessary to calculate how many partitions must be used for each rolling task.
                # Otherwise, these calculations can be skipped (faster)
                if (self.before > deltas).any():
                    pt_z = divs[0]
                    for i in range(self.frame.npartitions - 1):
                        # Select all indexes of relevant partitions between the current partition and
                        # the partition with the highest division outside the rolling window (before)
                        pt_i = divs[i + 1]
                        # lower-bound the search to the first division
                        lb = max(pt_i - self.before, pt_z)
                        first, j = divs[i], i
                        # Walk backwards over division deltas until the window is covered.
                        while first > lb and j > 0:
                            first = first - deltas[j]
                            j = j - 1
                        dsk[(name_prepend, i)] = (  # type: ignore[assignment]
                            _tail_timedelta,
                            (self.frame._name, i + 1),
                            [(self.frame._name, k) for k in range(j, i + 1)],
                            self.before,
                        )
                        prevs.append((name_prepend, i))
                else:
                    # Fast path: one preceding partition always suffices.
                    for i in range(self.frame.npartitions - 1):
                        dsk[(name_prepend, i)] = (  # type: ignore[assignment]
                            _tail_timedelta,
                            (self.frame._name, i + 1),
                            [(self.frame._name, i)],
                            self.before,
                        )
                        prevs.append((name_prepend, i))
        else:
            prevs.extend([None] * self.frame.npartitions)  # type: ignore[list-item]
        name_append = f"overlap-append-{self._name}"
        if self.after:
            if isinstance(self.after, numbers.Integral):
                after = self.after
                for i in range(1, self.frame.npartitions):
                    dsk[(name_append, i)] = (M.head, (self.frame._name, i), after)
                    nexts.append((name_append, i))
            else:
                # We don't want to look at the divisions, so take twice the step and
                # validate later.
                after = 2 * self.after
                for i in range(1, self.frame.npartitions):
                    dsk[(name_append, i)] = (  # type: ignore[assignment]
                        _head_timedelta,
                        (self.frame._name, i - 1),
                        (self.frame._name, i),
                        after,
                    )
                    nexts.append((name_append, i))
            # The last partition has nothing after it.
            nexts.append(None)  # type: ignore[arg-type]
        else:
            nexts.extend([None] * self.frame.npartitions)  # type: ignore[list-item]
        # Stitch (prev tail, partition, next head) into one frame per partition.
        for i, (prev, next) in enumerate(zip(prevs, nexts)):
            dsk[(self._name, i)] = (  # type: ignore[assignment]
                _combined_parts,
                prev,
                (self.frame._name, i),
                next,
                self.before,
                self.after,
            )
        return dsk
def _tail_timedelta(current, prev_, before):
    """Return the rows of each frame in ``prev_`` whose index falls within
    the ``before`` window immediately preceding ``current``'s first index
    value, concatenated into a single frame."""
    cutoff = current.index.min() - before
    pieces = [part[part.index > cutoff] for part in prev_]
    return methods.concat(pieces)
def _overlap_chunk(df, func, before, after, *args, **kwargs):
    # Thin argument-order adapter: the graph supplies the partition (``df``)
    # first, while ``overlap_chunk`` expects it after the window arguments.
    return overlap_chunk(func, before, after, df, *args, **kwargs)
def _combined_parts(prev_part, current_part, next_part, before, after):
    """Concatenate a partition with the overlap frames from its neighbours.

    Validates that each overlap actually covers the requested window and
    returns a ``CombinedOutput`` of ``(frame, rows_before, rows_after)``,
    where the row counts are ``None`` when there is no overlap on that side.
    Raises ``NotImplementedError`` when a partition is smaller than the
    overlap window.
    """
    msg = (
        "Partition size is less than overlapping "
        "window size. Try using ``df.repartition`` "
        "to increase the partition size."
    )
    if prev_part is not None:
        if isinstance(before, numbers.Integral):
            # An integer window must be fully satisfied by the previous tail.
            if prev_part.shape[0] != before:
                raise NotImplementedError(msg)
        else:
            prev_part_input = prev_part
            prev_part = _tail_timedelta(current_part, [prev_part], before)
            # NOTE(review): this branch only runs when ``before`` is not an
            # Integral, so for a timedelta window the ``not isinstance(...,
            # datetime.timedelta)`` term below is always False and this guard
            # cannot fire — confirm upstream whether it is dead code.
            if (
                len(prev_part_input) == len(prev_part)
                and len(prev_part_input) > 0
                and not isinstance(before, datetime.timedelta)
            ):
                raise NotImplementedError(msg)
    if next_part is not None:
        if isinstance(after, numbers.Integral):
            if next_part.shape[0] != after:
                raise NotImplementedError(msg)
        else:
            # The graph fetched twice the window (see _layer); if trimming to
            # the true window removes nothing, the next partition was too small.
            next_part_input = next_part
            next_part = _head_timedelta(current_part, next_part, after)
            if len(next_part_input) == len(next_part) and len(next_part_input) > 0:
                raise NotImplementedError(msg)
    parts = [p for p in (prev_part, current_part, next_part) if p is not None]
    combined = methods.concat(parts)
    return CombinedOutput(
        (
            combined,
            len(prev_part) if prev_part is not None and len(prev_part) > 0 else None,
            len(next_part) if next_part is not None and len(next_part) > 0 else None,
        )
    )
| CreateOverlappingPartitions |
python | huggingface__transformers | src/transformers/models/videomae/modeling_videomae.py | {
"start": 4523,
"end": 7947
class ____(nn.Module):
    """
    Video to Patch Embedding. This module turns a batch of videos of shape (batch_size, num_frames, num_channels,
    height, width) into a tensor of shape (batch_size, seq_len, hidden_size) to be consumed by a Transformer encoder.
    The seq_len (the number of patches) equals (number of frames // tubelet_size) * (height // patch_size) * (width //
    patch_size).
    """

    def __init__(self, config):
        super().__init__()
        image_size = config.image_size
        if not isinstance(image_size, collections.abc.Iterable):
            image_size = (image_size, image_size)
        patch_size = config.patch_size
        if not isinstance(patch_size, collections.abc.Iterable):
            patch_size = (patch_size, patch_size)

        self.image_size = image_size
        self.patch_size = patch_size
        self.tubelet_size = int(config.tubelet_size)
        self.num_channels = config.num_channels

        # Total patch count: spatial grid times the number of temporal tubes.
        tubes = config.num_frames // self.tubelet_size
        grid_w = image_size[1] // patch_size[1]
        grid_h = image_size[0] // patch_size[0]
        self.num_patches = grid_w * grid_h * tubes

        # A non-overlapping 3D convolution (stride == kernel) extracts one
        # embedding per tubelet patch.
        kernel = (self.tubelet_size, patch_size[0], patch_size[1])
        self.projection = nn.Conv3d(
            in_channels=config.num_channels,
            out_channels=config.hidden_size,
            kernel_size=kernel,
            stride=kernel,
        )

    def forward(self, pixel_values):
        batch_size, num_frames, num_channels, height, width = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        if height != self.image_size[0] or width != self.image_size[1]:
            raise ValueError(
                f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
            )
        # Conv3d expects (batch, channels, frames, height, width).
        video = pixel_values.permute(0, 2, 1, 3, 4)
        patches = self.projection(video)
        # Flatten the (frames, h, w) patch grid into one sequence dimension.
        return patches.flatten(2).transpose(1, 2)
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: Optional[float] = None,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    """Reference scaled-dot-product attention in plain PyTorch.

    Returns the context tensor (heads moved back next to the sequence axis,
    contiguous) together with the post-dropout attention probabilities.
    """
    if scaling is None:
        scaling = query.size(-1) ** -0.5

    # Raw similarity between every query and key position.
    scores = torch.matmul(query, key.transpose(2, 3)) * scaling
    if attention_mask is not None:
        # Trim the mask to the key length, then apply it additively.
        scores = scores + attention_mask[:, :, :, : key.shape[-2]]

    probs = nn.functional.softmax(scores, dim=-1)
    probs = nn.functional.dropout(probs, p=dropout, training=module.training)

    context = torch.matmul(probs, value)
    context = context.transpose(1, 2).contiguous()
    return context, probs
| VideoMAEPatchEmbeddings |
python | pydata__xarray | xarray/coding/variables.py | {
"start": 17907,
"end": 20948
class ____(VariableCoder):
    """Scale and offset variables according to CF conventions.
    Follows the formula:
        decode_values = encoded_values * scale_factor + add_offset
    """

    def __init__(
        self,
        decode_times: bool | CFDatetimeCoder = False,
        decode_timedelta: bool | CFTimedeltaCoder = False,
    ) -> None:
        # Time decoding settings influence the float dtype chosen in decode().
        self.decode_times = decode_times
        self.decode_timedelta = decode_timedelta

    def encode(self, variable: Variable, name: T_Name = None) -> Variable:
        """Apply the inverse transform: subtract add_offset, divide by
        scale_factor, moving both keys from encoding into attrs."""
        dims, data, attrs, encoding = unpack_for_encoding(variable)
        if "scale_factor" in encoding or "add_offset" in encoding:
            # if we have a _FillValue/masked_value we do not want to cast now
            # but leave that to CFMaskCoder
            dtype = data.dtype
            if "_FillValue" not in encoding and "missing_value" not in encoding:
                dtype = _choose_float_dtype(data.dtype, encoding)
            # but still we need a copy to prevent changing original data
            data = duck_array_ops.astype(data, dtype=dtype, copy=True)
            if "add_offset" in encoding:
                data -= pop_to(encoding, attrs, "add_offset", name=name)
            if "scale_factor" in encoding:
                data /= pop_to(encoding, attrs, "scale_factor", name=name)
        return Variable(dims, data, attrs, encoding, fastpath=True)

    def decode(self, variable: Variable, name: T_Name = None) -> Variable:
        """Lazily apply ``data * scale_factor + add_offset`` when either
        attribute is present, moving both keys from attrs into encoding."""
        _attrs = variable.attrs
        if "scale_factor" in _attrs or "add_offset" in _attrs:
            dims, data, attrs, encoding = unpack_for_decoding(variable)
            scale_factor = pop_to(attrs, encoding, "scale_factor", name=name)
            add_offset = pop_to(attrs, encoding, "add_offset", name=name)
            # Collapse array-wrapped (0-d) parameters to Python scalars.
            if duck_array_ops.ndim(scale_factor) > 0:
                scale_factor = np.asarray(scale_factor).item()
            if duck_array_ops.ndim(add_offset) > 0:
                add_offset = np.asarray(add_offset).item()
            # if we have a _FillValue/masked_value in encoding we already have the wanted
            # floating point dtype here (via CFMaskCoder), so no check is necessary
            # only check in other cases and for time-like
            dtype = data.dtype
            is_time_like = _is_time_like(attrs.get("units"))
            if (
                ("_FillValue" not in encoding and "missing_value" not in encoding)
                or (is_time_like == "datetime" and self.decode_times)
                or (is_time_like == "timedelta" and self.decode_timedelta)
            ):
                dtype = _choose_float_dtype(dtype, encoding)
            # Defer the arithmetic so decoding stays lazy for backed arrays.
            transform = partial(
                _scale_offset_decoding,
                scale_factor=scale_factor,
                add_offset=add_offset,
                dtype=dtype,
            )
            data = lazy_elemwise_func(data, transform, dtype)
            return Variable(dims, data, attrs, encoding, fastpath=True)
        else:
            return variable
| CFScaleOffsetCoder |
python | django__django | django/contrib/postgres/fields/array.py | {
"start": 10233,
"end": 10330
} | class ____(ArrayRHSMixin, lookups.DataContains):
pass
@ArrayField.register_lookup
| ArrayContains |
python | celery__celery | t/unit/backends/test_base.py | {
"start": 52073,
"end": 53012
} | class ____:
def test_get(self):
with pytest.raises(NotImplementedError):
KeyValueStoreBackend(self.app).get('a')
def test_set(self):
with pytest.raises(NotImplementedError):
KeyValueStoreBackend(self.app)._set_with_state('a', 1, states.SUCCESS)
def test_incr(self):
with pytest.raises(NotImplementedError):
KeyValueStoreBackend(self.app).incr('a')
def test_cleanup(self):
assert not KeyValueStoreBackend(self.app).cleanup()
def test_delete(self):
with pytest.raises(NotImplementedError):
KeyValueStoreBackend(self.app).delete('a')
def test_mget(self):
with pytest.raises(NotImplementedError):
KeyValueStoreBackend(self.app).mget(['a'])
def test_forget(self):
with pytest.raises(NotImplementedError):
KeyValueStoreBackend(self.app).forget('a')
| test_KeyValueStoreBackend_interface |
python | ray-project__ray | release/ray_release/log_aggregator.py | {
"start": 93,
"end": 3883
} | class ____:
def __init__(self, log: str):
self.log = log
def compute_crash_pattern(self) -> str:
stack_trace = LogAggregator._compute_stack_trace(self.log.splitlines())
# truncate short enough to store in databases, but long enough to keep the
# pattern unique
return LogAggregator._compute_signature(stack_trace)[:4000]
@staticmethod
def _compute_signature(stack_trace: List[str]) -> str:
"""
Compute signature pattern from stack trace, by remove factors such as date,
time, temp directory, line numbers, etc. This help to aggregate similar logs
into same bug patterns
"""
massaged_trace = []
for line in stack_trace:
# remove any hashes that are more than 10 characters
line = re.sub(r"[a-z0-9]{10,}", "", line.strip())
# remove any numbers
line = re.sub(r"\d", "", line)
if line == "Traceback (most recent call last):":
continue
file_line = re.search(r'File "(.*)", (.*)', line)
if file_line:
# append the file's base name and caller information; the result string
# is not something meaningful to human, we just need something that
# uniquely represent the stack trace
line = f'{file_line.group(1).split("/")[-1]}{file_line.group(2)}'
massaged_trace.append(line)
return "".join(massaged_trace)
@staticmethod
def _compute_stack_trace(logs: List[str]) -> List[str]:
"""
Extract stack trace pattern from the logs. Stack trace pattern often matches
the following:
ERROR ...
Traceback (most recent call last):
File "...", line ..., in ...
...
Exception: exception error
"""
error_stacktrace = []
stacktrace = []
i = 0
while i < len(logs):
stack = []
trace = error_stacktrace
# Search for lines that are either
# ... ERROR ...
# or
# ... ERROR ...
# Traceback (most recent call last):
if "ERROR" in logs[i]:
stack.append(logs[i])
next = i + 1
if i + 1 < len(logs) and TRACEBACK_PATTERN in logs[i + 1]:
stack.append(logs[i + 1])
next = i + 2
# Or if the line with ERROR does not exist, just search for the line with
# Traceback (most recent call last):
elif TRACEBACK_PATTERN in logs[i]:
stack.append(logs[i])
trace = stacktrace
next = i + 1
# Or else, skip this line and continue
else:
i = i + 1
continue
# If the line that contains ERROR, Traceback, etc. is found, scan the logs
# until the line no longer has indentation. This is because stack trace
# is always indented, and stops when the line is no longer indented
while next < len(logs):
if logs[next].startswith((" ", "\t")):
stack.append(logs[next])
next = next + 1
else:
break
# Finished capturing the entire stack trace
if next < len(logs):
stack.append(logs[next])
if stack:
trace.append(stack)
i = next + 1
# Favor stack trace that contains the ERROR keyword
if error_stacktrace:
return error_stacktrace[-1]
# Otherwise any stack trace is fine
if stacktrace:
return stacktrace[-1]
return []
| LogAggregator |
python | getsentry__sentry | tests/sentry/search/test_utils.py | {
"start": 28947,
"end": 34577
} | class ____(TestCase):
def test(self) -> None:
with pytest.raises(Release.DoesNotExist):
# no releases exist period
environment = None
get_latest_release([self.project], environment)
old = self.create_release(version="old")
new_date = old.date_added + timedelta(minutes=1)
new = self.create_release(
version="new-but-in-environment",
environments=[self.environment],
date_released=new_date,
)
newest = self.create_release(
version="newest-overall", date_released=old.date_added + timedelta(minutes=5)
)
# latest overall (no environment filter)
environment = None
result = get_latest_release([self.project], environment)
assert result == [newest.version]
# latest in environment
environment = self.environment
result = get_latest_release([self.project], [environment])
assert result == [new.version]
assert get_latest_release([self.project.id], [environment]) == []
assert get_latest_release(
[self.project.id], [environment], self.project.organization_id
) == [new.version]
# Verify that not passing an environment correctly gets the latest one
assert get_latest_release([self.project], None) == [newest.version]
assert get_latest_release([self.project], []) == [newest.version]
with pytest.raises(Release.DoesNotExist):
# environment with no releases
new_environment = self.create_environment()
get_latest_release([self.project], [new_environment])
project_2 = self.create_project()
other_project_env_release = self.create_release(
project_2, version="other_project_env", environments=[self.environment]
)
other_project_release = self.create_release(project_2, version="other_project")
assert get_latest_release([project_2], None) == [other_project_release.version]
assert get_latest_release([project_2], [environment]) == [other_project_env_release.version]
assert get_latest_release([self.project, project_2], None) == [
newest.version,
other_project_release.version,
]
assert get_latest_release([self.project, project_2], [environment]) == [
new.version,
other_project_env_release.version,
]
with pytest.raises(Release.DoesNotExist):
assert get_latest_release([self.project, project_2], [environment], adopted=True) == [
new.version,
other_project_env_release.version,
]
ReleaseProjectEnvironment.objects.filter(
release__in=[new, other_project_env_release]
).update(adopted=timezone.now())
assert get_latest_release([self.project, project_2], [environment], adopted=True) == [
new.version,
other_project_env_release.version,
]
def test_semver(self) -> None:
project_2 = self.create_project()
release_1 = self.create_release(version="test@2.0.0", environments=[self.environment])
env_2 = self.create_environment()
self.create_release(version="test@1.3.2", environments=[env_2])
self.create_release(version="test@1.0.0", environments=[self.environment, env_2])
# Check when we're using a single project that we sort by semver
assert get_latest_release([self.project], None) == [release_1.version]
assert get_latest_release([project_2, self.project], None) == [release_1.version]
release_3 = self.create_release(
project_2, version="test@1.3.3", environments=[self.environment, env_2]
)
assert get_latest_release([project_2, self.project], None) == [
release_3.version,
release_1.version,
]
with pytest.raises(Release.DoesNotExist):
get_latest_release([project_2, self.project], [self.environment, env_2], adopted=True)
ReleaseProjectEnvironment.objects.filter(release__in=[release_3, release_1]).update(
adopted=timezone.now()
)
assert get_latest_release(
[project_2, self.project], [self.environment, env_2], adopted=True
) == [
release_3.version,
release_1.version,
]
assert get_latest_release([project_2, self.project], [env_2], adopted=True) == [
release_3.version,
]
# Make sure unadopted releases are ignored
ReleaseProjectEnvironment.objects.filter(release__in=[release_3]).update(
unadopted=timezone.now()
)
assert get_latest_release(
[project_2, self.project], [self.environment, env_2], adopted=True
) == [
release_1.version,
]
ReleaseProject.objects.filter(release__in=[release_1]).update(adopted=timezone.now())
assert get_latest_release([project_2, self.project], None, adopted=True) == [
release_1.version,
]
def test_multiple_projects_mixed_versions(self) -> None:
project_2 = self.create_project()
release_1 = self.create_release(version="test@2.0.0")
self.create_release(project_2, version="not_semver")
release_2 = self.create_release(project_2, version="not_semver_2")
self.create_release(version="test@1.0.0")
assert get_latest_release([project_2, self.project], None) == [
release_2.version,
release_1.version,
]
| GetLatestReleaseTest |
python | gevent__gevent | src/greentest/3.13/test_socket.py | {
"start": 228912,
"end": 229797
} | class ____(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
self.assertIs(socket.error, OSError)
self.assertIs(socket.timeout, TimeoutError)
def test_setblocking_invalidfd(self):
# Regression test for issue #28471
sock0 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, 0, sock0.fileno())
sock0.close()
self.addCleanup(sock.detach)
with self.assertRaises(OSError):
sock.setblocking(False)
@unittest.skipUnless(sys.platform in ('linux', 'android'), 'Linux specific test')
| TestExceptions |
python | davidhalter__jedi | jedi/plugins/django.py | {
"start": 10656,
"end": 10895
} | class ____(ValueWrapper):
def __init__(self, method, model_cls):
super().__init__(method)
self._model_cls = model_cls
def get_signatures(self):
return _get_signatures(self._model_cls)
| QuerySetBoundMethodWrapper |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 225121,
"end": 258356
} | class ____:
def setup_method(self):
self.rng = np.random.default_rng(7195199371)
@pytest.fixture(autouse=True)
def reset_levy_stable_params(self):
"""Setup default parameters for levy_stable generator"""
stats.levy_stable.parameterization = "S1"
stats.levy_stable.cdf_default_method = "piecewise"
stats.levy_stable.pdf_default_method = "piecewise"
stats.levy_stable.quad_eps = stats._levy_stable._QUAD_EPS
@pytest.fixture
def nolan_pdf_sample_data(self):
"""Sample data points for pdf computed with Nolan's stablec
See - http://fs2.american.edu/jpnolan/www/stable/stable.html
There's a known limitation of Nolan's executable for alpha < 0.2.
The data table loaded below is generated from Nolan's stablec
with the following parameter space:
alpha = 0.1, 0.2, ..., 2.0
beta = -1.0, -0.9, ..., 1.0
p = 0.01, 0.05, 0.1, 0.25, 0.35, 0.5,
and the equivalent for the right tail
Typically inputs for stablec:
stablec.exe <<
1 # pdf
1 # Nolan S equivalent to S0 in scipy
.25,2,.25 # alpha
-1,-1,0 # beta
-10,10,1 # x
1,0 # gamma, delta
2 # output file
"""
data = np.load(
Path(__file__).parent /
'data/levy_stable/stable-Z1-pdf-sample-data.npy'
)
data = np.rec.fromarrays(data.T, names='x,p,alpha,beta,pct')
return data
@pytest.fixture
def nolan_cdf_sample_data(self):
"""Sample data points for cdf computed with Nolan's stablec
See - http://fs2.american.edu/jpnolan/www/stable/stable.html
There's a known limitation of Nolan's executable for alpha < 0.2.
The data table loaded below is generated from Nolan's stablec
with the following parameter space:
alpha = 0.1, 0.2, ..., 2.0
beta = -1.0, -0.9, ..., 1.0
p = 0.01, 0.05, 0.1, 0.25, 0.35, 0.5,
and the equivalent for the right tail
Ideally, Nolan's output for CDF values should match the percentile
from where they have been sampled from. Even more so as we extract
percentile x positions from stablec too. However, we note at places
Nolan's stablec will produce absolute errors in order of 1e-5. We
compare against his calculations here. In future, once we less
reliant on Nolan's paper we might switch to comparing directly at
percentiles (those x values being produced from some alternative
means).
Typically inputs for stablec:
stablec.exe <<
2 # cdf
1 # Nolan S equivalent to S0 in scipy
.25,2,.25 # alpha
-1,-1,0 # beta
-10,10,1 # x
1,0 # gamma, delta
2 # output file
"""
data = np.load(
Path(__file__).parent /
'data/levy_stable/stable-Z1-cdf-sample-data.npy'
)
data = np.rec.fromarrays(data.T, names='x,p,alpha,beta,pct')
return data
@pytest.fixture
def nolan_loc_scale_sample_data(self):
"""Sample data where loc, scale are different from 0, 1
Data extracted in similar way to pdf/cdf above using
Nolan's stablec but set to an arbitrary location scale of
(2, 3) for various important parameters alpha, beta and for
parameterisations S0 and S1.
"""
data = np.load(
Path(__file__).parent /
'data/levy_stable/stable-loc-scale-sample-data.npy'
)
return data
@pytest.mark.slow
@pytest.mark.parametrize(
"sample_size", [
pytest.param(50), pytest.param(1500, marks=pytest.mark.slow)
]
)
@pytest.mark.parametrize("parameterization", ["S0", "S1"])
@pytest.mark.parametrize(
"alpha,beta", [(1.0, 0), (1.0, -0.5), (1.5, 0), (1.9, 0.5)]
)
@pytest.mark.parametrize("gamma,delta", [(1, 0), (3, 2)])
def test_rvs(
self,
parameterization,
alpha,
beta,
gamma,
delta,
sample_size,
):
stats.levy_stable.parameterization = parameterization
ls = stats.levy_stable(
alpha=alpha, beta=beta, scale=gamma, loc=delta
)
_, p = stats.kstest(
ls.rvs(size=sample_size, random_state=self.rng), ls.cdf
)
assert p > 0.05
@pytest.mark.xslow
@pytest.mark.parametrize('beta', [0.5, 1])
def test_rvs_alpha1(self, beta):
"""Additional test cases for rvs for alpha equal to 1."""
alpha = 1.0
loc = 0.5
scale = 1.5
x = stats.levy_stable.rvs(alpha, beta, loc=loc, scale=scale,
size=5000, random_state=self.rng)
stat, p = stats.kstest(x, 'levy_stable',
args=(alpha, beta, loc, scale))
assert p > 0.01
def test_fit(self):
# construct data to have percentiles that match
# example in McCulloch 1986.
x = [
-.05413, -.05413, 0., 0., 0., 0., .00533, .00533, .00533, .00533,
.00533, .03354, .03354, .03354, .03354, .03354, .05309, .05309,
.05309, .05309, .05309
]
alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(x)
assert_allclose(alpha1, 1.48, rtol=0, atol=0.01)
assert_almost_equal(beta1, -.22, 2)
assert_almost_equal(scale1, 0.01717, 4)
assert_almost_equal(
loc1, 0.00233, 2
) # to 2 dps due to rounding error in McCulloch86
# cover alpha=2 scenario
x2 = x + [.05309, .05309, .05309, .05309, .05309]
alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(x2)
assert_equal(alpha2, 2)
assert_equal(beta2, -1)
assert_almost_equal(scale2, .02503, 4)
assert_almost_equal(loc2, .03354, 4)
@pytest.mark.xfail(reason="Unknown problem with fitstart.")
@pytest.mark.parametrize(
"alpha,beta,delta,gamma",
[
(1.5, 0.4, 2, 3),
(1.0, 0.4, 2, 3),
]
)
@pytest.mark.parametrize(
"parametrization", ["S0", "S1"]
)
def test_fit_rvs(self, alpha, beta, delta, gamma, parametrization):
"""Test that fit agrees with rvs for each parametrization."""
stats.levy_stable.parametrization = parametrization
data = stats.levy_stable.rvs(
alpha, beta, loc=delta, scale=gamma, size=10000, random_state=self.rng
)
fit = stats.levy_stable._fitstart(data)
alpha_obs, beta_obs, delta_obs, gamma_obs = fit
assert_allclose(
[alpha, beta, delta, gamma],
[alpha_obs, beta_obs, delta_obs, gamma_obs],
rtol=0.01,
)
def test_fit_beta_flip(self):
# Confirm that sign of beta affects loc, not alpha or scale.
x = np.array([1, 1, 3, 3, 10, 10, 10, 30, 30, 100, 100])
alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(x)
alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(-x)
assert_equal(beta1, 1)
assert loc1 != 0
assert_almost_equal(alpha2, alpha1)
assert_almost_equal(beta2, -beta1)
assert_almost_equal(loc2, -loc1)
assert_almost_equal(scale2, scale1)
def test_fit_delta_shift(self):
# Confirm that loc slides up and down if data shifts.
SHIFT = 1
x = np.array([1, 1, 3, 3, 10, 10, 10, 30, 30, 100, 100])
alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(-x)
alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(-x + SHIFT)
assert_almost_equal(alpha2, alpha1)
assert_almost_equal(beta2, beta1)
assert_almost_equal(loc2, loc1 + SHIFT)
assert_almost_equal(scale2, scale1)
def test_fit_loc_extrap(self):
# Confirm that loc goes out of sample for alpha close to 1.
x = [1, 1, 3, 3, 10, 10, 10, 30, 30, 140, 140]
alpha1, beta1, loc1, scale1 = stats.levy_stable._fitstart(x)
assert alpha1 < 1, f"Expected alpha < 1, got {alpha1}"
assert loc1 < min(x), f"Expected loc < {min(x)}, got {loc1}"
x2 = [1, 1, 3, 3, 10, 10, 10, 30, 30, 130, 130]
alpha2, beta2, loc2, scale2 = stats.levy_stable._fitstart(x2)
assert alpha2 > 1, f"Expected alpha > 1, got {alpha2}"
assert loc2 > max(x2), f"Expected loc > {max(x2)}, got {loc2}"
@pytest.mark.slow
@pytest.mark.parametrize(
"pct_range,alpha_range,beta_range", [
pytest.param(
[.01, .5, .99],
[.1, 1, 2],
[-1, 0, .8],
),
pytest.param(
[.01, .05, .5, .95, .99],
[.1, .5, 1, 1.5, 2],
[-.9, -.5, 0, .3, .6, 1],
marks=pytest.mark.slow
),
pytest.param(
[.01, .05, .1, .25, .35, .5, .65, .75, .9, .95, .99],
np.linspace(0.1, 2, 20),
np.linspace(-1, 1, 21),
marks=pytest.mark.xslow,
),
]
)
def test_pdf_nolan_samples(
self, nolan_pdf_sample_data, pct_range, alpha_range, beta_range
):
"""Test pdf values against Nolan's stablec.exe output"""
data = nolan_pdf_sample_data
# some tests break on linux 32 bit
uname = platform.uname()
is_linux_32 = uname.system == 'Linux' and uname.machine == 'i686'
platform_desc = "/".join(
[uname.system, uname.machine, uname.processor])
# fmt: off
# There are a number of cases which fail on some but not all platforms.
# These are excluded by the filters below. TODO: Rewrite tests so that
# the now filtered out test cases are still run but marked in pytest as
# expected to fail.
tests = [
[
'dni', 1e-7, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
~(
(
(r['beta'] == 0) &
(r['pct'] == 0.5)
) |
(
(r['beta'] >= 0.9) &
(r['alpha'] >= 1.6) &
(r['pct'] == 0.5)
) |
(
(r['alpha'] <= 0.4) &
np.isin(r['pct'], [.01, .99])
) |
(
(r['alpha'] <= 0.3) &
np.isin(r['pct'], [.05, .95])
) |
(
(r['alpha'] <= 0.2) &
np.isin(r['pct'], [.1, .9])
) |
(
(r['alpha'] == 0.1) &
np.isin(r['pct'], [.25, .75]) &
np.isin(np.abs(r['beta']), [.5, .6, .7])
) |
(
(r['alpha'] == 0.1) &
np.isin(r['pct'], [.5]) &
np.isin(np.abs(r['beta']), [.1])
) |
(
(r['alpha'] == 0.1) &
np.isin(r['pct'], [.35, .65]) &
np.isin(np.abs(r['beta']), [-.4, -.3, .3, .4, .5])
) |
(
(r['alpha'] == 0.2) &
(r['beta'] == 0.5) &
(r['pct'] == 0.25)
) |
(
(r['alpha'] == 0.2) &
(r['beta'] == -0.3) &
(r['pct'] == 0.65)
) |
(
(r['alpha'] == 0.2) &
(r['beta'] == 0.3) &
(r['pct'] == 0.35)
) |
(
(r['alpha'] == 1.) &
np.isin(r['pct'], [.5]) &
np.isin(np.abs(r['beta']), [.1, .2, .3, .4])
) |
(
(r['alpha'] == 1.) &
np.isin(r['pct'], [.35, .65]) &
np.isin(np.abs(r['beta']), [.8, .9, 1.])
) |
(
(r['alpha'] == 1.) &
np.isin(r['pct'], [.01, .99]) &
np.isin(np.abs(r['beta']), [-.1, .1])
) |
# various points ok but too sparse to list
(r['alpha'] >= 1.1)
)
)
],
# piecewise generally good accuracy
[
'piecewise', 1e-11, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
(r['alpha'] > 0.2) &
(r['alpha'] != 1.)
)
],
# for alpha = 1. for linux 32 bit optimize.bisect
# has some issues for .01 and .99 percentile
[
'piecewise', 1e-11, lambda r: (
(r['alpha'] == 1.) &
(not is_linux_32) &
np.isin(r['pct'], pct_range) &
(1. in alpha_range) &
np.isin(r['beta'], beta_range)
)
],
# for small alpha very slightly reduced accuracy
[
'piecewise', 2.5e-10, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
(r['alpha'] <= 0.2)
)
],
# fft accuracy reduces as alpha decreases
[
'fft-simpson', 1e-5, lambda r: (
(r['alpha'] >= 1.9) &
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range)
),
],
[
'fft-simpson', 1e-6, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
(r['alpha'] > 1) &
(r['alpha'] < 1.9)
)
],
# fft relative errors for alpha < 1, will raise if enabled
# ['fft-simpson', 1e-4, lambda r: r['alpha'] == 0.9],
# ['fft-simpson', 1e-3, lambda r: r['alpha'] == 0.8],
# ['fft-simpson', 1e-2, lambda r: r['alpha'] == 0.7],
# ['fft-simpson', 1e-1, lambda r: r['alpha'] == 0.6],
]
# fmt: on
for ix, (default_method, rtol,
filter_func) in enumerate(tests):
stats.levy_stable.pdf_default_method = default_method
subdata = data[filter_func(data)
] if filter_func is not None else data
msg = "Density calculations experimental for FFT method"
with warnings.catch_warnings():
warnings.filterwarnings("ignore", msg, RuntimeWarning)
# occurs in FFT methods only
p = stats.levy_stable.pdf(
subdata['x'],
subdata['alpha'],
subdata['beta'],
scale=1,
loc=0
)
with np.errstate(over="ignore"):
subdata2 = rec_append_fields(
subdata,
['calc', 'abserr', 'relerr'],
[
p,
np.abs(p - subdata['p']),
np.abs(p - subdata['p']) / np.abs(subdata['p'])
]
)
failures = subdata2[
(subdata2['relerr'] >= rtol) |
np.isnan(p)
]
message = (
f"pdf test {ix} failed with method '{default_method}' "
f"[platform: {platform_desc}]\n{failures.dtype.names}\n{failures}"
)
assert_allclose(
p,
subdata['p'],
rtol,
err_msg=message,
verbose=False
)
@pytest.mark.parametrize(
"pct_range,alpha_range,beta_range", [
pytest.param(
[.01, .5, .99],
[.1, 1, 2],
[-1, 0, .8],
),
pytest.param(
[.01, .05, .5, .95, .99],
[.1, .5, 1, 1.5, 2],
[-.9, -.5, 0, .3, .6, 1],
marks=pytest.mark.slow
),
pytest.param(
[.01, .05, .1, .25, .35, .5, .65, .75, .9, .95, .99],
np.linspace(0.1, 2, 20),
np.linspace(-1, 1, 21),
marks=pytest.mark.xslow,
),
]
)
def test_cdf_nolan_samples(
self, nolan_cdf_sample_data, pct_range, alpha_range, beta_range
):
""" Test cdf values against Nolan's stablec.exe output."""
data = nolan_cdf_sample_data
tests = [
# piecewise generally good accuracy
[
'piecewise', 2e-12, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
~(
(
(r['alpha'] == 1.) &
np.isin(r['beta'], [-0.3, -0.2, -0.1]) &
(r['pct'] == 0.01)
) |
(
(r['alpha'] == 1.) &
np.isin(r['beta'], [0.1, 0.2, 0.3]) &
(r['pct'] == 0.99)
)
)
)
],
# for some points with alpha=1, Nolan's STABLE clearly
# loses accuracy
[
'piecewise', 5e-2, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
(
(r['alpha'] == 1.) &
np.isin(r['beta'], [-0.3, -0.2, -0.1]) &
(r['pct'] == 0.01)
) |
(
(r['alpha'] == 1.) &
np.isin(r['beta'], [0.1, 0.2, 0.3]) &
(r['pct'] == 0.99)
)
)
],
# fft accuracy poor, very poor alpha < 1
[
'fft-simpson', 1e-5, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
(r['alpha'] > 1.7)
)
],
[
'fft-simpson', 1e-4, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
(r['alpha'] > 1.5) &
(r['alpha'] <= 1.7)
)
],
[
'fft-simpson', 1e-3, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
(r['alpha'] > 1.3) &
(r['alpha'] <= 1.5)
)
],
[
'fft-simpson', 1e-2, lambda r: (
np.isin(r['pct'], pct_range) &
np.isin(r['alpha'], alpha_range) &
np.isin(r['beta'], beta_range) &
(r['alpha'] > 1.0) &
(r['alpha'] <= 1.3)
)
],
]
for ix, (default_method, rtol,
filter_func) in enumerate(tests):
stats.levy_stable.cdf_default_method = default_method
subdata = data[filter_func(data)
] if filter_func is not None else data
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
('Cumulative density calculations experimental for FFT'
' method. Use piecewise method instead.'),
RuntimeWarning)
p = stats.levy_stable.cdf(
subdata['x'],
subdata['alpha'],
subdata['beta'],
scale=1,
loc=0
)
with np.errstate(over="ignore"):
subdata2 = rec_append_fields(
subdata,
['calc', 'abserr', 'relerr'],
[
p,
np.abs(p - subdata['p']),
np.abs(p - subdata['p']) / np.abs(subdata['p'])
]
)
failures = subdata2[
(subdata2['relerr'] >= rtol) |
np.isnan(p)
]
message = (f"cdf test {ix} failed with method '{default_method}'\n"
f"{failures.dtype.names}\n{failures}")
assert_allclose(
p,
subdata['p'],
rtol,
err_msg=message,
verbose=False
)
@pytest.mark.parametrize("param", [0, 1])
@pytest.mark.parametrize("case", ["pdf", "cdf"])
def test_location_scale(
self, nolan_loc_scale_sample_data, param, case
):
"""Tests for pdf and cdf where loc, scale are different from 0, 1
"""
uname = platform.uname()
is_linux_32 = uname.system == 'Linux' and "32bit" in platform.architecture()[0]
# Test seems to be unstable (see gh-17839 for a bug report on Debian
# i386), so skip it.
if is_linux_32 and case == 'pdf':
pytest.skip("Test unstable on some platforms; see gh-17839, 17859")
data = nolan_loc_scale_sample_data
# We only test against piecewise as location/scale transforms
# are same for other methods.
stats.levy_stable.cdf_default_method = "piecewise"
stats.levy_stable.pdf_default_method = "piecewise"
subdata = data[data["param"] == param]
stats.levy_stable.parameterization = f"S{param}"
assert case in ["pdf", "cdf"]
function = (
stats.levy_stable.pdf if case == "pdf" else stats.levy_stable.cdf
)
v1 = function(
subdata['x'], subdata['alpha'], subdata['beta'], scale=2, loc=3
)
assert_allclose(v1, subdata[case], 1e-5)
@pytest.mark.parametrize(
"method,decimal_places",
[
['dni', 4],
['piecewise', 4],
]
)
def test_pdf_alpha_equals_one_beta_non_zero(self, method, decimal_places):
""" sample points extracted from Tables and Graphs of Stable
Probability Density Functions - Donald R Holt - 1973 - p 187.
"""
xs = np.array(
[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4]
)
density = np.array(
[
.3183, .3096, .2925, .2622, .1591, .1587, .1599, .1635, .0637,
.0729, .0812, .0955, .0318, .0390, .0458, .0586, .0187, .0236,
.0285, .0384
]
)
betas = np.array(
[
0, .25, .5, 1, 0, .25, .5, 1, 0, .25, .5, 1, 0, .25, .5, 1, 0,
.25, .5, 1
]
)
with np.errstate(all='ignore'), warnings.catch_warnings():
warnings.filterwarnings("ignore",
category=RuntimeWarning,
message="Density calculation unstable.*"
)
stats.levy_stable.pdf_default_method = method
# stats.levy_stable.fft_grid_spacing = 0.0001
pdf = stats.levy_stable.pdf(xs, 1, betas, scale=1, loc=0)
assert_almost_equal(
pdf, density, decimal_places, method
)
@pytest.mark.parametrize(
"params,expected",
[
[(1.48, -.22, 0, 1), (0, np.inf, np.nan, np.nan)],
[(2, .9, 10, 1.5), (10, 4.5, 0, 0)]
]
)
def test_stats(self, params, expected):
observed = stats.levy_stable.stats(
params[0], params[1], loc=params[2], scale=params[3],
moments='mvsk'
)
assert_almost_equal(observed, expected)
@pytest.mark.parametrize('alpha', [0.25, 0.5, 0.75])
@pytest.mark.parametrize(
'function,beta,points,expected',
[
(
stats.levy_stable.cdf,
1.0,
np.linspace(-25, 0, 10),
0.0,
),
(
stats.levy_stable.pdf,
1.0,
np.linspace(-25, 0, 10),
0.0,
),
(
stats.levy_stable.cdf,
-1.0,
np.linspace(0, 25, 10),
1.0,
),
(
stats.levy_stable.pdf,
-1.0,
np.linspace(0, 25, 10),
0.0,
)
]
)
def test_distribution_outside_support(
self, alpha, function, beta, points, expected
):
"""Ensure the pdf/cdf routines do not return nan outside support.
This distribution's support becomes truncated in a few special cases:
support is [mu, infty) if alpha < 1 and beta = 1
support is (-infty, mu] if alpha < 1 and beta = -1
Otherwise, the support is all reals. Here, mu is zero by default.
"""
assert 0 < alpha < 1
assert_almost_equal(
function(points, alpha=alpha, beta=beta),
np.full(len(points), expected)
)
@pytest.mark.parametrize(
'x,alpha,beta,expected',
# Reference values from Matlab
# format long
# alphas = [1.7720732804618808, 1.9217001522410235, 1.5654806051633634,
# 1.7420803447784388, 1.5748002527689913];
# betas = [0.5059373136902996, -0.8779442746685926, -0.4016220341911392,
# -0.38180029468259247, -0.25200194914153684];
# x0s = [0, 1e-4, -1e-4];
# for x0 = x0s
# disp("x0 = " + x0)
# for ii = 1:5
# alpha = alphas(ii);
# beta = betas(ii);
# pd = makedist('Stable','alpha',alpha,'beta',beta,'gam',1,'delta',0);
# % we need to adjust x. It is the same as x = 0 In scipy.
# x = x0 - beta * tan(pi * alpha / 2);
# disp(pd.pdf(x))
# end
# end
[
(0, 1.7720732804618808, 0.5059373136902996, 0.278932636798268),
(0, 1.9217001522410235, -0.8779442746685926, 0.281054757202316),
(0, 1.5654806051633634, -0.4016220341911392, 0.271282133194204),
(0, 1.7420803447784388, -0.38180029468259247, 0.280202199244247),
(0, 1.5748002527689913, -0.25200194914153684, 0.280136576218665),
]
)
def test_x_equal_zeta(
self, x, alpha, beta, expected
):
"""Test pdf for x equal to zeta.
With S1 parametrization: x0 = x + zeta if alpha != 1 So, for x = 0, x0
will be close to zeta.
When case "x equal zeta" is not handled properly and quad_eps is not
low enough: - pdf may be less than 0 - logpdf is nan
The points from the parametrize block are found randomly so that PDF is
less than 0.
Reference values taken from MATLAB
https://www.mathworks.com/help/stats/stable-distribution.html
"""
stats.levy_stable.quad_eps = 1.2e-11
assert_almost_equal(
stats.levy_stable.pdf(x, alpha=alpha, beta=beta),
expected,
)
@pytest.mark.xfail
@pytest.mark.parametrize(
# See comment for test_x_equal_zeta for script for reference values
'x,alpha,beta,expected',
[
(1e-4, 1.7720732804618808, 0.5059373136902996, 0.278929165340670),
(1e-4, 1.9217001522410235, -0.8779442746685926, 0.281056564327953),
(1e-4, 1.5654806051633634, -0.4016220341911392, 0.271252432161167),
(1e-4, 1.7420803447784388, -0.38180029468259247, 0.280205311264134),
(1e-4, 1.5748002527689913, -0.25200194914153684, 0.280140965235426),
(-1e-4, 1.7720732804618808, 0.5059373136902996, 0.278936106741754),
(-1e-4, 1.9217001522410235, -0.8779442746685926, 0.281052948629429),
(-1e-4, 1.5654806051633634, -0.4016220341911392, 0.271275394392385),
(-1e-4, 1.7420803447784388, -0.38180029468259247, 0.280199085645099),
(-1e-4, 1.5748002527689913, -0.25200194914153684, 0.280132185432842),
]
)
def test_x_near_zeta(
self, x, alpha, beta, expected
):
"""Test pdf for x near zeta.
With S1 parametrization: x0 = x + zeta if alpha != 1 So, for x = 0, x0
will be close to zeta.
When case "x near zeta" is not handled properly and quad_eps is not
low enough: - pdf may be less than 0 - logpdf is nan
The points from the parametrize block are found randomly so that PDF is
less than 0.
Reference values taken from MATLAB
https://www.mathworks.com/help/stats/stable-distribution.html
"""
stats.levy_stable.quad_eps = 1.2e-11
assert_almost_equal(
stats.levy_stable.pdf(x, alpha=alpha, beta=beta),
expected,
)
@pytest.fixture
def levy_stable_lock(self):
return threading.Lock()
def test_frozen_parameterization_gh20821(self, levy_stable_lock):
# gh-20821 reported that frozen distributions ignore the parameterization.
# Check that this is resolved and that the frozen distribution's
# parameterization can be changed independently of stats.levy_stable
rng = np.random.default_rng
shapes = dict(alpha=1.9, beta=0.1, loc=0.0, scale=1.0)
unfrozen = stats.levy_stable
frozen = stats.levy_stable(**shapes)
with levy_stable_lock:
unfrozen.parameterization = "S0"
frozen.parameterization = "S1"
unfrozen_a = unfrozen.rvs(**shapes, size=10, random_state=rng(329823498))
frozen_a = frozen.rvs(size=10, random_state=rng(329823498))
assert not np.any(frozen_a == unfrozen_a)
unfrozen.parameterization = "S1"
frozen.parameterization = "S0"
unfrozen_b = unfrozen.rvs(**shapes, size=10, random_state=rng(329823498))
frozen_b = frozen.rvs(size=10, random_state=rng(329823498))
assert_equal(frozen_b, unfrozen_a)
assert_equal(unfrozen_b, frozen_a)
def test_frozen_parameterization_gh20821b(self, levy_stable_lock):
# Check that the parameterization of the frozen distribution is that of
# the unfrozen distribution at the time of freezing
rng = np.random.default_rng
shapes = dict(alpha=1.9, beta=0.1, loc=0.0, scale=1.0)
unfrozen = stats.levy_stable
with levy_stable_lock:
unfrozen.parameterization = "S0"
frozen = stats.levy_stable(**shapes)
unfrozen_a = unfrozen.rvs(**shapes, size=10, random_state=rng(329823498))
frozen_a = frozen.rvs(size=10, random_state=rng(329823498))
assert_equal(frozen_a, unfrozen_a)
unfrozen.parameterization = "S1"
frozen = stats.levy_stable(**shapes)
unfrozen_b = unfrozen.rvs(**shapes, size=10, random_state=rng(329823498))
frozen_b = frozen.rvs(size=10, random_state=rng(329823498))
assert_equal(frozen_b, unfrozen_b)
| TestLevyStable |
python | mwaskom__seaborn | doc/tools/nb_to_doc.py | {
"start": 1518,
"end": 5919
} | class ____(Exception):
pass
def pop_recursive(d, key, default=None):
"""dict.pop(key) where `key` is a `.`-delimited list of nested keys.
>>> d = {'a': {'b': 1, 'c': 2}}
>>> pop_recursive(d, 'a.c')
2
>>> d
{'a': {'b': 1}}
"""
nested = key.split('.')
current = d
for k in nested[:-1]:
if hasattr(current, 'get'):
current = current.get(k, {})
else:
return default
if not hasattr(current, 'pop'):
return default
return current.pop(nested[-1], default)
def strip_output(nb):
"""
Strip the outputs, execution count/prompt number and miscellaneous
metadata from a notebook object, unless specified to keep either the
outputs or counts.
"""
keys = {'metadata': [], 'cell': {'metadata': ["execution"]}}
nb.metadata.pop('signature', None)
nb.metadata.pop('widgets', None)
for field in keys['metadata']:
pop_recursive(nb.metadata, field)
if 'NB_KERNEL' in os.environ:
nb.metadata['kernelspec']['name'] = os.environ['NB_KERNEL']
nb.metadata['kernelspec']['display_name'] = os.environ['NB_KERNEL']
for cell in nb.cells:
if 'outputs' in cell:
cell['outputs'] = []
if 'prompt_number' in cell:
cell['prompt_number'] = None
if 'execution_count' in cell:
cell['execution_count'] = None
# Always remove this metadata
for output_style in ['collapsed', 'scrolled']:
if output_style in cell.metadata:
cell.metadata[output_style] = False
if 'metadata' in cell:
for field in ['collapsed', 'scrolled', 'ExecuteTime']:
cell.metadata.pop(field, None)
for (extra, fields) in keys['cell'].items():
if extra in cell:
for field in fields:
pop_recursive(getattr(cell, extra), field)
return nb
if __name__ == "__main__":
# Get the desired ipynb file path and parse into components
_, fpath, outdir = sys.argv
basedir, fname = os.path.split(fpath)
fstem = fname[:-6]
# Read the notebook
with open(fpath) as f:
nb = nbformat.read(f, as_version=4)
# Run the notebook
kernel = os.environ.get("NB_KERNEL", None)
if kernel is None:
kernel = nb["metadata"]["kernelspec"]["name"]
ep = ExecutePreprocessor(
timeout=600,
kernel_name=kernel,
extra_arguments=["--InlineBackend.rc=figure.dpi=88"]
)
ep.preprocess(nb, {"metadata": {"path": basedir}})
# Remove plain text execution result outputs
for cell in nb.get("cells", {}):
if "show-output" in cell["metadata"].get("tags", []):
continue
fields = cell.get("outputs", [])
for field in fields:
if field["output_type"] == "execute_result":
data_keys = field["data"].keys()
for key in list(data_keys):
if key == "text/plain":
field["data"].pop(key)
if not field["data"]:
fields.remove(field)
# Convert to .rst formats
exp = RSTExporter()
c = Config()
c.TagRemovePreprocessor.remove_cell_tags = {"hide"}
c.TagRemovePreprocessor.remove_input_tags = {"hide-input"}
c.TagRemovePreprocessor.remove_all_outputs_tags = {"hide-output"}
c.ExtractOutputPreprocessor.output_filename_template = \
f"{fstem}_files/{fstem}_" + "{cell_index}_{index}{extension}"
exp.register_preprocessor(TagRemovePreprocessor(config=c), True)
exp.register_preprocessor(ExtractOutputPreprocessor(config=c), True)
body, resources = exp.from_notebook_node(nb)
# Clean the output on the notebook and save a .ipynb back to disk
nb = strip_output(nb)
with open(fpath, "wt") as f:
nbformat.write(nb, f)
# Write the .rst file
rst_path = os.path.join(outdir, f"{fstem}.rst")
with open(rst_path, "w") as f:
f.write(body)
# Write the individual image outputs
imdir = os.path.join(outdir, f"{fstem}_files")
if not os.path.exists(imdir):
os.mkdir(imdir)
for imname, imdata in resources["outputs"].items():
if imname.startswith(fstem):
impath = os.path.join(outdir, f"{imname}")
with open(impath, "wb") as f:
f.write(imdata)
| MetadataError |
python | ray-project__ray | python/ray/tests/kuberay/test_autoscaling_e2e.py | {
"start": 1850,
"end": 14886
} | class ____(unittest.TestCase):
"""e2e verification of autoscaling following the steps in the Ray documentation.
kubectl is used throughout, as that reflects the instructions in the docs.
"""
def _get_ray_cr_config(
self, min_replicas=0, cpu_replicas=0, gpu_replicas=0
) -> Dict[str, Any]:
"""Get Ray CR config yaml.
- Use configurable replica fields for a CPU workerGroup.
- Add a GPU-annotated group for testing GPU upscaling.
- Fill in Ray image, autoscaler image, and image pull policies from env
variables.
"""
if AUTOSCALER_V2 == "True":
with open(EXAMPLE_CLUSTER_PATH_V2) as ray_cr_config_file:
ray_cr_config_str = ray_cr_config_file.read()
else:
with open(EXAMPLE_CLUSTER_PATH) as ray_cr_config_file:
ray_cr_config_str = ray_cr_config_file.read()
for k8s_object in yaml.safe_load_all(ray_cr_config_str):
if k8s_object["kind"] in ["RayCluster", "RayJob", "RayService"]:
config = k8s_object
break
head_group = config["spec"]["headGroupSpec"]
if "rayStartParams" not in head_group:
head_group["rayStartParams"] = {}
head_group["rayStartParams"][
"resources"
] = '"{\\"Custom1\\": 1, \\"Custom2\\": 5}"'
cpu_group = config["spec"]["workerGroupSpecs"][0]
cpu_group["replicas"] = cpu_replicas
cpu_group["minReplicas"] = min_replicas
# Keep maxReplicas big throughout the test.
cpu_group["maxReplicas"] = 300
if "rayStartParams" not in cpu_group:
cpu_group["rayStartParams"] = {}
cpu_group["rayStartParams"][
"resources"
] = '"{\\"Custom1\\": 1, \\"Custom2\\": 5}"'
# Add a GPU-annotated group.
# (We're not using real GPUs, just adding a GPU annotation for the autoscaler
# and Ray scheduler.)
gpu_group = copy.deepcopy(cpu_group)
if "rayStartParams" not in gpu_group:
gpu_group["rayStartParams"] = {}
gpu_group["rayStartParams"]["num-gpus"] = "1"
gpu_group["replicas"] = gpu_replicas
gpu_group["minReplicas"] = 0
gpu_group["maxReplicas"] = 1
gpu_group["groupName"] = "fake-gpu-group"
config["spec"]["workerGroupSpecs"].append(gpu_group)
# Substitute images.
for group_spec in config["spec"]["workerGroupSpecs"] + [
config["spec"]["headGroupSpec"]
]:
containers = group_spec["template"]["spec"]["containers"]
ray_container = containers[0]
# Confirm the first container in the example config is the Ray container.
assert ray_container["name"] in ["ray-head", "ray-worker"]
# ("machine-learning" is the name of the worker Ray container)
ray_container["image"] = RAY_IMAGE
for container in containers:
container["imagePullPolicy"] = PULL_POLICY
autoscaler_options = {
"image": AUTOSCALER_IMAGE,
"imagePullPolicy": PULL_POLICY,
# Allow quick scale-down for test purposes.
"idleTimeoutSeconds": 10,
}
config["spec"]["autoscalerOptions"] = autoscaler_options
return config
def _apply_ray_cr(
self,
min_replicas=0,
cpu_replicas=0,
gpu_replicas=0,
validate_replicas: bool = False,
) -> None:
"""Apply Ray CR config yaml, with configurable replica fields for the cpu
workerGroup.
If the CR does not yet exist, `replicas` can be set as desired.
If the CR does already exist, the recommended usage is this:
(1) Set `cpu_replicas` and `gpu_replicas` to what we currently expect them
to be.
(2) Set `validate_replicas` to True. We will then check that the replicas
set on the CR coincides with `replicas`.
"""
if validate_replicas:
raycluster = get_raycluster(
RAY_CLUSTER_NAME, namespace=RAY_CLUSTER_NAMESPACE
)
assert raycluster["spec"]["workerGroupSpecs"][0]["replicas"] == cpu_replicas
assert raycluster["spec"]["workerGroupSpecs"][1]["replicas"] == gpu_replicas
logger.info(
f"Validated that cpu and gpu worker replicas for "
f"{RAY_CLUSTER_NAME} are currently {cpu_replicas} and"
f" {gpu_replicas}, respectively."
)
cr_config = self._get_ray_cr_config(
min_replicas=min_replicas,
cpu_replicas=cpu_replicas,
gpu_replicas=gpu_replicas,
)
with tempfile.NamedTemporaryFile("w") as config_file:
yaml.dump(cr_config, config_file)
config_file.flush()
subprocess.check_call(
["kubectl", "apply", "-f", config_file.name],
stdout=sys.stdout,
stderr=sys.stderr,
)
def testAutoscaling(self):
"""Test the following behaviors:
1. Spinning up a Ray cluster
2. Scaling up Ray workers via autoscaler.sdk.request_resources()
3. Scaling up by updating the CRD's minReplicas
4. Scaling down by removing the resource request and reducing maxReplicas
5. Autoscaler recognizes GPU annotations and Ray custom resources.
6. Autoscaler and operator ignore pods marked for deletion.
7. Autoscaler logs work. Autoscaler events are piped to the driver.
8. Ray utils show correct resource limits in the head container.
TODO (Dmitri): Split up the test logic.
Too much is stuffed into this one test case.
Resources requested by this test are safely within the bounds of an m5.xlarge
instance.
The resource REQUESTS are:
- One Ray head pod
- Autoscaler: .25 CPU, .5 Gi memory
- Ray node: .5 CPU, .5 Gi memeory
- Three Worker pods
- Ray node: .5 CPU, .5 Gi memory
Total: 2.25 CPU, 2.5 Gi memory.
Including operator and system pods, the total CPU requested is around 3.
The cpu LIMIT of each Ray container is 1.
The `num-cpus` arg to Ray start is 1 for each Ray container; thus Ray accounts
1 CPU for each Ray node in the test.
"""
switch_to_ray_parent_dir()
# Cluster creation
logger.info("Creating a RayCluster with no worker pods.")
self._apply_ray_cr(min_replicas=0, cpu_replicas=0, gpu_replicas=0)
logger.info("Confirming presence of head.")
wait_for_pods(goal_num_pods=1, namespace=RAY_CLUSTER_NAMESPACE)
logger.info("Waiting for head pod to start Running.")
wait_for_pod_to_start(
pod_name_filter=HEAD_POD_PREFIX, namespace=RAY_CLUSTER_NAMESPACE
)
logger.info("Confirming Ray is up on the head pod.")
wait_for_ray_health(
pod_name_filter=HEAD_POD_PREFIX, namespace=RAY_CLUSTER_NAMESPACE
)
head_pod = get_pod(
pod_name_filter=HEAD_POD_PREFIX, namespace=RAY_CLUSTER_NAMESPACE
)
assert head_pod, "Could not find the Ray head pod."
# Confirm head pod resource allocation.
# (This is a misplaced test of Ray's resource detection in containers.
# See the TODO in the docstring.)
logger.info("Confirming head pod resource allocation.")
out = kubectl_exec_python_script( # Interaction mode #1: `kubectl exec`
script_name="check_cpu_and_memory.py",
pod=head_pod,
container="ray-head",
namespace="default",
)
# Scale-up
logger.info("Scaling up to one worker via Ray resource request.")
# The request for 2 cpus should give us a 1-cpu head (already present) and a
# 1-cpu worker (will await scale-up).
kubectl_exec_python_script( # Interaction mode #1: `kubectl exec`
script_name="scale_up.py",
pod=head_pod,
container="ray-head",
namespace="default",
)
# Check that stdout autoscaler logging is working.
logs = kubectl_logs(head_pod, namespace="default", container="autoscaler")
assert "Adding 1 node(s) of type small-group." in logs
logger.info("Confirming number of workers.")
wait_for_pods(goal_num_pods=2, namespace=RAY_CLUSTER_NAMESPACE)
# Ray CR updates.
logger.info("Scaling up to two workers by editing minReplicas.")
# replicas=1 reflects the current number of workers
# (which is what we expect to be already present in the Ray CR)
self._apply_ray_cr(
min_replicas=2,
cpu_replicas=1,
gpu_replicas=0,
# Confirm CPU, GPU replicas set on the Ray CR by the autoscaler are 1, 0:
validate_replicas=True,
)
logger.info("Confirming number of workers.")
wait_for_pods(goal_num_pods=3, namespace=RAY_CLUSTER_NAMESPACE)
# GPU upscaling.
# 1. Check we haven't spuriously already started a fake GPU node.
assert not any(
"gpu" in pod_name
for pod_name in get_pod_names(namespace=RAY_CLUSTER_NAMESPACE)
)
# 2. Trigger GPU upscaling by requesting placement of a GPU actor.
logger.info("Scheduling an Actor with GPU demands.")
kubectl_exec_python_script(
script_name="gpu_actor_placement.py",
pod=head_pod,
container="ray-head",
namespace="default",
)
# 3. Confirm new pod number and presence of fake GPU worker.
logger.info("Confirming fake GPU worker up-scaling.")
wait_for_pods(goal_num_pods=4, namespace=RAY_CLUSTER_NAMESPACE)
gpu_workers = [
pod_name
for pod_name in get_pod_names(namespace=RAY_CLUSTER_NAMESPACE)
if "gpu" in pod_name
]
assert len(gpu_workers) == 1
# 4. Confirm that the GPU actor is up and that Ray believes
# the node the actor is on has a GPU.
logger.info("Confirming GPU actor placement.")
out = kubectl_exec_python_script(
script_name="gpu_actor_validation.py",
pod=head_pod,
container="ray-head",
namespace="default",
)
# Confirms the actor was placed on a GPU-annotated node.
# (See gpu_actor_validation.py for details.)
assert "on-a-gpu-node" in out
# Scale-down
logger.info("Reducing min workers to 0.")
# Max workers remains 300.
self._apply_ray_cr(
min_replicas=0,
cpu_replicas=2,
gpu_replicas=1,
# Confirm CPU, GPU replicas set on the Ray CR by the autoscaler are 2, 1:
validate_replicas=True,
)
logger.info("Removing resource demands.")
kubectl_exec_python_script(
script_name="scale_down.py",
pod=head_pod,
container="ray-head",
namespace="default",
)
# Autoscaler should trigger scale-down after resource demands are removed.
logger.info("Confirming workers are gone.")
# Check that stdout autoscaler logging is working.
logs = kubectl_logs(head_pod, namespace="default", container="autoscaler")
assert "Removing 1 nodes of type fake-gpu-group (idle)." in logs
wait_for_pods(goal_num_pods=1, namespace=RAY_CLUSTER_NAMESPACE, tries=120)
# Check custom resource upscaling.
# Submit two {"Custom2": 3} bundles to upscale two workers with 5
# Custom2 capacity each.
logger.info("Scaling up workers with request for custom resources.")
out = kubectl_exec_python_script(
script_name="scale_up_custom.py",
pod=head_pod,
container="ray-head",
namespace="default",
)
assert "Submitted custom scale request!" in out, out
logger.info("Confirming two workers have scaled up.")
wait_for_pods(goal_num_pods=3, namespace=RAY_CLUSTER_NAMESPACE)
# Cluster deletion
logger.info("Deleting Ray cluster.")
kubectl_delete(
kind="raycluster", name=RAY_CLUSTER_NAME, namespace=RAY_CLUSTER_NAMESPACE
)
logger.info("Confirming Ray pods are gone.")
wait_for_pods(goal_num_pods=0, namespace=RAY_CLUSTER_NAMESPACE)
if __name__ == "__main__":
kubeconfig_base64 = os.environ.get("KUBECONFIG_BASE64")
if kubeconfig_base64:
kubeconfig_file = os.environ.get("KUBECONFIG")
if not kubeconfig_file:
raise ValueError("When KUBECONFIG_BASE64 is set, KUBECONFIG must be set.")
with open(kubeconfig_file, "wb") as f:
f.write(base64.b64decode(kubeconfig_base64))
sys.exit(pytest.main(["-vv", __file__]))
| KubeRayAutoscalingTest |
python | encode__httpx | httpx/_exceptions.py | {
"start": 4264,
"end": 4448
} | class ____(TransportError):
"""
Attempted to make a request to an unsupported protocol.
For example issuing a request to `ftp://www.example.com`.
"""
| UnsupportedProtocol |
python | apache__thrift | test/py/TestClient.py | {
"start": 12232,
"end": 13603
} | class ____(TProtocolDecorator.TProtocolDecorator):
"""
Wraps any protocol with sequence ID checking: looks for outbound
uniqueness as well as request/response alignment.
"""
def __init__(self, protocol):
# TProtocolDecorator.__new__ does all the heavy lifting
pass
def writeMessageBegin(self, name, type, seqid):
global LAST_SEQID
if LAST_SEQID and LAST_SEQID == seqid:
raise TProtocol.TProtocolException(
TProtocol.TProtocolException.INVALID_DATA,
"Python client reused sequence ID {0}".format(seqid))
LAST_SEQID = seqid
super(TPedanticSequenceIdProtocolWrapper, self).writeMessageBegin(
name, type, seqid)
def readMessageBegin(self):
global LAST_SEQID
(name, type, seqid) =\
super(TPedanticSequenceIdProtocolWrapper, self).readMessageBegin()
if LAST_SEQID != seqid:
raise TProtocol.TProtocolException(
TProtocol.TProtocolException.INVALID_DATA,
"We sent seqid {0} and server returned seqid {1}".format(
self.last, seqid))
return (name, type, seqid)
def make_pedantic(proto):
""" Wrap a protocol in the pedantic sequence ID wrapper. """
return TPedanticSequenceIdProtocolWrapper(proto)
| TPedanticSequenceIdProtocolWrapper |
python | bokeh__bokeh | src/bokeh/core/property/data_frame.py | {
"start": 2223,
"end": 2851
} | class ____(Property["IntoSeries"]):
""" Accept eager series supported by Narwhals.
This property only exists to support type validation, e.g. for "accepts"
clauses. It is not serializable itself, and is not useful to add to
Bokeh models directly.
"""
def validate(self, value: Any, detail: bool = True) -> None:
import narwhals.stable.v1 as nw
super().validate(value, detail)
if nw.dependencies.is_into_series(value):
return
msg = "" if not detail else f"expected object convertible to Narwhals Series, got {value!r}"
raise ValueError(msg)
| EagerSeries |
python | python-openxml__python-docx | src/docx/oxml/simpletypes.py | {
"start": 5887,
"end": 6064
} | class ____(XsdLong):
@classmethod
def validate(cls, value: Any) -> None:
cls.validate_int_in_range(value, -27273042329600, 27273042316900)
| ST_CoordinateUnqualified |
python | walkccc__LeetCode | solutions/1169. Invalid Transactions/1169.py | {
"start": 0,
"end": 704
} | class ____:
def invalidTransactions(self, transactions: list[str]) -> list[str]:
ans = []
nameToTrans = collections.defaultdict(list)
for t in transactions:
name, time, amount, city = t.split(',')
time, amount = int(time), int(amount)
nameToTrans[name].append({'time': time, 'city': city})
for t in transactions:
name, time, amount, city = t.split(',')
time, amount = int(time), int(amount)
if amount > 1000:
ans.append(t)
elif name in nameToTrans:
for sameName in nameToTrans[name]:
if abs(sameName['time'] - time) <= 60 and sameName['city'] != city:
ans.append(t)
break
return ans
| Solution |
python | gevent__gevent | src/greentest/3.12/test_subprocess.py | {
"start": 2752,
"end": 70872
} | class ____(BaseTestCase):
def test_io_buffered_by_default(self):
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
self.assertIsInstance(p.stdin, io.BufferedIOBase)
self.assertIsInstance(p.stdout, io.BufferedIOBase)
self.assertIsInstance(p.stderr, io.BufferedIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_io_unbuffered_works(self):
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=0)
try:
self.assertIsInstance(p.stdin, io.RawIOBase)
self.assertIsInstance(p.stdout, io.RawIOBase)
self.assertIsInstance(p.stderr, io.RawIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_call_timeout(self):
# call() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.call waits for the
# child.
self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
[sys.executable, "-c", "while True: pass"],
timeout=0.1)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call(ZERO_RETURN_CMD)
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print('BDFL')"])
self.assertIn(b'BDFL', output)
with self.assertRaisesRegex(ValueError,
"stdout argument not allowed, it will be overridden"):
subprocess.check_output([], stdout=None)
with self.assertRaisesRegex(ValueError,
"check argument not allowed, it will be overridden"):
subprocess.check_output([], check=False)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn(b'BDFL', output)
def test_check_output_stdin_arg(self):
# check_output() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
stdin=tf)
self.assertIn(b'PEAR', output)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
input=b'pear')
self.assertIn(b'PEAR', output)
def test_check_output_input_none(self):
"""input=None has a legacy meaning of input='' on check_output."""
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; print('XX' if sys.stdin.read() else '')"],
input=None)
self.assertNotIn(b'XX', output)
def test_check_output_input_none_text(self):
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; print('XX' if sys.stdin.read() else '')"],
input=None, text=True)
self.assertNotIn('XX', output)
def test_check_output_input_none_universal_newlines(self):
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; print('XX' if sys.stdin.read() else '')"],
input=None, universal_newlines=True)
self.assertNotIn('XX', output)
def test_check_output_input_none_encoding_errors(self):
output = subprocess.check_output(
[sys.executable, "-c", "print('foo')"],
input=None, encoding='utf-8', errors='ignore')
self.assertIn('foo', output)
def test_check_output_stdout_arg(self):
# check_output() refuses to accept 'stdout' argument
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_check_output_stdin_with_input_arg(self):
# check_output() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdin=tf, input=b'hare')
self.fail("Expected ValueError when stdin and input args supplied.")
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
# check_output() function with timeout arg
with self.assertRaises(subprocess.TimeoutExpired) as c:
output = subprocess.check_output(
[sys.executable, "-c",
"import time; time.sleep(3600)"],
timeout=0.1)
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def _assert_python(self, pre_args, **kwargs):
# We include sys.exit() to prevent the test runner from hanging
# whenever python is found.
args = pre_args + ["import sys; sys.exit(47)"]
p = subprocess.Popen(args, **kwargs)
p.wait()
self.assertEqual(47, p.returncode)
def test_executable(self):
# Check that the executable argument works.
#
# On Unix (non-Mac and non-Windows), Python looks at args[0] to
# determine where its standard library is, so we need the directory
# of args[0] to be valid for the Popen() call to Python to succeed.
# See also issue #16170 and issue #7774.
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"], executable=sys.executable)
def test_bytes_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=os.fsencode(sys.executable))
def test_pathlike_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=FakePath(sys.executable))
def test_executable_takes_precedence(self):
# Check that the executable argument takes precedence over args[0].
#
# Verify first that the call succeeds without the executable arg.
pre_args = [sys.executable, "-c"]
self._assert_python(pre_args)
self.assertRaises(NONEXISTING_ERRORS,
self._assert_python, pre_args,
executable=NONEXISTING_CMD[0])
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_executable_replaces_shell(self):
# Check that the executable argument replaces the default shell
# when shell=True.
self._assert_python([], executable=sys.executable, shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_bytes_executable_replaces_shell(self):
self._assert_python([], executable=os.fsencode(sys.executable),
shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_pathlike_executable_replaces_shell(self):
self._assert_python([], executable=FakePath(sys.executable),
shell=True)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
with os_helper.change_cwd(cwd):
return os.getcwd()
# For use in the test_cwd* tests below.
def _split_python_path(self):
# Return normalized (python_dir, python_base).
python_path = os.path.realpath(sys.executable)
return os.path.split(python_path)
# For use in the test_cwd* tests below.
def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
# Invoke Python via Popen, and assert that (1) the call succeeds,
# and that (2) the current working directory of the child process
# matches *expected_cwd*.
p = subprocess.Popen([python_arg, "-c",
"import os, sys; "
"buf = sys.stdout.buffer; "
"buf.write(os.getcwd().encode()); "
"buf.flush(); "
"sys.exit(47)"],
stdout=subprocess.PIPE,
**kwargs)
self.addCleanup(p.stdout.close)
p.wait()
self.assertEqual(47, p.returncode)
normcase = os.path.normcase
self.assertEqual(normcase(expected_cwd),
normcase(p.stdout.read().decode()))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
def test_cwd_with_bytes(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=os.fsencode(temp_dir))
def test_cwd_with_pathlike(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=FakePath(temp_dir))
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
# Check that Popen looks for args[0] relative to cwd if args[0]
# is relative.
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
with os_helper.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python])
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, rel_python, cwd=python_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_executable(self):
# Check that Popen looks for executable relative to cwd if executable
# is relative (and that executable takes precedence over args[0]).
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
doesntexist = "somethingyoudonthave"
with os_helper.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python)
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python,
cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, doesntexist, executable=rel_python,
cwd=python_dir)
def test_cwd_with_absolute_arg(self):
    """An absolute args[0] is found even when cwd points elsewhere."""
    # Check that Popen can find the executable when the cwd is wrong
    # if args[0] is an absolute path.
    python_dir, python_base = self._split_python_path()
    abs_python = os.path.join(python_dir, python_base)
    rel_python = os.path.join(os.curdir, python_base)
    with os_helper.temp_dir() as wrong_dir:
        # Before calling with an absolute path, confirm that using a
        # relative path fails.
        self.assertRaises(FileNotFoundError, subprocess.Popen,
                          [rel_python], cwd=wrong_dir)
        wrong_dir = self._normalize_cwd(wrong_dir)
        self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
                 'Test is not venv-compatible')
def test_executable_with_cwd(self):
    """executable= is honored even when args[0] does not exist."""
    python_dir, python_base = self._split_python_path()
    python_dir = self._normalize_cwd(python_dir)
    self._assert_cwd(python_dir, "somethingyoudonthave",
                     executable=sys.executable, cwd=python_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
                 'Test is not venv-compatible')
@unittest.skipIf(sysconfig.is_python_build(),
                 "need an installed Python. See #7774")
def test_executable_without_cwd(self):
    """executable= works without an explicit cwd for installed Pythons."""
    # For a normal installation, it should work without 'cwd'
    # argument. For test runs in the build directory, see #7774.
    self._assert_cwd(os.getcwd(), "somethingyoudonthave",
                     executable=sys.executable)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
with p:
self.assertEqual(p.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"strawberry")
def test_stderr_redirect_with_no_stdout_redirect(self):
    """stderr=STDOUT works in a child whose own stdout is not redirected."""
    # test stderr=STDOUT while stdout=None (not set)
    #
    # - grandchild prints to stderr
    # - child redirects grandchild's stderr to its stdout
    # - the parent should get grandchild's stderr in child's stdout
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys, subprocess;'
                          'rc = subprocess.call([sys.executable, "-c",'
                          ' "import sys;"'
                          ' "sys.stderr.write(\'42\')"],'
                          ' stderr=subprocess.STDOUT);'
                          'sys.exit(rc)'],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    #NOTE: stdout should get stderr from grandchild
    self.assertEqual(stdout, b'42')
    self.assertEqual(stderr, b'') # should be empty
    self.assertEqual(p.returncode, 0)
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
with p:
self.assertEqual(p.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"appleorange")
def test_stdout_filedes_of_stdout(self):
    """Passing stdout=1 makes the child write to its parent's stdout."""
    # stdout is set to 1 (#1531862).
    # To avoid printing the text on stdout, we do something similar to
    # test_stdout_none (see above). The parent subprocess calls the child
    # subprocess passing stdout=1, and this test uses stdout=PIPE in
    # order to capture and check the output of the parent. See #11963.
    code = ('import sys, subprocess; '
            'rc = subprocess.call([sys.executable, "-c", '
            ' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
            'b\'test with stdout=1\'))"], stdout=1); '
            'assert rc == 18')
    p = subprocess.Popen([sys.executable, "-c", code],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    out, err = p.communicate()
    self.assertEqual(p.returncode, 0, err)
    self.assertEqual(out.rstrip(), b'test with stdout=1')
def test_stdout_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'for i in range(10240):'
'print("x" * 1024)'],
stdout=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdout, None)
def test_stderr_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys\n'
'for i in range(10240):'
'sys.stderr.write("x" * 1024)'],
stderr=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stderr, None)
def test_stdin_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdin.read(1)'],
stdin=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdin, None)
@unittest.skipUnless(fcntl and hasattr(fcntl, 'F_GETPIPE_SZ'),
                     'fcntl.F_GETPIPE_SZ required for test.')
def test_pipesizes(self):
    """The pipesize= argument resizes all three stdio pipes of the child."""
    test_pipe_r, test_pipe_w = os.pipe()
    try:
        # Get the default pipesize with F_GETPIPE_SZ
        pipesize_default = fcntl.fcntl(test_pipe_w, fcntl.F_GETPIPE_SZ)
    finally:
        os.close(test_pipe_r)
        os.close(test_pipe_w)
    # Request a smaller-than-default size so a change is observable.
    pipesize = pipesize_default // 2
    pagesize_default = support.get_pagesize()
    if pipesize < pagesize_default:  # the POSIX minimum
        raise unittest.SkipTest(
            'default pipesize too small to perform test.')
    p = subprocess.Popen(
        [sys.executable, "-c",
         'import sys; sys.stdin.read(); sys.stdout.write("out"); '
         'sys.stderr.write("error!")'],
        stdin=subprocess.PIPE, stdout=subprocess.PIPE,
        stderr=subprocess.PIPE, pipesize=pipesize)
    try:
        for fifo in [p.stdin, p.stdout, p.stderr]:
            self.assertEqual(
                fcntl.fcntl(fifo.fileno(), fcntl.F_GETPIPE_SZ),
                pipesize)
        # Windows pipe size can be acquired via GetNamedPipeInfoFunction
        # https://docs.microsoft.com/en-us/windows/win32/api/namedpipeapi/nf-namedpipeapi-getnamedpipeinfo
        # However, this function is not yet in _winapi.
        p.stdin.write(b"pear")
        p.stdin.close()
        p.stdout.close()
        p.stderr.close()
    finally:
        p.kill()
        p.wait()
@unittest.skipUnless(fcntl and hasattr(fcntl, 'F_GETPIPE_SZ'),
                     'fcntl.F_GETPIPE_SZ required for test.')
def test_pipesize_default(self):
    """pipesize=-1 leaves the stdio pipes at the OS default size."""
    proc = subprocess.Popen(
        [sys.executable, "-c",
         'import sys; sys.stdin.read(); sys.stdout.write("out"); '
         'sys.stderr.write("error!")'],
        stdin=subprocess.PIPE, stdout=subprocess.PIPE,
        stderr=subprocess.PIPE, pipesize=-1)
    with proc:
        try:
            # Measure the default size on a freshly created pipe pair.
            fp_r, fp_w = os.pipe()
            try:
                default_read_pipesize = fcntl.fcntl(fp_r, fcntl.F_GETPIPE_SZ)
                default_write_pipesize = fcntl.fcntl(fp_w, fcntl.F_GETPIPE_SZ)
            finally:
                os.close(fp_r)
                os.close(fp_w)
            self.assertEqual(
                fcntl.fcntl(proc.stdin.fileno(), fcntl.F_GETPIPE_SZ),
                default_read_pipesize)
            self.assertEqual(
                fcntl.fcntl(proc.stdout.fileno(), fcntl.F_GETPIPE_SZ),
                default_write_pipesize)
            self.assertEqual(
                fcntl.fcntl(proc.stderr.fileno(), fcntl.F_GETPIPE_SZ),
                default_write_pipesize)
            # On other platforms we cannot test the pipe size (yet). But above
            # code using pipesize=-1 should not crash.
        finally:
            proc.kill()
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
with subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange")
@unittest.skipUnless(sys.platform == "win32", "Windows only issue")
def test_win32_duplicate_envs(self):
    """On Windows the last of several case-variant duplicate env keys wins."""
    newenv = os.environ.copy()
    newenv["fRUit"] = "cherry"
    newenv["fruit"] = "lemon"
    newenv["FRUIT"] = "orange"
    newenv["frUit"] = "banana"
    with subprocess.Popen(["CMD", "/c", "SET", "fruit"],
                          stdout=subprocess.PIPE,
                          env=newenv) as p:
        stdout, _ = p.communicate()
    self.assertEqual(stdout.strip(), b"frUit=banana")
# Windows requires at least the SYSTEMROOT environment variable to start
# Python
@unittest.skipIf(sys.platform == 'win32',
                 'cannot test an empty env on Windows')
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') == 1,
                 'The Python shared library cannot be loaded '
                 'with an empty environment.')
@unittest.skipIf(check_sanitizer(address=True),
                 'AddressSanitizer adds to the environment.')
def test_empty_env(self):
    """Verify that env={} is as empty as possible."""
    def is_env_var_to_ignore(n):
        """Determine if an environment variable is under our control."""
        # This excludes some __CF_* and VERSIONER_* keys MacOS insists
        # on adding even when the environment in exec is empty.
        # Gentoo sandboxes also force LD_PRELOAD and SANDBOX_* to exist.
        return ('VERSIONER' in n or '__CF' in n or  # MacOS
                n == 'LD_PRELOAD' or n.startswith('SANDBOX') or  # Gentoo
                n == 'LC_CTYPE')  # Locale coercion triggered
    with subprocess.Popen([sys.executable, "-c",
                           'import os; print(list(os.environ.keys()))'],
                          stdout=subprocess.PIPE, env={}) as p:
        stdout, stderr = p.communicate()
        child_env_names = eval(stdout.strip())
        self.assertIsInstance(child_env_names, list)
        child_env_names = [k for k in child_env_names
                           if not is_env_var_to_ignore(k)]
        self.assertEqual(child_env_names, [])
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') == 1,
                 'The Python shared library cannot be loaded '
                 'without some system environments.')
@unittest.skipIf(check_sanitizer(address=True),
                 'AddressSanitizer adds to the environment.')
def test_one_environment_variable(self):
    """A child can start with an env= mapping containing a single variable."""
    newenv = {'fruit': 'orange'}
    cmd = [sys.executable, '-c',
           'import sys,os;'
           'sys.stdout.write("fruit="+os.getenv("fruit"))']
    if sys.platform == "win32":
        cmd = ["CMD", "/c", "SET", "fruit"]
    with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=newenv) as p:
        stdout, stderr = p.communicate()
        if p.returncode and support.verbose:
            # Dump the child's streams to aid debugging a failed spawn.
            print("STDOUT:", stdout.decode("ascii", "replace"))
            print("STDERR:", stderr.decode("ascii", "replace"))
    self.assertEqual(p.returncode, 0)
    self.assertEqual(stdout.strip(), b"fruit=orange")
def test_invalid_cmd(self):
# null character in the command name
cmd = sys.executable + '\0'
with self.assertRaises(ValueError):
subprocess.Popen([cmd, "-c", "pass"])
# null character in the command argument
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass#\0"])
def test_invalid_env(self):
    """NUL or '=' in an env name/value raises ValueError; '=' in a value is fine."""
    # null character in the environment variable name
    newenv = os.environ.copy()
    newenv["FRUIT\0VEGETABLE"] = "cabbage"
    with self.assertRaises(ValueError):
        subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
    # null character in the environment variable value
    newenv = os.environ.copy()
    newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
    with self.assertRaises(ValueError):
        subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
    # equal character in the environment variable name
    newenv = os.environ.copy()
    newenv["FRUIT=ORANGE"] = "lemon"
    with self.assertRaises(ValueError):
        subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
    # equal character in the environment variable value
    newenv = os.environ.copy()
    newenv["FRUIT"] = "orange=lemon"
    with subprocess.Popen([sys.executable, "-c",
                           'import sys, os;'
                           'sys.stdout.write(os.getenv("FRUIT"))'],
                          stdout=subprocess.PIPE,
                          env=newenv) as p:
        stdout, stderr = p.communicate()
    self.assertEqual(stdout, b"orange=lemon")
@unittest.skipUnless(sys.platform == "win32", "Windows only issue")
def test_win32_invalid_env(self):
    """On Windows, '=' anywhere in an environment variable name is rejected."""
    # '=' in the environment variable name
    newenv = os.environ.copy()
    newenv["FRUIT=VEGETABLE"] = "cabbage"
    with self.assertRaises(ValueError):
        subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
    # name consisting only of '=' prefixes
    newenv = os.environ.copy()
    newenv["==FRUIT"] = "cabbage"
    with self.assertRaises(ValueError):
        subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, b"pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
self.assertEqual(stderr, b"pineapple")
def test_communicate_timeout(self):
    """communicate(timeout=...) raises TimeoutExpired but can be retried."""
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os,time;'
                          'sys.stderr.write("pineapple\\n");'
                          'time.sleep(1);'
                          'sys.stderr.write("pear\\n");'
                          'sys.stdout.write(sys.stdin.read())'],
                         universal_newlines=True,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    # The child sleeps for 1s, so a 0.3s timeout must expire.
    self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
                      timeout=0.3)
    # Make sure we can keep waiting for it, and that we get the whole output
    # after it completes.
    (stdout, stderr) = p.communicate()
    self.assertEqual(stdout, "banana")
    self.assertEqual(stderr.encode(), b"pineapple\npear\n")
def test_communicate_timeout_large_output(self):
    """A timeout during heavy child output does not lose already-read data."""
    # Test an expiring timeout while the child is outputting lots of data.
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os,time;'
                          'sys.stdout.write("a" * (64 * 1024));'
                          'time.sleep(0.2);'
                          'sys.stdout.write("a" * (64 * 1024));'
                          'time.sleep(0.2);'
                          'sys.stdout.write("a" * (64 * 1024));'
                          'time.sleep(0.2);'
                          'sys.stdout.write("a" * (64 * 1024));'],
                         stdout=subprocess.PIPE)
    self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
    # The retried communicate() must return the complete 256 KiB.
    (stdout, _) = p.communicate()
    self.assertEqual(len(stdout), 4 * 64 * 1024)
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
    """communicate() closes every pipe it opened, for all PIPE combinations."""
    for stdin_pipe in (False, True):
        for stdout_pipe in (False, True):
            for stderr_pipe in (False, True):
                options = {}
                if stdin_pipe:
                    options['stdin'] = subprocess.PIPE
                if stdout_pipe:
                    options['stdout'] = subprocess.PIPE
                if stderr_pipe:
                    options['stderr'] = subprocess.PIPE
                # Skip the no-pipes combination; nothing to check there.
                if not options:
                    continue
                p = subprocess.Popen(ZERO_RETURN_CMD, **options)
                p.communicate()
                if p.stdin is not None:
                    self.assertTrue(p.stdin.closed)
                if p.stdout is not None:
                    self.assertTrue(p.stdout.closed)
                if p.stderr is not None:
                    self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
def test_communicate_pipe_buf(self):
    """communicate() handles writes larger than the OS pipe buffer."""
    # communicate() with writes larger than pipe_buf
    # This test will probably deadlock rather than fail, if
    # communicate() does not work properly.
    x, y = os.pipe()
    os.close(x)
    os.close(y)
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os;'
                          'sys.stdout.write(sys.stdin.read(47));'
                          'sys.stderr.write("x" * %d);'
                          'sys.stdout.write(sys.stdin.read())' %
                          support.PIPE_MAX_SIZE],
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    self.addCleanup(p.stdin.close)
    string_to_write = b"a" * support.PIPE_MAX_SIZE
    (stdout, stderr) = p.communicate(string_to_write)
    self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write(b"banana")
(stdout, stderr) = p.communicate(b"split")
self.assertEqual(stdout, b"bananasplit")
self.assertEqual(stderr, b"")
def test_universal_newlines_and_text(self):
    """universal_newlines=True and text=True give identical newline translation."""
    args = [
        sys.executable, "-c",
        'import sys,os;' + SETBINARY +
        'buf = sys.stdout.buffer;'
        'buf.write(sys.stdin.readline().encode());'
        'buf.flush();'
        'buf.write(b"line2\\n");'
        'buf.flush();'
        'buf.write(sys.stdin.read().encode());'
        'buf.flush();'
        'buf.write(b"line4\\n");'
        'buf.flush();'
        'buf.write(b"line5\\r\\n");'
        'buf.flush();'
        'buf.write(b"line6\\r");'
        'buf.flush();'
        'buf.write(b"\\nline7");'
        'buf.flush();'
        'buf.write(b"\\nline8");']
    # Both spellings of the text-mode flag must behave the same.
    for extra_kwarg in ('universal_newlines', 'text'):
        p = subprocess.Popen(args, **{'stdin': subprocess.PIPE,
                                      'stdout': subprocess.PIPE,
                                      extra_kwarg: True})
        with p:
            p.stdin.write("line1\n")
            p.stdin.flush()
            self.assertEqual(p.stdout.readline(), "line1\n")
            p.stdin.write("line3\n")
            p.stdin.close()
            self.addCleanup(p.stdout.close)
            self.assertEqual(p.stdout.readline(),
                             "line2\n")
            self.assertEqual(p.stdout.read(6),
                             "line3\n")
            # \r\n and bare \r are both normalized to \n on the way out.
            self.assertEqual(p.stdout.read(),
                             "line4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate(self):
    """communicate() translates newlines when universal_newlines is set."""
    # universal newlines through communicate()
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os;' + SETBINARY +
                          'buf = sys.stdout.buffer;'
                          'buf.write(b"line2\\n");'
                          'buf.flush();'
                          'buf.write(b"line4\\n");'
                          'buf.flush();'
                          'buf.write(b"line5\\r\\n");'
                          'buf.flush();'
                          'buf.write(b"line6\\r");'
                          'buf.flush();'
                          'buf.write(b"\\nline7");'
                          'buf.flush();'
                          'buf.write(b"\\nline8");'],
                         stderr=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         universal_newlines=1)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    (stdout, stderr) = p.communicate()
    # \r\n and bare \r are both normalized to \n.
    self.assertEqual(stdout,
                     "line2\nline4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate_stdin(self):
    """Text-mode communicate() input reaches the child intact via stdin."""
    # universal newlines through communicate(), with only stdin
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os;' + SETBINARY + textwrap.dedent('''
                           s = sys.stdin.readline()
                           assert s == "line1\\n", repr(s)
                           s = sys.stdin.read()
                           assert s == "line3\\n", repr(s)
                          ''')],
                         stdin=subprocess.PIPE,
                         universal_newlines=1)
    (stdout, stderr) = p.communicate("line1\nline3\n")
    self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_input_none(self):
    """Test communicate(input=None) with universal newlines."""
    # We set stdout to PIPE because, as of this writing, a different
    # code path is tested when the number of pipes is zero or one.
    p = subprocess.Popen(ZERO_RETURN_CMD,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         universal_newlines=True)
    p.communicate()
    self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_stdin_stdout_stderr(self):
    """Newline translation applies to stdin, stdout and stderr at once."""
    # universal newlines through communicate(), with stdin, stdout, stderr
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os;' + SETBINARY + textwrap.dedent('''
                           s = sys.stdin.buffer.readline()
                           sys.stdout.buffer.write(s)
                           sys.stdout.buffer.write(b"line2\\r")
                           sys.stderr.buffer.write(b"eline2\\n")
                           s = sys.stdin.buffer.read()
                           sys.stdout.buffer.write(s)
                           sys.stdout.buffer.write(b"line4\\n")
                           sys.stdout.buffer.write(b"line5\\r\\n")
                           sys.stderr.buffer.write(b"eline6\\r")
                           sys.stderr.buffer.write(b"eline7\\r\\nz")
                          ''')],
                         stdin=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         universal_newlines=True)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    (stdout, stderr) = p.communicate("line1\nline3\n")
    self.assertEqual(p.returncode, 0)
    self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
    # Python debug build push something like "[42442 refs]\n"
    # to stderr at exit of subprocess.
    self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
def test_universal_newlines_communicate_encodings(self):
    """Newline translation works for multi-byte encodings (UTF-16/UTF-32)."""
    # Check that universal newlines mode works for various encodings,
    # in particular for encodings in the UTF-16 and UTF-32 families.
    # See issue #15595.
    #
    # UTF-16 and UTF-32-BE are sufficient to check both with BOM and
    # without, and UTF-16 and UTF-32.
    for encoding in ['utf-16', 'utf-32-be']:
        code = ("import sys; "
                r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
                encoding)
        args = [sys.executable, '-c', code]
        # We set stdin to be non-None because, as of this writing,
        # a different code path is used when the number of pipes is
        # zero or one.
        popen = subprocess.Popen(args,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 encoding=encoding)
        stdout, stderr = popen.communicate(input='')
        self.assertEqual(stdout, '1\n2\n3\n4')
def test_communicate_errors(self):
    """The errors= argument controls decoding of undecodable child output."""
    for errors, expected in [
        ('ignore', ''),
        ('replace', '\ufffd\ufffd'),
        ('surrogateescape', '\udc80\udc80'),
        ('backslashreplace', '\\x80\\x80'),
    ]:
        # The child emits two bytes that are invalid UTF-8.
        code = ("import sys; "
                r"sys.stdout.buffer.write(b'[\x80\x80]')")
        args = [sys.executable, '-c', code]
        # We set stdin to be non-None because, as of this writing,
        # a different code path is used when the number of pipes is
        # zero or one.
        popen = subprocess.Popen(args,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 encoding='utf-8',
                                 errors=errors)
        stdout, stderr = popen.communicate(input='')
        self.assertEqual(stdout, '[{}]'.format(expected))
def test_no_leaking(self):
    """Repeated Popen+communicate near the fd limit leaks no descriptors."""
    # Make sure we leak no resources
    if not mswindows:
        max_handles = 1026  # too much for most UNIX systems
    else:
        max_handles = 2050  # too much for (at least some) Windows setups
    handles = []
    tmpdir = tempfile.mkdtemp()
    try:
        # Exhaust the process's file descriptors.
        for i in range(max_handles):
            try:
                tmpfile = os.path.join(tmpdir, os_helper.TESTFN)
                handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
            except OSError as e:
                if e.errno != errno.EMFILE:
                    raise
                break
        else:
            self.skipTest("failed to reach the file descriptor limit "
                          "(tried %d)" % max_handles)
        # Close a couple of them (should be enough for a subprocess)
        for i in range(10):
            os.close(handles.pop())
        # Loop creating some subprocesses. If one of them leaks some fds,
        # the next loop iteration will fail by reaching the max fd limit.
        for i in range(15):
            p = subprocess.Popen([sys.executable, "-c",
                                  "import sys;"
                                  "sys.stdout.write(sys.stdin.read())"],
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            data = p.communicate(b"lime")[0]
            self.assertEqual(data, b"lime")
    finally:
        for h in handles:
            os.close(h)
        shutil.rmtree(tmpdir)
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
def test_poll(self):
p = subprocess.Popen([sys.executable, "-c",
"import os; os.read(0, 1)"],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
self.assertIsNone(p.poll())
os.write(p.stdin.fileno(), b'A')
p.wait()
# Subsequent invocations should just return the returncode
self.assertEqual(p.poll(), 0)
def test_wait(self):
    """wait() returns the exit status and is idempotent."""
    proc = subprocess.Popen(ZERO_RETURN_CMD)
    self.assertEqual(proc.wait(), 0)
    # A second wait() must simply return the cached returncode.
    self.assertEqual(proc.wait(), 0)
def test_wait_timeout(self):
    """wait(timeout=...) raises TimeoutExpired, then succeeds when retried."""
    p = subprocess.Popen([sys.executable,
                          "-c", "import time; time.sleep(0.3)"])
    with self.assertRaises(subprocess.TimeoutExpired) as c:
        p.wait(timeout=0.0001)
    self.assertIn("0.0001", str(c.exception))  # For coverage of __str__.
    self.assertEqual(p.wait(timeout=support.SHORT_TIMEOUT), 0)
def test_invalid_bufsize(self):
    """A non-integer bufsize raises TypeError."""
    # an invalid type of the bufsize argument should raise
    # TypeError.
    with self.assertRaises(TypeError):
        subprocess.Popen(ZERO_RETURN_CMD, "orange")
def test_bufsize_is_none(self):
    """bufsize=None should be the same as bufsize=0."""
    p = subprocess.Popen(ZERO_RETURN_CMD, None)
    self.assertEqual(p.wait(), 0)
    # Again with keyword arg
    p = subprocess.Popen(ZERO_RETURN_CMD, bufsize=None)
    self.assertEqual(p.wait(), 0)
def _test_bufsize_equal_one(self, line, expected, universal_newlines):
    """Helper: write ``line`` with bufsize=1 and check the child echoes ``expected``."""
    # subprocess may deadlock with bufsize=1, see issue #21332
    with subprocess.Popen([sys.executable, "-c", "import sys;"
                           "sys.stdout.write(sys.stdin.readline());"
                           "sys.stdout.flush()"],
                          stdin=subprocess.PIPE,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.DEVNULL,
                          bufsize=1,
                          universal_newlines=universal_newlines) as p:
        p.stdin.write(line)  # expect that it flushes the line in text mode
        os.close(p.stdin.fileno())  # close it without flushing the buffer
        read_line = p.stdout.readline()
        with support.SuppressCrashReport():
            try:
                # The underlying fd is already closed; this close may fail.
                p.stdin.close()
            except OSError:
                pass
        # Prevent Popen.__exit__ from touching the dead stdin again.
        p.stdin = None
    self.assertEqual(p.returncode, 0)
    self.assertEqual(read_line, expected)
def test_bufsize_equal_one_text_mode(self):
    """bufsize=1 in text mode line-buffers: the full line is echoed back."""
    # line is flushed in text mode with bufsize=1.
    # we should get the full line in return
    line = "line\n"
    self._test_bufsize_equal_one(line, line, universal_newlines=True)
def test_bufsize_equal_one_binary_mode(self):
    """bufsize=1 in binary mode warns and does not line-buffer."""
    # line is not flushed in binary mode with bufsize=1.
    # we should get empty response
    line = b'line' + os.linesep.encode()  # assume ascii-based locale
    with self.assertWarnsRegex(RuntimeWarning, 'line buffering'):
        self._test_bufsize_equal_one(line, b'', universal_newlines=False)
@support.requires_resource('cpu')
def test_leaking_fds_on_error(self):
    """A failed exec must not leak the PIPE file descriptors."""
    # see bug #5179: Popen leaks file descriptors to PIPEs if
    # the child fails to execute; this will eventually exhaust
    # the maximum number of open fds. 1024 seems a very common
    # value for that limit, but Windows has 2048, so we loop
    # 1024 times (each call leaked two fds).
    for i in range(1024):
        with self.assertRaises(NONEXISTING_ERRORS):
            subprocess.Popen(NONEXISTING_CMD,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
def test_nonexisting_with_pipes(self):
    """On Windows, a failed spawn with pipes must close them without CRT errors."""
    # bpo-30121: Popen with pipes must close properly pipes on error.
    # Previously, os.close() was called with a Windows handle which is not
    # a valid file descriptor.
    #
    # Run the test in a subprocess to control how the CRT reports errors
    # and to get stderr content.
    try:
        import msvcrt
        msvcrt.CrtSetReportMode
    except (AttributeError, ImportError):
        self.skipTest("need msvcrt.CrtSetReportMode")
    code = textwrap.dedent(f"""
        import msvcrt
        import subprocess

        cmd = {NONEXISTING_CMD!r}

        for report_type in [msvcrt.CRT_WARN,
                            msvcrt.CRT_ERROR,
                            msvcrt.CRT_ASSERT]:
            msvcrt.CrtSetReportMode(report_type, msvcrt.CRTDBG_MODE_FILE)
            msvcrt.CrtSetReportFile(report_type, msvcrt.CRTDBG_FILE_STDERR)

        try:
            subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        except OSError:
            pass
    """)
    cmd = [sys.executable, "-c", code]
    proc = subprocess.Popen(cmd,
                            stderr=subprocess.PIPE,
                            universal_newlines=True)
    with proc:
        stderr = proc.communicate()[1]
    # Any CRT report would have been written to stderr.
    self.assertEqual(stderr, "")
    self.assertEqual(proc.returncode, 0)
def test_double_close_on_error(self):
    """A failed Popen must not double-close fds owned by other threads (issue #18851)."""
    fds = []
    def open_fds():
        # Keep creating pipe fds concurrently so a stray double-close
        # in Popen's error path would hit one of them.
        for i in range(20):
            fds.extend(os.pipe())
            time.sleep(0.001)
    t = threading.Thread(target=open_fds)
    t.start()
    try:
        with self.assertRaises(OSError):
            subprocess.Popen(NONEXISTING_CMD,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    finally:
        t.join()
    exc = None
    for fd in fds:
        # If a double close occurred, some of those fds will
        # already have been closed by mistake, and os.close()
        # here will raise.
        try:
            os.close(fd)
        except OSError as e:
            exc = e
    if exc is not None:
        raise exc
def test_threadsafe_wait(self):
    """Issue21291: Popen.wait() needs to be threadsafe for returncode."""
    proc = subprocess.Popen([sys.executable, '-c',
                             'import time; time.sleep(12)'])
    self.assertEqual(proc.returncode, None)
    results = []

    def kill_proc_timer_thread():
        results.append(('thread-start-poll-result', proc.poll()))
        # terminate it from the thread and wait for the result.
        proc.kill()
        proc.wait()
        results.append(('thread-after-kill-and-wait', proc.returncode))
        # this wait should be a no-op given the above.
        proc.wait()
        results.append(('thread-after-second-wait', proc.returncode))

    # This is a timing sensitive test, the failure mode is
    # triggered when both the main thread and this thread are in
    # the wait() call at once. The delay here is to allow the
    # main thread to most likely be blocked in its wait() call.
    t = threading.Timer(0.2, kill_proc_timer_thread)
    t.start()
    if mswindows:
        expected_errorcode = 1
    else:
        # Should be -9 because of the proc.kill() from the thread.
        expected_errorcode = -9
    # Wait for the process to finish; the thread should kill it
    # long before it finishes on its own. Supplying a timeout
    # triggers a different code path for better coverage.
    proc.wait(timeout=support.SHORT_TIMEOUT)
    self.assertEqual(proc.returncode, expected_errorcode,
                     msg="unexpected result in wait from main thread")
    # This should be a no-op with no change in returncode.
    proc.wait()
    self.assertEqual(proc.returncode, expected_errorcode,
                     msg="unexpected result in second main wait.")
    t.join()
    # Ensure that all of the thread results are as expected.
    # When a race condition occurs in wait(), the returncode could
    # be set by the wrong thread that doesn't actually have it
    # leading to an incorrect value.
    self.assertEqual([('thread-start-poll-result', None),
                      ('thread-after-kill-and-wait', expected_errorcode),
                      ('thread-after-second-wait', expected_errorcode)],
                     results)
def test_issue8780(self):
    """A grandchild inherits stdout when the child does not redirect it."""
    # Ensure that stdout is inherited from the parent
    # if stdout=PIPE is not used
    code = ';'.join((
        'import subprocess, sys',
        'retcode = subprocess.call('
        "[sys.executable, '-c', 'print(\"Hello World!\")'])",
        'assert retcode == 0'))
    output = subprocess.check_output([sys.executable, '-c', code])
    self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
def test_handles_closed_on_exception(self):
    """Duplicated output handles are released when CreateProcess fails."""
    # If CreateProcess exits with an error, ensure the
    # duplicate output handles are released
    ifhandle, ifname = tempfile.mkstemp()
    ofhandle, ofname = tempfile.mkstemp()
    efhandle, efname = tempfile.mkstemp()
    try:
        # "*" is not a spawnable program, so this must raise.
        subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
                          stderr=efhandle)
    except OSError:
        os.close(ifhandle)
        os.remove(ifname)
        os.close(ofhandle)
        os.remove(ofname)
        os.close(efhandle)
        os.remove(efname)
    # removal succeeds only if no duplicated handle keeps the files open
    self.assertFalse(os.path.exists(ifname))
    self.assertFalse(os.path.exists(ofname))
    self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.communicate(b"x" * 2**20)
def test_repr(self):
cases = [
("ls", True, 123, "<Popen: returncode: 123 args: 'ls'>"),
('a' * 100, True, 0,
"<Popen: returncode: 0 args: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa...>"),
(["ls"], False, None, "<Popen: returncode: None args: ['ls']>"),
(["ls", '--my-opts', 'a' * 100], False, None,
"<Popen: returncode: None args: ['ls', '--my-opts', 'aaaaaaaaaaaaaaaaaaaaaaaa...>"),
(os_helper.FakePath("my-tool.py"), False, 7,
"<Popen: returncode: 7 args: <FakePath 'my-tool.py'>>")
]
with unittest.mock.patch.object(subprocess.Popen, '_execute_child'):
for cmd, shell, code, sx in cases:
p = subprocess.Popen(cmd, shell=shell)
p.returncode = code
self.assertEqual(repr(p), sx)
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
p.wait()
p.communicate(b"x" * 2**20)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
"Requires signal.SIGUSR1")
@unittest.skipUnless(hasattr(os, 'kill'),
"Requires os.kill")
@unittest.skipUnless(hasattr(os, 'getppid'),
"Requires os.getppid")
def test_communicate_eintr(self):
# Issue #12493: communicate() should handle EINTR
def handler(signum, frame):
pass
old_handler = signal.signal(signal.SIGUSR1, handler)
self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)
args = [sys.executable, "-c",
'import os, signal;'
'os.kill(os.getppid(), signal.SIGUSR1)']
for stream in ('stdout', 'stderr'):
kw = {stream: subprocess.PIPE}
with subprocess.Popen(args, **kw) as process:
# communicate() will be interrupted by SIGUSR1
process.communicate()
# This test is Linux-ish specific for simplicity to at least have
# some coverage. It is not a platform specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
def test_failed_child_execute_fd_leak(self):
"""Test for the fork() failure fd leak reported in issue16327."""
fd_directory = '/proc/%d/fd' % os.getpid()
fds_before_popen = os.listdir(fd_directory)
with self.assertRaises(PopenTestException):
PopenExecuteChildRaises(
ZERO_RETURN_CMD, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# NOTE: This test doesn't verify that the real _execute_child
# does not close the file descriptors itself on the way out
# during an exception. Code inspection has confirmed that.
fds_after_exception = os.listdir(fd_directory)
self.assertEqual(fds_before_popen, fds_after_exception)
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_includes_filename(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.call(['/opt/nonexistent_binary', 'with', 'some', 'args'])
self.assertEqual(c.exception.filename, '/opt/nonexistent_binary')
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_with_bad_cwd(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.Popen(['exit', '0'], cwd='/some/nonexistent/directory')
self.assertEqual(c.exception.filename, '/some/nonexistent/directory')
def test_class_getitems(self):
self.assertIsInstance(subprocess.Popen[bytes], types.GenericAlias)
self.assertIsInstance(subprocess.CompletedProcess[str], types.GenericAlias)
@unittest.skipIf(not sysconfig.get_config_var("HAVE_VFORK"),
"vfork() not enabled by configure.")
@mock.patch("subprocess._fork_exec")
def test__use_vfork(self, mock_fork_exec):
self.assertTrue(subprocess._USE_VFORK) # The default value regardless.
mock_fork_exec.side_effect = RuntimeError("just testing args")
with self.assertRaises(RuntimeError):
subprocess.run([sys.executable, "-c", "pass"])
mock_fork_exec.assert_called_once()
self.assertTrue(mock_fork_exec.call_args.args[-1])
with mock.patch.object(subprocess, '_USE_VFORK', False):
with self.assertRaises(RuntimeError):
subprocess.run([sys.executable, "-c", "pass"])
self.assertFalse(mock_fork_exec.call_args_list[-1].args[-1])
@unittest.skipUnless(hasattr(subprocess, '_winapi'),
'need subprocess._winapi')
def test_wait_negative_timeout(self):
proc = subprocess.Popen(ZERO_RETURN_CMD)
with proc:
patch = mock.patch.object(
subprocess._winapi,
'WaitForSingleObject',
return_value=subprocess._winapi.WAIT_OBJECT_0)
with patch as mock_wait:
proc.wait(-1) # negative timeout
mock_wait.assert_called_once_with(proc._handle, 0)
proc.returncode = None
self.assertEqual(proc.wait(), 0)
| ProcessTestCase |
python | walkccc__LeetCode | solutions/2576. Find the Maximum Number of Marked Indices/2576.py | {
"start": 0,
"end": 377
} | class ____:
def maxNumOfMarkedIndices(self, nums: list[int]) -> int:
nums.sort()
def isPossible(m: int) -> bool:
for i in range(m):
if 2 * nums[i] > nums[-m + i]:
return False
return True
l = bisect.bisect_left(range(len(nums) // 2 + 1), True,
key=lambda m: not isPossible(m))
return (l - 1) * 2
| Solution |
python | cython__cython | Cython/Build/Tests/TestCyCache.py | {
"start": 216,
"end": 6554
} | class ____(CythonTest):
def setUp(self):
CythonTest.setUp(self)
self.temp_dir = tempfile.mkdtemp(
prefix='cycache-test',
dir='TEST_TMP' if os.path.isdir('TEST_TMP') else None)
self.src_dir = tempfile.mkdtemp(prefix='src', dir=self.temp_dir)
self.cache_dir = tempfile.mkdtemp(prefix='cache', dir=self.temp_dir)
def cache_files(self, file_glob):
return glob.glob(os.path.join(self.cache_dir, file_glob))
def fresh_cythonize(self, *args, **kwargs):
Cython.Utils.clear_function_caches()
Cython.Build.Dependencies._dep_tree = None # discard method caches
Cython.Build.Dependencies.cythonize(*args, **kwargs)
def fresh_compile(self, *args, **kwargs):
Cython.Utils.clear_function_caches()
Cython.Compiler.Main.compile(*args, **kwargs)
def _test_cycache_switch(self, compilation_method):
content1 = 'value = 1\n'
content2 = 'value = 2\n'
a_pyx = os.path.join(self.src_dir, 'a.pyx')
a_c = a_pyx[:-4] + '.c'
with open(a_pyx, 'w') as f:
f.write(content1)
compilation_method(a_pyx, cache=self.cache_dir)
compilation_method(a_pyx, cache=self.cache_dir)
self.assertEqual(1, len(self.cache_files('a.c*')))
with open(a_c) as f:
a_contents1 = f.read()
os.unlink(a_c)
with open(a_pyx, 'w') as f:
f.write(content2)
compilation_method(a_pyx, cache=self.cache_dir)
with open(a_c) as f:
a_contents2 = f.read()
os.unlink(a_c)
self.assertNotEqual(a_contents1, a_contents2, 'C file not changed!')
self.assertEqual(2, len(self.cache_files('a.c*')))
with open(a_pyx, 'w') as f:
f.write(content1)
compilation_method(a_pyx, cache=self.cache_dir)
self.assertEqual(2, len(self.cache_files('a.c*')))
with open(a_c) as f:
a_contents = f.read()
self.assertEqual(
a_contents, a_contents1,
msg='\n'.join(list(difflib.unified_diff(
a_contents.split('\n'), a_contents1.split('\n')))[:10]))
def test_cycache_switch_cythonize(self):
self._test_cycache_switch(self.fresh_cythonize)
def test_cycache_switch_compile(self):
self._test_cycache_switch(self.fresh_compile)
def _test_cycache_uses_cache(self, compilation_method):
a_pyx = os.path.join(self.src_dir, 'a.pyx')
a_c = a_pyx[:-4] + '.c'
with open(a_pyx, 'w') as f:
f.write('pass')
compilation_method(a_pyx, cache=self.cache_dir)
a_cache = os.path.join(self.cache_dir, os.listdir(self.cache_dir)[0])
with gzip.GzipFile(a_cache, 'wb') as gzipfile:
gzipfile.write(b'fake stuff')
os.unlink(a_c)
compilation_method(a_pyx, cache=self.cache_dir)
with open(a_c) as f:
a_contents = f.read()
self.assertEqual(a_contents, 'fake stuff',
'Unexpected contents: %s...' % a_contents[:100])
def test_cycache_uses_cache_cythonize(self):
self._test_cycache_uses_cache(self.fresh_cythonize)
def test_cycache_uses_cache_compile(self):
self._test_cycache_uses_cache(self.fresh_compile)
def _test_cycache_annotation(self, compilation_method):
a_pyx = os.path.join(self.src_dir, 'a.pyx')
a_c = a_pyx[:-4] + '.c'
a_html = a_pyx[:-4] + '.html'
with open(a_pyx, 'w') as f:
f.write('pass')
compilation_method(a_pyx, cache=self.cache_dir, annotate='default')
self.assertTrue(os.path.exists(a_html), a_html)
os.unlink(a_html)
os.unlink(a_c)
compilation_method(a_pyx, cache=self.cache_dir, annotate='default')
self.assertTrue(os.path.exists(a_html), a_html)
def test_cycache_annotation_cythonize(self):
self._test_cycache_annotation(self.fresh_cythonize)
def test_cycache_annotation_compile(self):
self._test_cycache_annotation(self.fresh_compile)
def _test_multi_file_output(self, compilation_method):
a_pyx = os.path.join(self.src_dir, 'a.pyx')
a_c = a_pyx[:-4] + '.c'
a_h = a_pyx[:-4] + '.h'
a_api_h = a_pyx[:-4] + '_api.h'
with open(a_pyx, 'w') as f:
f.write('cdef public api int foo(int x): return x\n')
compilation_method(a_pyx, cache=self.cache_dir)
expected = [a_c, a_h, a_api_h]
for output in expected:
self.assertTrue(os.path.exists(output), output)
os.unlink(output)
compilation_method(a_pyx, cache=self.cache_dir)
for output in expected:
self.assertTrue(os.path.exists(output), output)
def test_multi_file_output_cythonize(self):
self._test_multi_file_output(self.fresh_cythonize)
def test_multi_file_output_compile(self):
self._test_multi_file_output(self.fresh_compile)
def _test_options_invalidation(self, compilation_method):
hash_pyx = os.path.join(self.src_dir, 'options.pyx')
hash_c = hash_pyx[:-len('.pyx')] + '.c'
hash_cpp = hash_pyx[:-len('.pyx')] + '.cpp'
with open(hash_pyx, 'w') as f:
f.write('pass')
compilation_method(hash_pyx, cache=self.cache_dir, cplus=False)
self.assertEqual(1, len(self.cache_files('options.c*')))
os.unlink(hash_c)
compilation_method(hash_pyx, cache=self.cache_dir, cplus=True)
self.assertEqual(2, len(self.cache_files('options.c*')))
try:
os.unlink(hash_c)
except FileNotFoundError:
# fresh_cythonize() produces .c file, fresh_compile produces .cpp file
os.unlink(hash_cpp)
compilation_method(hash_pyx, cache=self.cache_dir, cplus=False, show_version=False)
self.assertEqual(2, len(self.cache_files('options.c*')))
os.unlink(hash_c)
compilation_method(hash_pyx, cache=self.cache_dir, cplus=False, show_version=True)
self.assertEqual(2, len(self.cache_files('options.c*')))
def test_options_invalidation_cythonize(self):
self._test_options_invalidation(self.fresh_cythonize)
def test_options_invalidation_compile(self):
self._test_options_invalidation(self.fresh_compile)
| TestCyCache |
python | ray-project__ray | rllib/utils/replay_buffers/replay_buffer.py | {
"start": 2068,
"end": 14390
} | class ____(ReplayBufferInterface, FaultAwareApply):
"""The lowest-level replay buffer interface used by RLlib.
This class implements a basic ring-type of buffer with random sampling.
ReplayBuffer is the base class for advanced types that add functionality while
retaining compatibility through inheritance.
The following examples show how buffers behave with different storage_units
and capacities. This behaviour is generally similar for other buffers, although
they might not implement all storage_units.
Examples:
.. testcode::
from ray.rllib.utils.replay_buffers.replay_buffer import ReplayBuffer
from ray.rllib.utils.replay_buffers.replay_buffer import StorageUnit
from ray.rllib.policy.sample_batch import SampleBatch
# Store any batch as a whole
buffer = ReplayBuffer(capacity=10, storage_unit=StorageUnit.FRAGMENTS)
buffer.add(SampleBatch({"a": [1], "b": [2, 3, 4]}))
buffer.sample(1)
# Store only complete episodes
buffer = ReplayBuffer(capacity=10,
storage_unit=StorageUnit.EPISODES)
buffer.add(SampleBatch({"c": [1, 2, 3, 4],
SampleBatch.T: [0, 1, 0, 1],
SampleBatch.TERMINATEDS: [False, True, False, True],
SampleBatch.EPS_ID: [0, 0, 1, 1]}))
buffer.sample(1)
# Store single timesteps
buffer = ReplayBuffer(capacity=2, storage_unit=StorageUnit.TIMESTEPS)
buffer.add(SampleBatch({"a": [1, 2], SampleBatch.T: [0, 1]}))
buffer.sample(1)
buffer.add(SampleBatch({"a": [3], SampleBatch.T: [2]}))
print(buffer._eviction_started)
buffer.sample(1)
buffer = ReplayBuffer(capacity=10, storage_unit=StorageUnit.SEQUENCES)
buffer.add(SampleBatch({"c": [1, 2, 3], SampleBatch.SEQ_LENS: [1, 2]}))
buffer.sample(1)
.. testoutput::
True
`True` is not the output of the above testcode, but an artifact of unexpected
behaviour of sphinx doctests.
(see https://github.com/ray-project/ray/pull/32477#discussion_r1106776101)
"""
def __init__(
self,
capacity: int = 10000,
storage_unit: Union[str, StorageUnit] = "timesteps",
**kwargs,
):
"""Initializes a (FIFO) ReplayBuffer instance.
Args:
capacity: Max number of timesteps to store in this FIFO
buffer. After reaching this number, older samples will be
dropped to make space for new ones.
storage_unit: If not a StorageUnit, either 'timesteps', 'sequences' or
'episodes'. Specifies how experiences are stored.
``**kwargs``: Forward compatibility kwargs.
"""
if storage_unit in ["timesteps", StorageUnit.TIMESTEPS]:
self.storage_unit = StorageUnit.TIMESTEPS
elif storage_unit in ["sequences", StorageUnit.SEQUENCES]:
self.storage_unit = StorageUnit.SEQUENCES
elif storage_unit in ["episodes", StorageUnit.EPISODES]:
self.storage_unit = StorageUnit.EPISODES
elif storage_unit in ["fragments", StorageUnit.FRAGMENTS]:
self.storage_unit = StorageUnit.FRAGMENTS
else:
raise ValueError(
f"storage_unit must be either '{StorageUnit.TIMESTEPS}', "
f"'{StorageUnit.SEQUENCES}', '{StorageUnit.EPISODES}' "
f"or '{StorageUnit.FRAGMENTS}', but is {storage_unit}"
)
# The actual storage (list of SampleBatches or MultiAgentBatches).
self._storage = []
# Caps the number of timesteps stored in this buffer
if capacity <= 0:
raise ValueError(
"Capacity of replay buffer has to be greater than zero "
"but was set to {}.".format(capacity)
)
self.capacity = capacity
# The next index to override in the buffer.
self._next_idx = 0
# len(self._hit_count) must always be less than len(capacity)
self._hit_count = np.zeros(self.capacity)
# Whether we have already hit our capacity (and have therefore
# started to evict older samples).
self._eviction_started = False
# Number of (single) timesteps that have been added to the buffer
# over its lifetime. Note that each added item (batch) may contain
# more than one timestep.
self._num_timesteps_added = 0
self._num_timesteps_added_wrap = 0
# Number of (single) timesteps that have been sampled from the buffer
# over its lifetime.
self._num_timesteps_sampled = 0
self._evicted_hit_stats = WindowStat("evicted_hit", 1000)
self._est_size_bytes = 0
self.batch_size = None
@override(ReplayBufferInterface)
def __len__(self) -> int:
return len(self._storage)
@override(ReplayBufferInterface)
def add(self, batch: SampleBatchType, **kwargs) -> None:
"""Adds a batch of experiences or other data to this buffer.
Splits batch into chunks of timesteps, sequences or episodes, depending on
`self._storage_unit`. Calls `self._add_single_batch` to add resulting slices
to the buffer storage.
Args:
batch: The batch to add.
``**kwargs``: Forward compatibility kwargs.
"""
if not batch.count > 0:
return
warn_replay_capacity(item=batch, num_items=self.capacity / batch.count)
if self.storage_unit == StorageUnit.TIMESTEPS:
timeslices = batch.timeslices(1)
for t in timeslices:
self._add_single_batch(t, **kwargs)
elif self.storage_unit == StorageUnit.SEQUENCES:
timestep_count = 0
for seq_len in batch.get(SampleBatch.SEQ_LENS):
start_seq = timestep_count
end_seq = timestep_count + seq_len
self._add_single_batch(batch[start_seq:end_seq], **kwargs)
timestep_count = end_seq
elif self.storage_unit == StorageUnit.EPISODES:
for eps in batch.split_by_episode():
if eps.get(SampleBatch.T, [0])[0] == 0 and (
eps.get(SampleBatch.TERMINATEDS, [True])[-1]
or eps.get(SampleBatch.TRUNCATEDS, [False])[-1]
):
# Only add full episodes to the buffer
# Check only if info is available
self._add_single_batch(eps, **kwargs)
else:
if log_once("only_full_episodes"):
logger.info(
"This buffer uses episodes as a storage "
"unit and thus allows only full episodes "
"to be added to it (starting from T=0 and ending in "
"`terminateds=True` or `truncateds=True`. "
"Some samples may be dropped."
)
elif self.storage_unit == StorageUnit.FRAGMENTS:
self._add_single_batch(batch, **kwargs)
@DeveloperAPI
def _add_single_batch(self, item: SampleBatchType, **kwargs) -> None:
"""Add a SampleBatch of experiences to self._storage.
An item consists of either one or more timesteps, a sequence or an
episode. Differs from add() in that it does not consider the storage
unit or type of batch and simply stores it.
Args:
item: The batch to be added.
``**kwargs``: Forward compatibility kwargs.
"""
self._num_timesteps_added += item.count
self._num_timesteps_added_wrap += item.count
if self._next_idx >= len(self._storage):
self._storage.append(item)
self._est_size_bytes += item.size_bytes()
else:
item_to_be_removed = self._storage[self._next_idx]
self._est_size_bytes -= item_to_be_removed.size_bytes()
self._storage[self._next_idx] = item
self._est_size_bytes += item.size_bytes()
# Eviction of older samples has already started (buffer is "full").
if self._eviction_started:
self._evicted_hit_stats.push(self._hit_count[self._next_idx])
self._hit_count[self._next_idx] = 0
# Wrap around storage as a circular buffer once we hit capacity.
if self._num_timesteps_added_wrap >= self.capacity:
self._eviction_started = True
self._num_timesteps_added_wrap = 0
self._next_idx = 0
else:
self._next_idx += 1
@override(ReplayBufferInterface)
def sample(
self, num_items: Optional[int] = None, **kwargs
) -> Optional[SampleBatchType]:
"""Samples `num_items` items from this buffer.
The items depend on the buffer's storage_unit.
Samples in the results may be repeated.
Examples for sampling results:
1) If storage unit 'timesteps' has been chosen and batches of
size 5 have been added, sample(5) will yield a concatenated batch of
15 timesteps.
2) If storage unit 'sequences' has been chosen and sequences of
different lengths have been added, sample(5) will yield a concatenated
batch with a number of timesteps equal to the sum of timesteps in
the 5 sampled sequences.
3) If storage unit 'episodes' has been chosen and episodes of
different lengths have been added, sample(5) will yield a concatenated
batch with a number of timesteps equal to the sum of timesteps in
the 5 sampled episodes.
Args:
num_items: Number of items to sample from this buffer.
``**kwargs``: Forward compatibility kwargs.
Returns:
Concatenated batch of items.
"""
if len(self) == 0:
raise ValueError("Trying to sample from an empty buffer.")
idxes = [random.randint(0, len(self) - 1) for _ in range(num_items)]
sample = self._encode_sample(idxes)
self._num_timesteps_sampled += sample.count
return sample
@DeveloperAPI
def stats(self, debug: bool = False) -> dict:
"""Returns the stats of this buffer.
Args:
debug: If True, adds sample eviction statistics to the returned
stats dict.
Returns:
A dictionary of stats about this buffer.
"""
data = {
"added_count": self._num_timesteps_added,
"added_count_wrapped": self._num_timesteps_added_wrap,
"eviction_started": self._eviction_started,
"sampled_count": self._num_timesteps_sampled,
"est_size_bytes": self._est_size_bytes,
"num_entries": len(self._storage),
}
if debug:
data.update(self._evicted_hit_stats.stats())
return data
@override(ReplayBufferInterface)
def get_state(self) -> Dict[str, Any]:
state = {"_storage": self._storage, "_next_idx": self._next_idx}
state.update(self.stats(debug=False))
return state
@override(ReplayBufferInterface)
def set_state(self, state: Dict[str, Any]) -> None:
# The actual storage.
self._storage = state["_storage"]
self._next_idx = state["_next_idx"]
# Stats and counts.
self._num_timesteps_added = state["added_count"]
self._num_timesteps_added_wrap = state["added_count_wrapped"]
self._eviction_started = state["eviction_started"]
self._num_timesteps_sampled = state["sampled_count"]
self._est_size_bytes = state["est_size_bytes"]
@DeveloperAPI
def _encode_sample(self, idxes: List[int]) -> SampleBatchType:
"""Fetches concatenated samples at given indices from the storage."""
samples = []
for i in idxes:
self._hit_count[i] += 1
samples.append(self._storage[i])
if samples:
# We assume all samples are of same type
out = concat_samples(samples)
else:
out = SampleBatch()
out.decompress_if_needed()
return out
| ReplayBuffer |
python | pypa__warehouse | warehouse/manage/forms.py | {
"start": 21802,
"end": 23649
} | class ____(OrganizationNameMixin, SaveOrganizationForm):
__params__ = ["name"] + SaveOrganizationForm.__params__
_max_apps = wtforms.IntegerField()
membership_size = wtforms.SelectField(
choices=[(size.value, size.value) for size in OrganizationMembershipSize],
default=None,
coerce=OrganizationMembershipSize,
validators=[
wtforms.validators.InputRequired(
message="Select organization membership size"
),
],
)
usage = wtforms.TextAreaField(
validators=[
wtforms.validators.Length(
min=32,
message=(
"Tell us a little more about how you plan to use PyPI Organizations"
),
),
wtforms.validators.Length(
max=1024,
message=_(
"We don't need to know quite that much :), "
"limit your usage description to 1024 characters or less."
),
),
]
)
def __init__(
self, *args, organization_service, user, max_applications=None, **kwargs
):
super().__init__(*args, **kwargs)
self.organization_service = organization_service
self.user = user
self.max_applications = max_applications
def validate__max_apps(self, field):
if (
self.max_applications is not None
and len(self.user.organization_applications) >= self.max_applications
):
self.form_errors.append(
_(
"You have already submitted the maximum number of "
f"Organization requests ({self.max_applications})."
)
)
return False
return True
| CreateOrganizationApplicationForm |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 67782,
"end": 68302
} | class ____(FieldValues):
"""
Valid and invalid values for a `Choice` field that uses a flat list for the
choices, rather than a list of pairs of (`value`, `description`).
"""
valid_inputs = {
'poor': 'poor',
'medium': 'medium',
'good': 'good',
}
invalid_inputs = {
'awful': ['"awful" is not a valid choice.']
}
outputs = {
'good': 'good'
}
field = serializers.ChoiceField(choices=('poor', 'medium', 'good'))
| TestChoiceFieldWithListChoices |
python | huggingface__transformers | src/transformers/models/tapas/modeling_tapas.py | {
"start": 6424,
"end": 11426
} | class ____(nn.Module):
def __init__(self, config, layer_idx=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
f"heads {config.num_attention_heads}"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.is_decoder = config.is_decoder
self.layer_idx = layer_idx
def forward(
self,
hidden_states,
attention_mask=None,
encoder_hidden_states=None,
past_key_values=None,
output_attentions=False,
cache_position=None,
):
batch_size, seq_length, _ = hidden_states.shape
query_layer = (
self.query(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
is_updated = False
is_cross_attention = encoder_hidden_states is not None
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
# after the first generated id, we can subsequently re-use all key/value_layer from cache
curr_past_key_values = past_key_values.cross_attention_cache
else:
curr_past_key_values = past_key_values.self_attention_cache
else:
curr_past_key_values = past_key_values
current_states = encoder_hidden_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
# reuse k,v, cross_attentions
key_layer = curr_past_key_values.layers[self.layer_idx].keys
value_layer = curr_past_key_values.layers[self.layer_idx].values
else:
key_layer = (
self.key(current_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
value_layer = (
self.value(current_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
if past_key_values is not None:
# save all key/value_layer to cache to be re-used for fast auto-regressive generation
cache_position = cache_position if not is_cross_attention else None
key_layer, value_layer = curr_past_key_values.update(
key_layer, value_layer, self.layer_idx, {"cache_position": cache_position}
)
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in TapasModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_values,)
return outputs
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
| TapasSelfAttention |
python | streamlit__streamlit | lib/streamlit/elements/widgets/slider.py | {
"start": 7477,
"end": 38248
} | class ____:
# If min/max/value/step are not provided, then we return an int.
# if ONLY step is provided, then it must be an int and we return an int.
@overload
def slider(
self,
label: str,
min_value: None = None,
max_value: None = None,
value: None = None,
step: int | None = None,
format: str | None = None,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
width: WidthWithoutContent = "stretch",
) -> int: ...
# If min-value or max_value is provided and a numeric type, and value (if provided)
# is a singular numeric, return the same numeric type.
@overload
def slider(
self,
label: str,
min_value: SliderNumericT | None = None,
max_value: SliderNumericT | None = None,
value: SliderNumericT | None = None,
step: StepNumericT[SliderNumericT] | None = None,
format: str | None = None,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
width: WidthWithoutContent = "stretch",
) -> SliderNumericT: ...
# If value is provided and a sequence of numeric type,
# return a tuple of the same numeric type.
@overload
def slider(
self,
label: str,
min_value: SliderNumericT | None = None,
max_value: SliderNumericT | None = None,
*,
value: SliderNumericSpanT[SliderNumericT],
step: StepNumericT[SliderNumericT] | None = None,
format: str | None = None,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
width: WidthWithoutContent = "stretch",
) -> tuple[SliderNumericT, SliderNumericT]: ...
# If value is provided positionally and a sequence of numeric type,
# return a tuple of the same numeric type.
@overload
def slider(
self,
label: str,
min_value: SliderNumericT,
max_value: SliderNumericT,
value: SliderNumericSpanT[SliderNumericT],
step: StepNumericT[SliderNumericT] | None = None,
format: str | None = None,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
width: WidthWithoutContent = "stretch",
) -> tuple[SliderNumericT, SliderNumericT]: ...
# If min-value is provided and a datelike type, and value (if provided)
# is a singular datelike, return the same datelike type.
@overload
def slider(
self,
label: str,
min_value: SliderDatelikeT,
max_value: SliderDatelikeT | None = None,
value: SliderDatelikeT | None = None,
step: StepDatelikeT | None = None,
format: str | None = None,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
width: WidthWithoutContent = "stretch",
) -> SliderDatelikeT: ...
# If max-value is provided and a datelike type, and value (if provided)
# is a singular datelike, return the same datelike type.
@overload
def slider(
self,
label: str,
min_value: None = None,
*,
max_value: SliderDatelikeT,
value: SliderDatelikeT | None = None,
step: StepDatelikeT | None = None,
format: str | None = None,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
width: WidthWithoutContent = "stretch",
) -> SliderDatelikeT: ...
# If value is provided and a datelike type, return the same datelike type.
@overload
def slider(
self,
label: str,
min_value: None = None,
max_value: None = None,
*,
value: SliderDatelikeT,
step: StepDatelikeT | None = None,
format: str | None = None,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
width: WidthWithoutContent = "stretch",
) -> SliderDatelikeT: ...
# If value is provided and a sequence of datelike type,
# return a tuple of the same datelike type.
@overload
def slider(
self,
label: str,
min_value: SliderDatelikeT | None = None,
max_value: SliderDatelikeT | None = None,
*,
value: list[SliderDatelikeT]
| tuple[SliderDatelikeT]
| tuple[SliderDatelikeT, SliderDatelikeT],
step: StepDatelikeT | None = None,
format: str | None = None,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
width: WidthWithoutContent = "stretch",
) -> tuple[SliderDatelikeT, SliderDatelikeT]: ...
# If value is provided positionally and a sequence of datelike type,
# return a tuple of the same datelike type.
@overload
def slider(
self,
label: str,
min_value: SliderDatelikeT,
max_value: SliderDatelikeT,
value: SliderDatelikeSpanT[SliderDatelikeT],
/,
step: StepDatelikeT | None = None,
format: str | None = None,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
width: WidthWithoutContent = "stretch",
) -> tuple[SliderDatelikeT, SliderDatelikeT]: ...
# https://github.com/python/mypy/issues/17614
@gather_metrics("slider") # type: ignore[misc]
def slider(
self,
label: str,
min_value: SliderScalar | None = None,
max_value: SliderScalar | None = None,
value: SliderValue | None = None,
step: SliderStep | None = None,
format: str | None = None,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
width: WidthWithoutContent = "stretch",
) -> Any:
r"""Display a slider widget.
This supports int, float, date, time, and datetime types.
This also allows you to render a range slider by passing a two-element
tuple or list as the ``value``.
The difference between ``st.slider`` and ``st.select_slider`` is that
``slider`` only accepts numerical or date/time data and takes a range as
input, while ``select_slider`` accepts any datatype and takes an iterable
set of options.
.. note::
Integer values exceeding +/- ``(1<<53) - 1`` cannot be accurately
stored or returned by the widget due to serialization constraints
between the Python server and JavaScript client. You must handle
such numbers as floats, leading to a loss in precision.
Parameters
----------
label : str
A short label explaining to the user what this slider is for.
The label can optionally contain GitHub-flavored Markdown of the
following types: Bold, Italics, Strikethroughs, Inline Code, Links,
and Images. Images display like icons, with a max height equal to
the font height.
Unsupported Markdown elements are unwrapped so only their children
(text contents) render. Display unsupported elements as literal
characters by backslash-escaping them. E.g.,
``"1\. Not an ordered list"``.
See the ``body`` parameter of |st.markdown|_ for additional,
supported Markdown directives.
For accessibility reasons, you should never set an empty label, but
you can hide it with ``label_visibility`` if needed. In the future,
we may disallow empty labels by raising an exception.
.. |st.markdown| replace:: ``st.markdown``
.. _st.markdown: https://docs.streamlit.io/develop/api-reference/text/st.markdown
min_value : a supported type or None
The minimum permitted value.
If this is ``None`` (default), the minimum value depends on the
type as follows:
- integer: ``0``
- float: ``0.0``
- date or datetime: ``value - timedelta(days=14)``
- time: ``time.min``
max_value : a supported type or None
The maximum permitted value.
If this is ``None`` (default), the maximum value depends on the
type as follows:
- integer: ``100``
- float: ``1.0``
- date or datetime: ``value + timedelta(days=14)``
- time: ``time.max``
value : a supported type or a tuple/list of supported types or None
The value of the slider when it first renders. If a tuple/list
of two values is passed here, then a range slider with those lower
and upper bounds is rendered. For example, if set to `(1, 10)` the
slider will have a selectable range between 1 and 10.
This defaults to ``min_value``. If the type is not otherwise
specified in any of the numeric parameters, the widget will have an
integer value.
step : int, float, timedelta, or None
The stepping interval.
Defaults to 1 if the value is an int, 0.01 if a float,
timedelta(days=1) if a date/datetime, timedelta(minutes=15) if a time
(or if max_value - min_value < 1 day)
format : str or None
A printf-style format string controlling how the interface should
display numbers. This does not impact the return value.
For information about formatting integers and floats, see
`sprintf.js
<https://github.com/alexei/sprintf.js?tab=readme-ov-file#format-specification>`_.
For example, ``format="%0.1f"`` adjusts the displayed decimal
precision to only show one digit after the decimal.
For information about formatting datetimes, dates, and times, see
`momentJS <https://momentjs.com/docs/#/displaying/format/>`_.
For example, ``format="ddd ha"`` adjusts the displayed datetime to
show the day of the week and the hour ("Tue 8pm").
key : str or int
An optional string or integer to use as the unique key for the widget.
If this is omitted, a key will be generated for the widget
based on its content. No two widgets may have the same key.
help : str or None
A tooltip that gets displayed next to the widget label. Streamlit
only displays the tooltip when ``label_visibility="visible"``. If
this is ``None`` (default), no tooltip is displayed.
The tooltip can optionally contain GitHub-flavored Markdown,
including the Markdown directives described in the ``body``
parameter of ``st.markdown``.
on_change : callable
An optional callback invoked when this slider's value changes.
args : list or tuple
An optional list or tuple of args to pass to the callback.
kwargs : dict
An optional dict of kwargs to pass to the callback.
disabled : bool
An optional boolean that disables the slider if set to ``True``.
The default is ``False``.
label_visibility : "visible", "hidden", or "collapsed"
The visibility of the label. The default is ``"visible"``. If this
is ``"hidden"``, Streamlit displays an empty spacer instead of the
label, which can help keep the widget aligned with other widgets.
If this is ``"collapsed"``, Streamlit displays no label or spacer.
width : "stretch" or int
The width of the slider widget. This can be one of the
following:
- ``"stretch"`` (default): The width of the widget matches the
width of the parent container.
- An integer specifying the width in pixels: The widget has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the widget matches the width
of the parent container.
Returns
-------
int/float/date/time/datetime or tuple of int/float/date/time/datetime
The current value of the slider widget. The return type will match
the data type of the value parameter.
Examples
--------
>>> import streamlit as st
>>>
>>> age = st.slider("How old are you?", 0, 130, 25)
>>> st.write("I'm ", age, "years old")
And here's an example of a range slider:
>>> import streamlit as st
>>>
>>> values = st.slider("Select a range of values", 0.0, 100.0, (25.0, 75.0))
>>> st.write("Values:", values)
This is a range time slider:
>>> import streamlit as st
>>> from datetime import time
>>>
>>> appointment = st.slider(
... "Schedule your appointment:", value=(time(11, 30), time(12, 45))
... )
>>> st.write("You're scheduled for:", appointment)
Finally, a datetime slider:
>>> import streamlit as st
>>> from datetime import datetime
>>>
>>> start_time = st.slider(
... "When do you start?",
... value=datetime(2020, 1, 1, 9, 30),
... format="MM/DD/YY - hh:mm",
... )
>>> st.write("Start time:", start_time)
.. output::
https://doc-slider.streamlit.app/
height: 300px
"""
ctx = get_script_run_ctx()
return self._slider(
label=label,
min_value=min_value,
max_value=max_value,
value=value,
step=step,
format=format,
key=key,
help=help,
on_change=on_change,
args=args,
kwargs=kwargs,
disabled=disabled,
label_visibility=label_visibility,
width=width,
ctx=ctx,
)
def _slider(
self,
label: str,
min_value: Any = None,
max_value: Any = None,
value: Any = None,
step: Any = None,
format: str | None = None,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
width: WidthWithoutContent = "stretch",
ctx: ScriptRunContext | None = None,
) -> SliderReturn:
key = to_key(key)
check_widget_policies(
self.dg,
key,
on_change,
default_value=value,
)
maybe_raise_label_warnings(label, label_visibility)
element_id = compute_and_register_element_id(
"slider",
user_key=key,
# Treat the provided key as the main identity; only include
# changes to the value-shaping arguments in the identity
# computation as those can invalidate the current value.
key_as_main_identity={"min_value", "max_value", "step"},
dg=self.dg,
label=label,
min_value=min_value,
max_value=max_value,
value=value,
step=step,
format=format,
help=help,
width=width,
)
if value is None:
# We need to know if this is a single or range slider, but don't have
# a default value, so we check if session_state can tell us.
# We already calculated the id, so there is no risk of this causing
# the id to change.
single_value = True
session_state = get_session_state().filtered_state
if key is not None and key in session_state:
state_value = session_state[key]
single_value = isinstance(state_value, tuple(SUPPORTED_TYPES.keys()))
if single_value:
value = min_value if min_value is not None else 0
else:
mn = min_value if min_value is not None else 0
mx = max_value if max_value is not None else 100
value = [mn, mx]
# Ensure that the value is either a single value or a range of values.
single_value = isinstance(value, tuple(SUPPORTED_TYPES.keys()))
range_value = isinstance(value, (list, tuple)) and len(value) in (0, 1, 2)
if not single_value and not range_value:
raise StreamlitAPIException(
"Slider value should either be an int/float/datetime or a list/tuple of "
"0 to 2 ints/floats/datetimes"
)
# Simplify future logic by always making value a list
prepared_value: Sequence[SliderScalar] = [value] if single_value else value # ty: ignore[invalid-assignment]
def value_to_generic_type(v: Any) -> SliderProto.DataType.ValueType:
if isinstance(v, Integral):
return SUPPORTED_TYPES[Integral]
if isinstance(v, Real):
return SUPPORTED_TYPES[Real]
return SUPPORTED_TYPES[type(v)]
def all_same_type(items: Any) -> bool:
return len(set(map(value_to_generic_type, items))) < 2
if not all_same_type(prepared_value):
raise StreamlitAPIException(
"Slider tuple/list components must be of the same type.\n"
f"But were: {list(map(type, prepared_value))}"
)
data_type = (
SliderProto.INT
if len(prepared_value) == 0
else value_to_generic_type(prepared_value[0])
)
datetime_min: datetime | time = time.min
datetime_max: datetime | time = time.max
if data_type == SliderProto.TIME:
prepared_value = cast("Sequence[time]", prepared_value)
datetime_min = time.min.replace(tzinfo=prepared_value[0].tzinfo)
datetime_max = time.max.replace(tzinfo=prepared_value[0].tzinfo)
if data_type in (SliderProto.DATETIME, SliderProto.DATE):
prepared_value = cast("Sequence[datetime]", prepared_value)
datetime_min = prepared_value[0] - timedelta(days=14)
datetime_max = prepared_value[0] + timedelta(days=14)
defaults: Final[dict[SliderProto.DataType.ValueType, dict[str, Any]]] = {
SliderProto.INT: {
"min_value": 0,
"max_value": 100,
"step": 1,
"format": "%d",
},
SliderProto.FLOAT: {
"min_value": 0.0,
"max_value": 1.0,
"step": 0.01,
"format": "%0.2f",
},
SliderProto.DATETIME: {
"min_value": datetime_min,
"max_value": datetime_max,
"step": timedelta(days=1),
"format": "YYYY-MM-DD",
},
SliderProto.DATE: {
"min_value": datetime_min,
"max_value": datetime_max,
"step": timedelta(days=1),
"format": "YYYY-MM-DD",
},
SliderProto.TIME: {
"min_value": datetime_min,
"max_value": datetime_max,
"step": timedelta(minutes=15),
"format": "HH:mm",
},
}
if min_value is None:
min_value = defaults[data_type]["min_value"]
if max_value is None:
max_value = defaults[data_type]["max_value"]
if step is None:
step = defaults[data_type]["step"]
if data_type in (
SliderProto.DATETIME,
SliderProto.DATE,
) and max_value - min_value < timedelta(days=1): # ty: ignore[unsupported-operator]
step = timedelta(minutes=15)
if format is None:
format = cast("str", defaults[data_type]["format"]) # noqa: A001
if step == 0:
raise StreamlitAPIException(
"Slider components cannot be passed a `step` of 0."
)
# Ensure that all arguments are of the same type.
slider_args = [min_value, max_value, step]
int_args = all(isinstance(a, Integral) for a in slider_args)
float_args = all(
isinstance(a, Real) and not isinstance(a, Integral) for a in slider_args
)
# When min and max_value are the same timelike, step should be a timedelta
timelike_args = (
data_type in TIMELIKE_TYPES
and isinstance(step, timedelta)
and type(min_value) is type(max_value)
)
if not int_args and not float_args and not timelike_args:
msg = (
"Slider value arguments must be of matching types."
f"\n`min_value` has {type(min_value).__name__} type."
f"\n`max_value` has {type(max_value).__name__} type."
f"\n`step` has {type(step).__name__} type."
)
raise StreamlitAPIException(msg)
# Ensure that the value matches arguments' types.
all_ints = data_type == SliderProto.INT and int_args
all_floats = data_type == SliderProto.FLOAT and float_args
all_timelikes = data_type in TIMELIKE_TYPES and timelike_args
if not all_ints and not all_floats and not all_timelikes:
msg = (
"Both value and arguments must be of the same type."
f"\n`value` has {type(value).__name__} type."
f"\n`min_value` has {type(min_value).__name__} type."
f"\n`max_value` has {type(max_value).__name__} type."
)
raise StreamlitAPIException(msg)
# Ensure that min <= value(s) <= max, adjusting the bounds as necessary.
min_value = min(min_value, max_value)
max_value = max(min_value, max_value)
if len(prepared_value) == 1:
min_value = min(prepared_value[0], min_value)
max_value = max(prepared_value[0], max_value)
elif len(prepared_value) == 2:
start, end = prepared_value
if start > end: # type: ignore[operator]
# Swap start and end, since they seem reversed
start, end = end, start
prepared_value = start, end
min_value = min(start, min_value)
max_value = max(end, max_value)
else:
# Empty list, so let's just use the outer bounds
prepared_value = [min_value, max_value]
# Bounds checks. JSNumber produces human-readable exceptions that
# we simply re-package as StreamlitAPIExceptions.
# (We check `min_value` and `max_value` here; `value` and `step` are
# already known to be in the [min_value, max_value] range.)
try:
if all_ints:
JSNumber.validate_int_bounds(min_value, "`min_value`")
JSNumber.validate_int_bounds(max_value, "`max_value`")
elif all_floats:
JSNumber.validate_float_bounds(min_value, "`min_value`")
JSNumber.validate_float_bounds(max_value, "`max_value`")
elif all_timelikes:
# No validation yet. TODO: check between 0001-01-01 to 9999-12-31
pass
except JSNumberBoundsException as e:
raise StreamlitAPIException(str(e))
orig_tz = None
# Convert dates or times into datetimes
if data_type == SliderProto.TIME:
prepared_value = cast("Sequence[time]", prepared_value)
min_value = cast("time", min_value)
max_value = cast("time", max_value)
prepared_value = list(map(_time_to_datetime, prepared_value))
min_value = _time_to_datetime(min_value)
max_value = _time_to_datetime(max_value)
if data_type == SliderProto.DATE:
prepared_value = cast("Sequence[date]", prepared_value)
min_value = cast("date", min_value)
max_value = cast("date", max_value)
prepared_value = list(map(_date_to_datetime, prepared_value))
min_value = _date_to_datetime(min_value)
max_value = _date_to_datetime(max_value)
# The frontend will error if the values are equal, so checking here
# lets us produce a nicer python error message and stack trace.
if min_value == max_value:
raise StreamlitAPIException(
"Slider `min_value` must be less than the `max_value`."
f"\nThe values were {min_value} and {max_value}."
)
# Now, convert to microseconds (so we can serialize datetime to a long)
if data_type in TIMELIKE_TYPES:
prepared_value = cast("Sequence[datetime]", prepared_value)
min_value = cast("datetime", min_value)
max_value = cast("datetime", max_value)
step = cast("timedelta", step)
# Restore times/datetimes to original timezone (dates are always naive)
orig_tz = (
prepared_value[0].tzinfo
if data_type in (SliderProto.TIME, SliderProto.DATETIME)
else None
)
prepared_value = list(map(_datetime_to_micros, prepared_value))
min_value = _datetime_to_micros(min_value)
max_value = _datetime_to_micros(max_value)
step = _delta_to_micros(step)
# At this point, prepared_value is expected to be a list of floats:
prepared_value = cast("list[float]", prepared_value)
# It would be great if we could guess the number of decimal places from
# the `step` argument, but this would only be meaningful if step were a
# decimal. As a possible improvement we could make this function accept
# decimals and/or use some heuristics for floats.
slider_proto = SliderProto()
slider_proto.type = SliderProto.Type.SLIDER
slider_proto.id = element_id
slider_proto.label = label
slider_proto.format = format
slider_proto.default[:] = prepared_value
slider_proto.min = min_value
slider_proto.max = max_value
slider_proto.step = cast("float", step)
slider_proto.data_type = data_type
slider_proto.options[:] = []
slider_proto.form_id = current_form_id(self.dg)
slider_proto.disabled = disabled
slider_proto.label_visibility.value = get_label_visibility_proto_value(
label_visibility
)
if help is not None:
slider_proto.help = dedent(help)
serde = SliderSerde(
prepared_value,
data_type,
single_value,
orig_tz,
)
widget_state = register_widget(
slider_proto.id,
on_change_handler=on_change,
args=args,
kwargs=kwargs,
deserializer=serde.deserialize,
serializer=serde.serialize,
ctx=ctx,
value_type="double_array_value",
)
if widget_state.value_changed:
# Min/Max bounds checks when the value is updated.
serialized_values = serde.serialize(widget_state.value)
for serialized_value in serialized_values:
# Use the deserialized values for more readable error messages for dates/times
deserialized_value = serde.deserialize_single_value(serialized_value)
if serialized_value < slider_proto.min:
raise StreamlitValueBelowMinError(
value=deserialized_value,
min_value=serde.deserialize_single_value(slider_proto.min),
)
if serialized_value > slider_proto.max:
raise StreamlitValueAboveMaxError(
value=deserialized_value,
max_value=serde.deserialize_single_value(slider_proto.max),
)
slider_proto.value[:] = serialized_values
slider_proto.set_value = True
validate_width(width)
layout_config = LayoutConfig(width=width)
self.dg._enqueue("slider", slider_proto, layout_config=layout_config)
return cast("SliderReturn", widget_state.value)
@property
def dg(self) -> DeltaGenerator:
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self)
| SliderMixin |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/spark_azure_blob_storage_datasource.py | {
"start": 1227,
"end": 8405
} | class ____(_SparkFilePathDatasource):
"""
SparkAzureBlobStorageDatasource is a subclass of SparkDatasource which connects to
Azure Blob Storage.
"""
# class attributes
data_connector_type: ClassVar[Type[AzureBlobStorageDataConnector]] = (
AzureBlobStorageDataConnector
)
# instance attributes
type: Literal["spark_abs"] = "spark_abs"
# Azure Blob Storage specific attributes
azure_options: Dict[str, Union[ConfigStr, Any]] = {}
_account_name: str = pydantic.PrivateAttr(default="")
# on 3.11 the annotation must be type-checking import otherwise it will fail at import time
_azure_client: Union[BlobServiceClient, None] = pydantic.PrivateAttr(default=None)
def _get_azure_client(self) -> azure.BlobServiceClient:
azure_client: Union[azure.BlobServiceClient, None] = self._azure_client
if not azure_client:
_check_config_substitutions_needed(
self, self.azure_options, raise_warning_if_provider_not_present=True
)
# pull in needed config substitutions using the `_config_provider`
# The `FluentBaseModel.dict()` call will do the config substitution on the serialized dict if a `config_provider` is passed. # noqa: E501 # FIXME CoP
azure_options: dict = self.dict(config_provider=self._config_provider).get(
"azure_options", {}
)
# Thanks to schema validation, we are guaranteed to have one of `conn_str` or `account_url` to # noqa: E501 # FIXME CoP
# use in authentication (but not both). If the format or content of the provided keys is invalid, # noqa: E501 # FIXME CoP
# the assignment of `self._account_name` and `self._azure_client` will fail and an error will be raised. # noqa: E501 # FIXME CoP
conn_str: str | None = azure_options.get("conn_str")
account_url: str | None = azure_options.get("account_url")
if not bool(conn_str) ^ bool(account_url):
raise SparkAzureBlobStorageDatasourceError( # noqa: TRY003 # FIXME CoP
"You must provide one of `conn_str` or `account_url` to the `azure_options` key in your config (but not both)" # noqa: E501 # FIXME CoP
)
# Validate that "azure" libararies were successfully imported and attempt to create "azure_client" handle. # noqa: E501 # FIXME CoP
if azure.BlobServiceClient: # type: ignore[truthy-function] # False if NotImported
try:
if conn_str is not None:
self._account_name = re.search( # type: ignore[union-attr] # re.search could return None
r".*?AccountName=(.+?);.*?", conn_str
).group(1)
azure_client = azure.BlobServiceClient.from_connection_string(
**azure_options
)
elif account_url is not None:
self._account_name = re.search( # type: ignore[union-attr] # re.search could return None
r"(?:https?://)?(.+?).blob.core.windows.net",
account_url,
).group(1)
azure_client = azure.BlobServiceClient(**azure_options)
except Exception as e:
# Failure to create "azure_client" is most likely due invalid "azure_options" dictionary. # noqa: E501 # FIXME CoP
raise SparkAzureBlobStorageDatasourceError( # noqa: TRY003 # FIXME CoP
f'Due to exception: "{e!s}", "azure_client" could not be created.'
) from e
else:
raise SparkAzureBlobStorageDatasourceError( # noqa: TRY003 # FIXME CoP
'Unable to create "SparkAzureBlobStorageDatasource" due to missing azure.storage.blob dependency.' # noqa: E501 # FIXME CoP
)
self._azure_client = azure_client
if not azure_client:
raise SparkAzureBlobStorageDatasourceError("Failed to return `azure_client`") # noqa: TRY003 # FIXME CoP
return azure_client
@override
def test_connection(self, test_assets: bool = True) -> None:
"""Test the connection for the SparkAzureBlobStorageDatasource.
Args:
test_assets: If assets have been passed to the SparkAzureBlobStorageDatasource, whether to test them as well.
Raises:
TestConnectionError: If the connection test fails.
""" # noqa: E501 # FIXME CoP
try:
# tests Azure connection
_ = self._get_azure_client()
except Exception as e:
raise TestConnectionError( # noqa: TRY003 # FIXME CoP
f"Attempt to connect to datasource failed with the following error message: {e!s}"
) from e
# tests Spark connection, raising TestConnectionError
super().test_connection()
if self.assets and test_assets:
for asset in self.assets:
asset.test_connection()
@override
def _build_data_connector(
self,
data_asset: SPARK_PATH_ASSET_UNION,
abs_container: str = _MISSING, # type: ignore[assignment] # _MISSING is used as sentinel value
abs_name_starts_with: str = "",
abs_delimiter: str = "/",
abs_recursive_file_discovery: bool = False,
**kwargs,
) -> None:
"""Builds and attaches the `AzureBlobStorageDataConnector` to the asset."""
if kwargs:
raise TypeError( # noqa: TRY003 # FIXME CoP
f"_build_data_connector() got unexpected keyword arguments {list(kwargs.keys())}"
)
if abs_container is _MISSING:
raise TypeError(f"'{data_asset.name}' is missing required argument 'abs_container'") # noqa: TRY003 # FIXME CoP
data_asset._data_connector = self.data_connector_type.build_data_connector(
datasource_name=self.name,
data_asset_name=data_asset.name,
azure_client=self._get_azure_client(),
account_name=self._account_name,
container=abs_container,
name_starts_with=abs_name_starts_with,
delimiter=abs_delimiter,
recursive_file_discovery=abs_recursive_file_discovery,
file_path_template_map_fn=AzureUrl.AZURE_BLOB_STORAGE_WASBS_URL_TEMPLATE.format,
whole_directory_path_override=data_asset.get_whole_directory_path_override(),
)
# build a more specific `_test_connection_error_message`
data_asset._test_connection_error_message = (
self.data_connector_type.build_test_connection_error_message(
data_asset_name=data_asset.name,
account_name=self._account_name,
container=abs_container,
name_starts_with=abs_name_starts_with,
delimiter=abs_delimiter,
recursive_file_discovery=abs_recursive_file_discovery,
)
)
| SparkAzureBlobStorageDatasource |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 125410,
"end": 125765
} | class ____:
xlUnderlineStyleDouble = -4119 # from enum XlUnderlineStyle
xlUnderlineStyleDoubleAccounting = 5 # from enum XlUnderlineStyle
xlUnderlineStyleNone = -4142 # from enum XlUnderlineStyle
xlUnderlineStyleSingle = 2 # from enum XlUnderlineStyle
xlUnderlineStyleSingleAccounting = 4 # from enum XlUnderlineStyle
| UnderlineStyle |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 11145,
"end": 11273
} | class ____(PolymorphicModel):
topic = models.CharField(max_length=30)
class Meta:
abstract = True
| AbstractProject |
python | allegroai__clearml | examples/services/monitoring/slack_alerts.py | {
"start": 1423,
"end": 2807
} | class ____:
def __init__(self, include=None, exclude=None):
# type: (Optional[Union[str, List[str]]], Optional[Union[str, List[str]]]) -> ()
# Either `include` or `exclude` should be specified, but not both
if include is not None and exclude is not None:
raise ValueError("Specify either 'include' or 'exclude', not both!")
include = include or list()
if isinstance(include, str):
include = [include]
exclude = exclude or list()
if isinstance(exclude, str):
exclude = [exclude]
res = Task._get_default_session().send_request("users", "get_all")
if not res.ok:
raise RuntimeError("Cannot get list of all users!")
all_users = {d["name"]: d["id"] for d in res.json()["data"]["users"]}
for user in include + exclude:
if user not in all_users:
print(f"Cannot translate user '{user}' to any known user ID - "
f"will use it verbatim")
self.include = [all_users.get(user, user) for user in include] # Map usernames to user IDs
self.exclude = [all_users.get(user, user) for user in exclude]
def __call__(self, task):
# type: (Task) -> bool
if self.include:
return task.data.user not in self.include
return task.data.user in self.exclude
| UserFilter |
python | django-import-export__django-import-export | import_export/formats/base_formats.py | {
"start": 4109,
"end": 4209
} | class ____(TextFormat):
TABLIB_MODULE = "tablib.formats._html"
CONTENT_TYPE = "text/html"
| HTML |
python | openai__openai-python | src/openai/types/realtime/realtime_truncation_retention_ratio.py | {
"start": 691,
"end": 1380
} | class ____(BaseModel):
retention_ratio: float
"""
Fraction of post-instruction conversation tokens to retain (`0.0` - `1.0`) when
the conversation exceeds the input token limit. Setting this to `0.8` means that
messages will be dropped until 80% of the maximum allowed tokens are used. This
helps reduce the frequency of truncations and improve cache rates.
"""
type: Literal["retention_ratio"]
"""Use retention ratio truncation."""
token_limits: Optional[TokenLimits] = None
"""Optional custom token limits for this truncation strategy.
If not provided, the model's default token limits will be used.
"""
| RealtimeTruncationRetentionRatio |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1052739,
"end": 1053135
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("VerifiableDomain", graphql_name="node")
"""The item at the end of the edge."""
| VerifiableDomainEdge |
python | astropy__astropy | astropy/coordinates/sky_coordinate.py | {
"start": 1372,
"end": 78914
} | class ____(MaskableShapedLikeNDArray):
"""High-level object providing a flexible interface for celestial coordinate
representation, manipulation, and transformation between systems.
The |SkyCoord| class accepts a wide variety of inputs for initialization. At
a minimum these must provide one or more celestial coordinate values with
unambiguous units. Inputs may be scalars or lists/tuples/arrays, yielding
scalar or array coordinates (can be checked via ``SkyCoord.isscalar``).
Typically one also specifies the coordinate frame, though this is not
required. The general pattern for spherical representations is::
SkyCoord(COORD, [FRAME], keyword_args ...)
SkyCoord(LON, LAT, [FRAME], keyword_args ...)
SkyCoord(LON, LAT, [DISTANCE], frame=FRAME, unit=UNIT, keyword_args ...)
SkyCoord([FRAME], <lon_attr>=LON, <lat_attr>=LAT, keyword_args ...)
It is also possible to input coordinate values in other representations
such as cartesian or cylindrical. In this case one includes the keyword
argument ``representation_type='cartesian'`` (for example) along with data
in ``x``, ``y``, and ``z``.
See also: https://docs.astropy.org/en/stable/coordinates/
Examples
--------
The examples below illustrate common ways of initializing a |SkyCoord|
object. For a complete description of the allowed syntax see the
full coordinates documentation. First some imports::
>>> from astropy.coordinates import SkyCoord # High-level coordinates
>>> from astropy.coordinates import ICRS, Galactic, FK4, FK5 # Low-level frames
>>> from astropy.coordinates import Angle, Latitude, Longitude # Angles
>>> import astropy.units as u
The coordinate values and frame specification can now be provided using
positional and keyword arguments::
>>> c = SkyCoord(10, 20, unit="deg") # defaults to ICRS frame
>>> c = SkyCoord([1, 2, 3], [-30, 45, 8], frame="icrs", unit="deg") # 3 coords
>>> coords = ["1:12:43.2 +31:12:43", "1 12 43.2 +31 12 43"]
>>> c = SkyCoord(coords, frame=FK4, unit=(u.hourangle, u.deg), obstime="J1992.21")
>>> c = SkyCoord("1h12m43.2s +1d12m43s", frame=Galactic) # Units from string
>>> c = SkyCoord(frame="galactic", l="1h12m43.2s", b="+1d12m43s")
>>> ra = Longitude([1, 2, 3], unit=u.deg) # Could also use Angle
>>> dec = np.array([4.5, 5.2, 6.3]) * u.deg # Astropy Quantity
>>> c = SkyCoord(ra, dec, frame='icrs')
>>> c = SkyCoord(frame=ICRS, ra=ra, dec=dec, obstime='2001-01-02T12:34:56')
>>> c = FK4(1 * u.deg, 2 * u.deg) # Uses defaults for obstime, equinox
>>> c = SkyCoord(c, obstime='J2010.11', equinox='B1965') # Override defaults
>>> c = SkyCoord(w=0, u=1, v=2, unit='kpc', frame='galactic',
... representation_type='cartesian')
>>> c = SkyCoord([ICRS(ra=1*u.deg, dec=2*u.deg), ICRS(ra=3*u.deg, dec=4*u.deg)])
Velocity components (proper motions or radial velocities) can also be
provided in a similar manner::
>>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, radial_velocity=10*u.km/u.s)
>>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=2*u.mas/u.yr, pm_dec=1*u.mas/u.yr)
As shown, the frame can be a `~astropy.coordinates.BaseCoordinateFrame`
class or the corresponding string alias -- lower-case versions of the
class name that allow for creating a |SkyCoord| object and transforming
frames without explicitly importing the frame classes.
Parameters
----------
frame : `~astropy.coordinates.BaseCoordinateFrame` class or string, optional
Type of coordinate frame this |SkyCoord| should represent. Defaults to
to ICRS if not given or given as None.
unit : `~astropy.units.Unit`, string, or tuple of :class:`~astropy.units.Unit` or str, optional
Units for supplied coordinate values.
If only one unit is supplied then it applies to all values.
Note that passing only one unit might lead to unit conversion errors
if the coordinate values are expected to have mixed physical meanings
(e.g., angles and distances).
obstime : time-like, optional
Time(s) of observation.
equinox : time-like, optional
Coordinate frame equinox time.
representation_type : str or Representation class
Specifies the representation, e.g. 'spherical', 'cartesian', or
'cylindrical'. This affects the positional args and other keyword args
which must correspond to the given representation.
copy : bool, optional
If `True` (default), a copy of any coordinate data is made. This
argument can only be passed in as a keyword argument.
**keyword_args
Other keyword arguments as applicable for user-defined coordinate frames.
Common options include:
ra, dec : angle-like, optional
RA and Dec for frames where ``ra`` and ``dec`` are keys in the
frame's ``representation_component_names``, including ``ICRS``,
``FK5``, ``FK4``, and ``FK4NoETerms``.
pm_ra_cosdec, pm_dec : `~astropy.units.Quantity` ['angular speed'], optional
Proper motion components, in angle per time units.
l, b : angle-like, optional
Galactic ``l`` and ``b`` for for frames where ``l`` and ``b`` are
keys in the frame's ``representation_component_names``, including
the ``Galactic`` frame.
pm_l_cosb, pm_b : `~astropy.units.Quantity` ['angular speed'], optional
Proper motion components in the `~astropy.coordinates.Galactic` frame,
in angle per time units.
x, y, z : float or `~astropy.units.Quantity` ['length'], optional
Cartesian coordinates values
u, v, w : float or `~astropy.units.Quantity` ['length'], optional
Cartesian coordinates values for the Galactic frame.
radial_velocity : `~astropy.units.Quantity` ['speed'], optional
The component of the velocity along the line-of-sight (i.e., the
radial direction), in velocity units.
"""
# Declare that SkyCoord can be used as a Table column by defining the
# info property.
info = SkyCoordInfo()
# Methods implemented by the underlying frame
position_angle: Callable[[Union[BaseCoordinateFrame, "SkyCoord"]], Angle]
separation: Callable[[Union[BaseCoordinateFrame, "SkyCoord"]], Angle]
separation_3d: Callable[[Union[BaseCoordinateFrame, "SkyCoord"]], Distance]
def __init__(self, *args, copy=True, **kwargs):
    # See the class docstring for the full description of accepted
    # arguments.
    #
    # these are frame attributes set on this SkyCoord but *not* a part of
    # the frame object this SkyCoord contains
    self._extra_frameattr_names = set()

    # If all that is passed in is a frame instance that already has data,
    # we should bypass all of the parsing and logic below. This is here
    # to make this the fastest way to create a SkyCoord instance. Many of
    # the classmethods implemented for performance enhancements will use
    # this as the initialization path
    if (
        len(args) == 1
        and len(kwargs) == 0
        and isinstance(args[0], (BaseCoordinateFrame, SkyCoord))
    ):
        coords = args[0]
        if isinstance(coords, SkyCoord):
            self._extra_frameattr_names = coords._extra_frameattr_names
            self.info = coords.info

            # Copy over any extra frame attributes
            for attr_name in self._extra_frameattr_names:
                # Setting it will also validate it.
                setattr(self, attr_name, getattr(coords, attr_name))

            # From here on, work with the bare frame instance.
            coords = coords.frame

        if not coords.has_data:
            raise ValueError(
                "Cannot initialize from a coordinate frame "
                "instance without coordinate data"
            )

        if copy:
            self._sky_coord_frame = coords.copy()
        else:
            self._sky_coord_frame = coords

    else:
        # Get the frame instance without coordinate data but with all frame
        # attributes set - these could either have been passed in with the
        # frame as an instance, or passed in as kwargs here
        frame_cls, frame_kwargs = _get_frame_without_data(args, kwargs)

        # Parse the args and kwargs to assemble a sanitized and validated
        # kwargs dict for initializing attributes for this object and for
        # creating the internal self._sky_coord_frame object
        args = list(args)  # Make it mutable
        skycoord_kwargs, components, info = _parse_coordinate_data(
            frame_cls(**frame_kwargs), args, kwargs
        )

        # In the above two parsing functions, these kwargs were identified
        # as valid frame attributes for *some* frame, but not the frame that
        # this SkyCoord will have. We keep these attributes as special
        # skycoord frame attributes:
        for attr in skycoord_kwargs:
            # Setting it will also validate it.
            setattr(self, attr, skycoord_kwargs[attr])

        if info is not None:
            self.info = info

        # Finally make the internal coordinate object.
        frame_kwargs.update(components)
        self._sky_coord_frame = frame_cls(copy=copy, **frame_kwargs)

        if not self._sky_coord_frame.has_data:
            raise ValueError("Cannot create a SkyCoord without data")
@property
def frame(self):
    """The coordinate frame instance (with data) wrapped by this object."""
    return self._sky_coord_frame
@property
def representation_type(self):
    """The representation type of the underlying frame (delegated)."""
    return self.frame.representation_type

@representation_type.setter
def representation_type(self, value):
    # Delegate to the frame, which handles validation of ``value``.
    self.frame.representation_type = value
@property
def shape(self):
    """Shape of the underlying frame data (delegates to ``self.frame.shape``)."""
    return self.frame.shape
# The following 3 have identical implementation as in BaseCoordinateFrame,
# but we cannot just rely on __getattr__ to get them from the frame,
# because (1) get_mask has to be able to access our own attributes, and
# (2) masked and mask are abstract properties in MaskableSharedLikeNDArray
# which thus need to be explicitly defined.
# TODO: factor out common methods and attributes in a mixin class.
@property
def masked(self):
    # Delegates to the representation data; __doc__ is copied from
    # BaseCoordinateFrame.masked in the class body below.
    return self.data.masked
def get_mask(self, *attrs):
    # __doc__ is copied from BaseCoordinateFrame.get_mask below.
    if not attrs:
        # No attributes requested: just use the frame's combined mask.
        return self._sky_coord_frame.get_mask()

    selected = operator.attrgetter(*attrs)(self)
    # attrgetter returns a bare value for a single attribute name.
    if not isinstance(selected, tuple):
        selected = (selected,)
    per_attr_masks = [getattr(item, "mask", None) for item in selected]
    combined = combine_masks(per_attr_masks)
    # np.broadcast_to also makes the result read-only.
    return np.broadcast_to(combined, self.shape)
@property
def mask(self):
    # Delegates to the wrapped frame; __doc__ is copied from
    # BaseCoordinateFrame.mask in the class body below.
    return self._sky_coord_frame.mask
# Inherit the documentation of the corresponding frame-level properties,
# since the implementations above simply delegate to the frame.
masked.__doc__ = BaseCoordinateFrame.masked.__doc__
get_mask.__doc__ = BaseCoordinateFrame.get_mask.__doc__
mask.__doc__ = BaseCoordinateFrame.mask.__doc__
def __eq__(self, value):
    """Equality operator for SkyCoord.

    This implements strict equality and requires that the frames are
    equivalent, extra frame attributes are equivalent, and that the
    representation data are exactly equal.
    """
    # Comparison against a bare frame delegates to the frame, but only
    # makes sense if that frame carries data.
    if isinstance(value, BaseCoordinateFrame):
        if value._data is None:
            raise ValueError("Can only compare SkyCoord to Frame with data")
        return self.frame == value

    if not isinstance(value, SkyCoord):
        return NotImplemented

    # Any "extra" frame attribute present on either operand must be
    # equivalent on both.
    extra_names = self._extra_frameattr_names | value._extra_frameattr_names
    for name in extra_names:
        ours = getattr(self, name)
        theirs = getattr(value, name)
        if not self.frame._frameattr_equiv(ours, theirs):
            raise ValueError(
                f"cannot compare: extra frame attribute '{name}' is not"
                " equivalent (perhaps compare the frames directly to avoid"
                " this exception)"
            )

    return self._sky_coord_frame == value._sky_coord_frame
def __ne__(self, value):
    # Element-wise negation of __eq__, which may return a boolean array.
    return np.logical_not(self == value)
def _apply(self, method, *args, **kwargs):
    """Create a new instance, applying a method to the underlying data.

    In typical usage, the method is any of the shape-changing methods for
    `~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
    picking particular elements (``__getitem__``, ``take``, etc.), which
    are all defined in `~astropy.utils.shapes.ShapedLikeNDArray`. It will be
    applied to the underlying arrays in the representation (e.g., ``x``,
    ``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`),
    as well as to any frame attributes that have a shape, with the results
    used to create a new instance.

    Internally, it is also used to apply functions to the above parts
    (in particular, `~numpy.broadcast_to`).

    Parameters
    ----------
    method : str or callable
        If str, it is the name of a method that is applied to the internal
        ``components``. If callable, the function is applied.
    *args
        Any positional arguments for ``method``.
    **kwargs : dict
        Any keyword arguments for ``method``.
    """

    def apply_method(value):
        # Apply ``method`` to a single value: let the value apply it to
        # itself if it supports the _apply protocol, otherwise call the
        # function directly or look the method up by name.
        if isinstance(value, ShapedLikeNDArray):
            return value._apply(method, *args, **kwargs)
        else:
            if callable(method):
                return method(value, *args, **kwargs)
            else:
                return getattr(value, method)(*args, **kwargs)

    # create a new but empty instance, and copy over stuff
    new = super().__new__(self.__class__)
    new._sky_coord_frame = self._sky_coord_frame._apply(method, *args, **kwargs)
    new._extra_frameattr_names = self._extra_frameattr_names.copy()
    for attr in self._extra_frameattr_names:
        value = getattr(self, attr)
        if getattr(value, "shape", ()):
            value = apply_method(value)
        elif method == "copy" or method == "flatten":
            # flatten should copy also for a single element array, but
            # we cannot use it directly for array scalars, since it
            # always returns a one-dimensional array. So, just copy.
            value = copy.copy(value)
        # Store under the private name; see __getattr__ / __setattr__.
        setattr(new, "_" + attr, value)

    # Copy other 'info' attr only if it has actually been defined.
    # See PR #3898 for further explanation and justification, along
    # with Quantity.__array_finalize__
    if "info" in self.__dict__:
        new.info = self.info

    return new
def __setitem__(self, item, value):
    """Implement self[item] = value for SkyCoord.

    The right hand ``value`` must be strictly consistent with self:
    - Identical class
    - Equivalent frames
    - Identical representation_types
    - Identical representation differentials keys
    - Identical frame attributes
    - Identical "extra" frame attributes (e.g. obstime for an ICRS coord)

    With these caveats the setitem ends up as effectively a setitem on
    the representation data.

      self.frame.data[item] = value.frame.data
    """
    if value is np.ma.masked or value is np.ma.nomask:
        # Special case: masking/unmasking elements is applied directly to
        # the representation data, and cached derived values are dropped.
        self.data.__setitem__(item, value)
        self.cache.clear()
        return

    if self.__class__ is not value.__class__:
        raise TypeError(
            "can only set from object of same class: "
            f"{self.__class__.__name__} vs. {value.__class__.__name__}"
        )

    # Make sure that any extra frame attribute names are equivalent.
    for attr in self._extra_frameattr_names | value._extra_frameattr_names:
        if not self.frame._frameattr_equiv(
            getattr(self, attr), getattr(value, attr)
        ):
            raise ValueError(f"attribute {attr} is not equivalent")

    # Set the frame values. This checks frame equivalence and also clears
    # the cache to ensure that the object is not in an inconsistent state.
    self._sky_coord_frame[item] = value._sky_coord_frame
def insert(self, obj, values, axis=0):
    # The implementation (and docstring, copied below) live in
    # SkyCoordInfo._insert.
    return self.info._insert(obj, values, axis)

insert.__doc__ = SkyCoordInfo._insert.__doc__
def is_transformable_to(self, new_frame):
    """
    Determines if this coordinate frame can be transformed to another
    given frame.

    Parameters
    ----------
    new_frame : frame class, frame object, or str
        The proposed frame to transform into.

    Returns
    -------
    transformable : bool or str
        `True` if this can be transformed to ``new_frame``, `False` if
        not, or the string 'same' if ``new_frame`` is the same system as
        this object but no transformation is defined.

    Notes
    -----
    A return value of 'same' means the transformation will work, but it will
    just give back a copy of this object. The intended usage is::

        if coord.is_transformable_to(some_unknown_frame):
            coord2 = coord.transform_to(some_unknown_frame)

    This will work even if ``some_unknown_frame`` turns out to be the same
    frame class as ``coord``. This is intended for cases where the frame
    is the same regardless of the frame attributes (e.g. ICRS), but be
    aware that it *might* also indicate that someone forgot to define the
    transformation between two objects of the same frame class but with
    different attributes.
    """
    # TODO! like matplotlib, do string overrides for modified methods
    if isinstance(new_frame, str):
        # Resolve a frame alias to the corresponding frame class first.
        new_frame = _get_frame_class(new_frame)
    return self.frame.is_transformable_to(new_frame)
def transform_to(self, frame, merge_attributes=True):
    """Transform this coordinate to a new frame.

    The precise frame transformed to depends on ``merge_attributes``.
    If `False`, the destination frame is used exactly as passed in.
    But this is often not quite what one wants. E.g., suppose one wants to
    transform an ICRS coordinate that has an obstime attribute to FK4; in
    this case, one likely would want to use this information. Thus, the
    default for ``merge_attributes`` is `True`, in which the precedence is
    as follows: (1) explicitly set (i.e., non-default) values in the
    destination frame; (2) explicitly set values in the source; (3) default
    value in the destination frame.

    Note that in either case, any explicitly set attributes on the source
    |SkyCoord| that are not part of the destination frame's definition are
    kept (stored on the resulting |SkyCoord|), and thus one can round-trip
    (e.g., from FK4 to ICRS to FK4 without losing obstime).

    Parameters
    ----------
    frame : str, `~astropy.coordinates.BaseCoordinateFrame` class or instance, or |SkyCoord| instance
        The frame to transform this coordinate into. If a |SkyCoord|, the
        underlying frame is extracted, and all other information ignored.
    merge_attributes : bool, optional
        Whether the default attributes in the destination frame are allowed
        to be overridden by explicitly set attributes in the source
        (see note above; default: `True`).

    Returns
    -------
    coord : |SkyCoord|
        A new object with this coordinate represented in the `frame` frame.

    Raises
    ------
    ValueError
        If there is no possible transformation route.
    """
    frame_kwargs = {}

    # Frame name (string) or frame class? Coerce into an instance.
    try:
        frame = _get_frame_class(frame)()
    except Exception:
        # Not a name or class; handled by the isinstance checks below.
        pass

    if isinstance(frame, SkyCoord):
        frame = frame.frame  # Change to underlying coord frame instance

    if isinstance(frame, BaseCoordinateFrame):
        new_frame_cls = frame.__class__

        # Get frame attributes, allowing defaults to be overridden by
        # explicitly set attributes of the source if ``merge_attributes``.
        for attr in frame_transform_graph.frame_attributes:
            self_val = getattr(self, attr, None)
            frame_val = getattr(frame, attr, None)
            # Precedence: explicitly-set destination value, then
            # explicitly-set source value, then destination default.
            if frame_val is not None and not (
                merge_attributes and frame.is_frame_attr_default(attr)
            ):
                frame_kwargs[attr] = frame_val
            elif self_val is not None and not self.is_frame_attr_default(attr):
                frame_kwargs[attr] = self_val
            elif frame_val is not None:
                frame_kwargs[attr] = frame_val
    else:
        raise ValueError(
            "Transform `frame` must be a frame name, class, or instance"
        )

    # Get the composite transform to the new frame
    trans = frame_transform_graph.get_transform(self.frame.__class__, new_frame_cls)
    if trans is None:
        raise ConvertError(
            f"Cannot transform from {self.frame.__class__} to {new_frame_cls}"
        )

    # Make a generic frame which will accept all the frame kwargs that
    # are provided and allow for transforming through intermediate frames
    # which may require one or more of those kwargs.
    generic_frame = GenericFrame(frame_kwargs)

    # Do the transformation, returning a coordinate frame of the desired
    # final type (not generic).
    new_coord = trans(self.frame, generic_frame)

    # Finally make the new SkyCoord object from the `new_coord` and
    # remaining frame_kwargs that are not frame_attributes in `new_coord`.
    for attr in set(new_coord.frame_attributes) & set(frame_kwargs.keys()):
        frame_kwargs.pop(attr)

    # Always remove the origin frame attribute, as that attribute only makes
    # sense with a SkyOffsetFrame (in which case it will be stored on the frame).
    # See gh-11277.
    # TODO: Should it be a property of the frame attribute that it can
    # or cannot be stored on a SkyCoord?
    frame_kwargs.pop("origin", None)

    return self.__class__(new_coord, **frame_kwargs)
def apply_space_motion(self, new_obstime=None, dt=None):
    """Compute the position to a new time using the velocities.

    Compute the position of the source represented by this coordinate object
    to a new time using the velocities stored in this object and assuming
    linear space motion (including relativistic corrections). This is
    sometimes referred to as an "epoch transformation".

    The initial time before the evolution is taken from the ``obstime``
    attribute of this coordinate. Note that this method currently does not
    support evolving coordinates where the *frame* has an ``obstime`` frame
    attribute, so the ``obstime`` is only used for storing the before and
    after times, not actually as an attribute of the frame. Alternatively,
    if ``dt`` is given, an ``obstime`` need not be provided at all.

    Parameters
    ----------
    new_obstime : `~astropy.time.Time`, optional
        The time at which to evolve the position to. Requires that the
        ``obstime`` attribute be present on this frame.
    dt : `~astropy.units.Quantity`, `~astropy.time.TimeDelta`, optional
        An amount of time to evolve the position of the source. Cannot be
        given at the same time as ``new_obstime``.

    Returns
    -------
    new_coord : |SkyCoord|
        A new coordinate object with the evolved location of this coordinate
        at the new time. ``obstime`` will be set on this object to the new
        time only if ``self`` also has ``obstime``.
    """
    from .builtin_frames.icrs import ICRS

    # Exactly one of new_obstime / dt must be provided.
    if (new_obstime is None) == (dt is None):
        raise ValueError(
            "You must specify one of `new_obstime` or `dt`, but not both."
        )

    # Validate that we have velocity info
    if "s" not in self.frame.data.differentials:
        raise ValueError("SkyCoord requires velocity data to evolve the position.")

    if "obstime" in self.frame.frame_attributes:
        raise NotImplementedError(
            "Updating the coordinates in a frame with explicit time dependence is"
            " currently not supported. If you would like this functionality, please"
            " open an issue on github:\nhttps://github.com/astropy/astropy"
        )

    if new_obstime is not None and self.obstime is None:
        # If no obstime is already on this object, raise an error if a new
        # obstime is passed: we need to know the time / epoch at which the
        # the position / velocity were measured initially
        raise ValueError(
            "This object has no associated `obstime`. apply_space_motion() must"
            " receive a time difference, `dt`, and not a new obstime."
        )

    # Compute t1 and t2, the times used in the starpm call, which *only*
    # uses them to compute a delta-time
    t1 = self.obstime
    if dt is None:
        # self.obstime is not None and new_obstime is not None b/c of above
        # checks
        t2 = new_obstime
    else:
        # new_obstime is definitely None b/c of the above checks
        if t1 is None:
            # MAGIC NUMBER: if the current SkyCoord object has no obstime,
            # assume J2000 to do the dt offset. This is not actually used
            # for anything except a delta-t in starpm, so it's OK that it's
            # not necessarily the "real" obstime
            t1 = Time("J2000")
            new_obstime = None  # we don't actually know the initial obstime
            t2 = t1 + dt
        else:
            t2 = t1 + dt
            new_obstime = t2
    # starpm wants tdb time
    t1 = t1.tdb
    t2 = t2.tdb

    # proper motion in RA should not include the cos(dec) term, see the
    # erfa function eraStarpv, comment (4). So we convert to the regular
    # spherical differentials.
    icrsrep = self.icrs.represent_as(SphericalRepresentation, SphericalDifferential)
    icrsvel = icrsrep.differentials["s"]

    parallax_zero = False
    try:
        plx = icrsrep.distance.to_value(u.arcsecond, u.parallax())
    except u.UnitConversionError:  # No distance: set to 0 by convention
        plx = 0.0
        parallax_zero = True

    try:
        rv = icrsvel.d_distance.to_value(u.km / u.s)
    except u.UnitConversionError:  # No RV
        rv = 0.0

    # erfa.pmsafe propagates the star position with proper motion,
    # parallax, and radial velocity between the two epochs.
    starpm = erfa.pmsafe(
        icrsrep.lon.radian,
        icrsrep.lat.radian,
        icrsvel.d_lon.to_value(u.radian / u.yr),
        icrsvel.d_lat.to_value(u.radian / u.yr),
        plx,
        rv,
        t1.jd1,
        t1.jd2,
        t2.jd1,
        t2.jd2,
    )

    if parallax_zero:
        new_distance = None
    else:
        new_distance = Distance(parallax=starpm[4] << u.arcsec)

    icrs2 = ICRS(
        ra=u.Quantity(starpm[0], u.radian, copy=COPY_IF_NEEDED),
        dec=u.Quantity(starpm[1], u.radian, copy=COPY_IF_NEEDED),
        pm_ra=u.Quantity(starpm[2], u.radian / u.yr, copy=COPY_IF_NEEDED),
        pm_dec=u.Quantity(starpm[3], u.radian / u.yr, copy=COPY_IF_NEEDED),
        distance=new_distance,
        radial_velocity=u.Quantity(starpm[5], u.km / u.s, copy=COPY_IF_NEEDED),
        differential_type=SphericalDifferential,
    )

    # Update the obstime of the returned SkyCoord, and need to carry along
    # the frame attributes
    frattrs = {
        attrnm: getattr(self, attrnm) for attrnm in self._extra_frameattr_names
    }
    frattrs["obstime"] = new_obstime
    result = self.__class__(icrs2, **frattrs).transform_to(self.frame)

    # Without this the output might not have the right differential type.
    # Not sure if this fixes the problem or just hides it. See #11932
    result.differential_type = self.differential_type
    return result
def _is_name(self, string):
    """
    Returns whether a string is one of the aliases for the frame.
    """
    frame_name = self.frame.name
    if isinstance(frame_name, list):
        # Some frames carry a list of aliases rather than a single name.
        return string in frame_name
    return frame_name == string
def __getattr__(self, attr):
    """
    Overrides getattr to return coordinates that this can be transformed
    to, based on the alias attr in the primary transform graph.
    """
    # Guard on the presence of _sky_coord_frame so that attribute access
    # during unpickling / partial construction falls through safely.
    if "_sky_coord_frame" in self.__dict__:
        if self._is_name(attr):
            return self  # Should this be a deepcopy of self?

        # Anything in the set of all possible frame_attr_names is handled
        # here. If the attr is relevant for the current frame then delegate
        # to self.frame otherwise get it from self._<attr>.
        if attr in frame_transform_graph.frame_attributes:
            if attr in self.frame.frame_attributes:
                return getattr(self.frame, attr)
            else:
                return getattr(self, "_" + attr, None)

        # Some attributes might not fall in the above category but still
        # are available through self._sky_coord_frame.
        if not attr.startswith("_") and hasattr(self._sky_coord_frame, attr):
            return getattr(self._sky_coord_frame, attr)

        # Try to interpret as a new frame for transforming.
        frame_cls = frame_transform_graph.lookup_name(attr)
        if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
            return self.transform_to(attr)

    # Call __getattribute__; this will give correct exception.
    return self.__getattribute__(attr)
def __setattr__(self, attr, val):
    # This is to make anything available through __getattr__ immutable
    if attr != "info" and not attr.startswith("_"):
        if "_sky_coord_frame" in self.__dict__:
            if self._is_name(attr):
                # Frame-name aliases (e.g. the current frame's name) are
                # read-only.
                raise AttributeError(f"'{attr}' is immutable")

            if hasattr(self._sky_coord_frame, attr):
                # Delegate to the wrapped frame when it owns the attribute.
                setattr(self._sky_coord_frame, attr, val)
                return

        if attr in frame_transform_graph.frame_attributes:
            # All possible frame attributes can be set, but only via a private
            # variable. See __getattr__ above.
            super().__setattr__("_" + attr, val)
            # Validate it
            frame_transform_graph.frame_attributes[attr].__get__(self)
            # And add to set of extra attributes
            self._extra_frameattr_names |= {attr}
            return

        if frame_transform_graph.lookup_name(attr) is not None:
            # Names of other frames (transform targets) are also read-only.
            raise AttributeError(f"'{attr}' is immutable")

    # Otherwise, do the standard Python attribute setting
    super().__setattr__(attr, val)
def __delattr__(self, attr):
    # mirror __setattr__ above
    if "_sky_coord_frame" in self.__dict__:
        if self._is_name(attr):
            # Frame-name aliases are read-only.
            raise AttributeError(f"'{attr}' is immutable")

        if not attr.startswith("_") and hasattr(self._sky_coord_frame, attr):
            # Delegate deletion to the wrapped frame when it owns the
            # attribute.
            delattr(self._sky_coord_frame, attr)
            return

    frame_cls = frame_transform_graph.lookup_name(attr)
    if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
        raise AttributeError(f"'{attr}' is immutable")

    if attr in frame_transform_graph.frame_attributes:
        # All possible frame attributes can be deleted, but need to remove
        # the corresponding private variable. See __getattr__ above.
        super().__delattr__("_" + attr)
        # Also remove it from the set of extra attributes
        self._extra_frameattr_names -= {attr}
    else:
        # Otherwise, do the standard Python attribute setting
        super().__delattr__(attr)
def __dir__(self):
    """Original dir() behavior, plus frame attributes and transforms.

    This dir includes:
    - All attributes of the SkyCoord class
    - Coordinate transforms available by aliases
    - Attribute / methods of the underlying self.frame objects
    """
    names = set(super().__dir__())

    # Aliases of frames that this coordinate can be transformed to.
    names.update(
        alias
        for alias in frame_transform_graph.get_names()
        if self.frame.is_transformable_to(frame_transform_graph.lookup_name(alias))
    )

    # Public attributes of the wrapped frame.
    names.update(attr for attr in dir(self.frame) if not attr.startswith("_"))

    # All possible frame attributes.
    names.update(frame_transform_graph.frame_attributes.keys())

    return sorted(names)
def __repr__(self):
    """Return ``<Class (Frame[: attrs])[: data]>`` built from the frame's reprs."""
    cls_name = self.__class__.__name__
    frame_name = self.frame.__class__.__name__

    attrs_part = self.frame._frame_attrs_repr()
    if attrs_part:
        attrs_part = ": " + attrs_part

    data_part = self.frame._data_repr()
    if data_part:
        data_part = ": " + data_part

    return f"<{cls_name} ({frame_name}{attrs_part}){data_part}>"
def to_string(self, style="decimal", **kwargs):
    """
    A string representation of the coordinates.

    The default styles definitions are::

      'decimal': 'lat': {'decimal': True, 'unit': "deg"}
                 'lon': {'decimal': True, 'unit': "deg"}
      'dms': 'lat': {'unit': "deg"}
             'lon': {'unit': "deg"}
      'hmsdms': 'lat': {'alwayssign': True, 'pad': True, 'unit': "deg"}
                'lon': {'pad': True, 'unit': "hour"}

    See :meth:`~astropy.coordinates.Angle.to_string` for details and
    keyword arguments (the two angles forming the coordinates are
    both :class:`~astropy.coordinates.Angle` instances). Keyword
    arguments have precedence over the style defaults and are passed
    to :meth:`~astropy.coordinates.Angle.to_string`.

    Parameters
    ----------
    style : {'hmsdms', 'dms', 'decimal'}
        The formatting specification to use. These encode the three most
        common ways to represent coordinates. The default is `decimal`.
    **kwargs
        Keyword args passed to :meth:`~astropy.coordinates.Angle.to_string`.
    """
    sph_coord = self.frame.represent_as(SphericalRepresentation)

    # Per-style defaults for the longitude and latitude formatting.
    styles = {
        "hmsdms": {
            "lonargs": {"unit": u.hour, "pad": True},
            "latargs": {"unit": u.degree, "pad": True, "alwayssign": True},
        },
        "dms": {"lonargs": {"unit": u.degree}, "latargs": {"unit": u.degree}},
        "decimal": {
            "lonargs": {"unit": u.degree, "decimal": True},
            "latargs": {"unit": u.degree, "decimal": True},
        },
    }
    if style not in styles:
        raise ValueError(f"Invalid style. Valid options are: {','.join(styles)}")

    # Explicit keyword arguments take precedence over the style defaults.
    lonargs = {**styles[style]["lonargs"], **kwargs}
    latargs = {**styles[style]["latargs"], **kwargs}

    if np.isscalar(sph_coord.lon.value):
        return (
            f"{sph_coord.lon.to_string(**lonargs)}"
            f" {sph_coord.lat.to_string(**latargs)}"
        )

    formatted = [
        f"{lonangle.to_string(**lonargs)} {latangle.to_string(**latargs)}"
        for lonangle, latangle in zip(sph_coord.lon.ravel(), sph_coord.lat.ravel())
    ]
    if len(sph_coord.shape) > 1:
        formatted = np.array(formatted).reshape(sph_coord.shape)
    return formatted
def to_table(self):
    """
    Convert this |SkyCoord| to a |QTable|.

    Any attributes that have the same length as the |SkyCoord| will be
    converted to columns of the |QTable|. All other attributes will be
    recorded as metadata.

    Returns
    -------
    `~astropy.table.QTable`
        A |QTable| containing the data of this |SkyCoord|.

    Examples
    --------
    >>> sc = SkyCoord(ra=[40, 70]*u.deg, dec=[0, -20]*u.deg,
    ...               obstime=Time([2000, 2010], format='jyear'))
    >>> t = sc.to_table()
    >>> t
    <QTable length=2>
       ra     dec   obstime
      deg     deg
    float64 float64   Time
    ------- ------- -------
       40.0     0.0  2000.0
       70.0   -20.0  2010.0
    >>> t.meta
    {'representation_type': 'spherical', 'frame': 'icrs'}
    """
    table = self.frame.to_table()
    # Record extra attributes not on the frame that have the same length as self as
    # columns in the table, and the other attributes as table metadata.
    # This matches table.serialize._represent_mixin_as_column().
    table.meta["frame"] = self.frame.name
    for key in self._extra_frameattr_names:
        value = getattr(self, key)
        # Length-matched attributes become columns; everything else goes
        # into the table metadata.
        if getattr(value, "shape", ())[:1] == (len(self),):
            table[key] = value
        else:
            table.meta[key] = value
    return table
def is_equivalent_frame(self, other):
    """
    Checks if this object's frame is the same as that of the ``other``
    object.

    To be the same frame, two objects must be the same frame class and have
    the same frame attributes. For two |SkyCoord| objects, *all* of the
    frame attributes have to match, not just those relevant for the object's
    frame.

    Parameters
    ----------
    other : SkyCoord or BaseCoordinateFrame
        The other object to check.

    Returns
    -------
    isequiv : bool
        True if the frames are the same, False if not.

    Raises
    ------
    TypeError
        If ``other`` isn't a |SkyCoord| or a subclass of
        `~astropy.coordinates.BaseCoordinateFrame`.
    """
    if isinstance(other, BaseCoordinateFrame):
        # Frame-to-frame comparison is delegated to the frame itself.
        return self.frame.is_equivalent_frame(other)

    if not isinstance(other, SkyCoord):
        # not a BaseCoordinateFrame nor a SkyCoord object
        raise TypeError(
            "Tried to do is_equivalent_frame on something that isn't frame-like"
        )

    if other.frame.name != self.frame.name:
        return False

    # For SkyCoord, *every* possible frame attribute must match.
    return all(
        BaseCoordinateFrame._frameattr_equiv(
            getattr(self, fattrnm), getattr(other, fattrnm)
        )
        for fattrnm in frame_transform_graph.frame_attributes
    )
# High-level convenience methods
def spherical_offsets_to(self, tocoord):
r"""
Computes angular offsets to go *from* this coordinate *to* another.
Parameters
----------
tocoord : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to find the offset to.
Returns
-------
lon_offset : `~astropy.coordinates.Angle`
The angular offset in the longitude direction. The definition of
"longitude" depends on this coordinate's frame (e.g., RA for
equatorial coordinates).
lat_offset : `~astropy.coordinates.Angle`
The angular offset in the latitude direction. The definition of
"latitude" depends on this coordinate's frame (e.g., Dec for
equatorial coordinates).
Raises
------
ValueError
If the ``tocoord`` is not in the same frame as this one. This is
different from the behavior of the
:meth:`~astropy.coordinates.BaseCoordinateFrame.separation`/:meth:`~astropy.coordinates.BaseCoordinateFrame.separation_3d`
methods because the offset components depend critically on the
specific choice of frame.
Notes
-----
This uses the sky offset frame machinery, and hence will produce a new
sky offset frame if one does not already exist for this object's frame
class.
See Also
--------
:meth:`~astropy.coordinates.BaseCoordinateFrame.separation` :
for the *total* angular offset (not broken out into components).
:meth:`~astropy.coordinates.BaseCoordinateFrame.position_angle` :
for the direction of the offset.
"""
if not self.is_equivalent_frame(tocoord):
raise ValueError(
"Tried to use spherical_offsets_to with two non-matching frames!"
)
aframe = self.skyoffset_frame()
acoord = tocoord.transform_to(aframe)
dlon = acoord.spherical.lon.view(Angle)
dlat = acoord.spherical.lat.view(Angle)
return dlon, dlat
def spherical_offsets_by(self, d_lon, d_lat):
    """
    Computes the coordinate that is a specified pair of angular offsets away
    from this coordinate.

    Parameters
    ----------
    d_lon : angle-like
        The angular offset in the longitude direction. The definition of
        "longitude" depends on this coordinate's frame (e.g., RA for
        equatorial coordinates).
    d_lat : angle-like
        The angular offset in the latitude direction. The definition of
        "latitude" depends on this coordinate's frame (e.g., Dec for
        equatorial coordinates).

    Returns
    -------
    newcoord : `~astropy.coordinates.SkyCoord`
        The coordinates for the location that corresponds to offsetting by
        ``d_lat`` in the latitude direction and ``d_lon`` in the longitude
        direction.

    Notes
    -----
    This internally uses `~astropy.coordinates.SkyOffsetFrame` to do the
    transformation. For a more complete set of transform offsets, use
    `~astropy.coordinates.SkyOffsetFrame` or `~astropy.wcs.WCS` manually.
    This specific method can be reproduced by doing
    ``SkyCoord(SkyOffsetFrame(d_lon, d_lat, origin=self.frame).transform_to(self))``.

    See Also
    --------
    spherical_offsets_to : compute the angular offsets to another coordinate
    directional_offset_by : offset a coordinate by an angle in a direction
    """
    from .builtin_frames.skyoffset import SkyOffsetFrame

    # Build the offset point in a frame centered on this coordinate and
    # transform it back to this coordinate's own frame.
    offset_point = SkyOffsetFrame(d_lon, d_lat, origin=self.frame)
    return self.__class__(offset_point.transform_to(self))
def directional_offset_by(self, position_angle, separation):
    """
    Computes coordinates at the given offset from this coordinate.

    Parameters
    ----------
    position_angle : `~astropy.coordinates.Angle`
        position_angle of offset
    separation : `~astropy.coordinates.Angle`
        offset angular separation

    Returns
    -------
    newpoints : `~astropy.coordinates.SkyCoord`
        The coordinates for the location that corresponds to offsetting by
        the given ``position_angle`` and ``separation``.

    Notes
    -----
    Returned SkyCoord frame retains only the frame attributes that are for
    the resulting frame type. (e.g. if the input frame is
    `~astropy.coordinates.ICRS`, an ``equinox`` value will be retained, but
    an ``obstime`` will not.)

    For a more complete set of transform offsets, use `~astropy.wcs.WCS`.
    `~astropy.coordinates.SkyCoord.skyoffset_frame()` can also be used to
    create a spherical frame with (lat=0, lon=0) at a reference point,
    approximating an xy cartesian system for small offsets. This method
    is distinct in that it is accurate on the sphere.

    See Also
    --------
    :meth:`~astropy.coordinates.BaseCoordinateFrame.position_angle` :
        inverse operation for the ``position_angle`` component
    :meth:`~astropy.coordinates.BaseCoordinateFrame.separation` :
        inverse operation for the ``separation`` component
    """
    # Convert to the unit-sphere representation once; the original code
    # performed this conversion twice (once per component).
    usph = self.represent_as(UnitSphericalRepresentation)
    newlon, newlat = offset_by(
        lon=usph.lon, lat=usph.lat, posang=position_angle, distance=separation
    )
    return SkyCoord(newlon, newlat, frame=self.frame)
def match_to_catalog_sky(self, catalogcoord, nthneighbor=1):
    """
    Finds the nearest on-sky matches of this coordinate in a set of
    catalog coordinates.

    For more on how to use this (and related) functionality, see the
    examples in :ref:`astropy-coordinates-separations-matching`.

    Parameters
    ----------
    catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
        The base catalog in which to search for matches. Typically this
        will be a coordinate object that is an array (i.e.,
        ``catalogcoord.isscalar == False``)
    nthneighbor : int, optional
        Which closest neighbor to search for. Typically ``1`` is
        desired here, as that is correct for matching one set of
        coordinates to another. The next likely use case is ``2``,
        for matching a coordinate catalog against *itself* (``1``
        is inappropriate because each point will find itself as the
        closest match).

    Returns
    -------
    CoordinateMatchResult
        A `~typing.NamedTuple` with attributes representing for each
        source in this |SkyCoord| the indices and angular and
        physical separations of the match in ``catalogcoord``. If
        either the |SkyCoord| or ``catalogcoord`` don't have
        distances, the physical separation is the 3D distance on the
        unit sphere, rather than a true distance.

    Notes
    -----
    This method requires `SciPy <https://www.scipy.org/>`_ to be
    installed or it will fail.

    See Also
    --------
    astropy.coordinates.match_coordinates_sky
    SkyCoord.match_to_catalog_3d
    """
    from .matching import match_coordinates_sky

    # The catalog must be a coordinate-like object that carries data.
    is_coordlike = isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))
    if not (is_coordlike and catalogcoord.has_data):
        raise TypeError(
            "Can only get separation to another SkyCoord or a "
            "coordinate frame with data"
        )

    return match_coordinates_sky(
        self, catalogcoord, nthneighbor=nthneighbor, storekdtree="_kdtree_sky"
    )
def match_to_catalog_3d(self, catalogcoord, nthneighbor=1):
    """Find the nearest 3-dimensional matches of this coordinate in a catalog.

    The closest neighbor is found in 3D space; this differs from the
    on-sky match only when ``distance`` is set on this object or on
    ``catalogcoord``.

    For more on how to use this (and related) functionality, see the
    examples in :ref:`astropy-coordinates-separations-matching`.

    Parameters
    ----------
    catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
        The base catalog in which to search for matches; typically an
        array coordinate object (``catalogcoord.isscalar == False``).
    nthneighbor : int, optional
        Which closest neighbor to search for. Use ``1`` to match one set
        of coordinates to another; use ``2`` to match a catalog against
        *itself* (``1`` is inappropriate there because each point would
        find itself as the closest match).

    Returns
    -------
    CoordinateMatchResult
        A `~typing.NamedTuple` with, for each source in this |SkyCoord|,
        the index and the angular and physical separations of its match
        in ``catalogcoord``.

    Notes
    -----
    Requires `SciPy <https://www.scipy.org/>`_ to be installed.

    See Also
    --------
    astropy.coordinates.match_coordinates_3d
    SkyCoord.match_to_catalog_sky
    """
    from .matching import match_coordinates_3d

    # The catalog must be a coordinate object carrying actual data.
    valid_catalog = isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))
    if not (valid_catalog and catalogcoord.has_data):
        raise TypeError(
            "Can only get separation to another SkyCoord or a "
            "coordinate frame with data"
        )

    return match_coordinates_3d(
        self, catalogcoord, nthneighbor=nthneighbor, storekdtree="_kdtree_3d"
    )
def search_around_sky(self, searcharoundcoords, seplimit):
    """Find all coordinates in this object within an on-sky separation of
    the supplied points.

    Intended for |SkyCoord| objects with coordinate arrays; for a scalar
    coordinate use
    :meth:`~astropy.coordinates.BaseCoordinateFrame.separation` instead.

    For more on how to use this (and related) functionality, see the
    examples in :ref:`astropy-coordinates-separations-matching`.

    Parameters
    ----------
    searcharoundcoords : coordinate-like
        The coordinates to search around; must be a one-dimensional
        coordinate array.
    seplimit : `~astropy.units.Quantity` ['angle']
        The on-sky separation to search within; should be broadcastable
        to the shape of ``searcharoundcoords``.

    Returns
    -------
    CoordinateSearchResult
        A `~typing.NamedTuple` with the indices of the found pairs (into
        ``searcharoundcoords`` and this |SkyCoord|) and their angular and
        physical separations. If either set lacks distances, the physical
        separation is the 3D distance on the unit sphere rather than a
        true distance.

    Notes
    -----
    Requires `SciPy <https://www.scipy.org/>`_ to be installed.

    In the current implementation the results are sorted by
    ``searcharoundcoords`` index (``idxsearcharound`` ascending); this is
    an implementation detail and could change in a future release.

    See Also
    --------
    astropy.coordinates.search_around_sky
    SkyCoord.search_around_3d
    """
    # Alias the helper so it does not shadow this method's own name.
    from .matching import search_around_sky as _search_around_sky

    return _search_around_sky(
        searcharoundcoords, self, seplimit, storekdtree="_kdtree_sky"
    )
def search_around_3d(self, searcharoundcoords, distlimit):
    """Find all coordinates in this object within a 3D radius of the
    supplied points.

    Intended for |SkyCoord| objects with coordinate arrays; for a scalar
    coordinate use
    :meth:`~astropy.coordinates.BaseCoordinateFrame.separation_3d` instead.

    For more on how to use this (and related) functionality, see the
    examples in :ref:`astropy-coordinates-separations-matching`.

    Parameters
    ----------
    searcharoundcoords : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
        The coordinates to search around; must be a one-dimensional
        coordinate array.
    distlimit : `~astropy.units.Quantity` ['length']
        The physical radius to search within; should be broadcastable to
        the shape of ``searcharoundcoords``.

    Returns
    -------
    CoordinateSearchResult
        A `~typing.NamedTuple` with the indices of the found pairs (into
        ``searcharoundcoords`` and this |SkyCoord|) and their angular and
        physical separations.

    Notes
    -----
    Requires `SciPy <https://www.scipy.org/>`_ to be installed.

    In the current implementation the results are sorted by
    ``searcharoundcoords`` index (``idxsearcharound`` ascending); this is
    an implementation detail and could change in a future release.

    See Also
    --------
    astropy.coordinates.search_around_3d
    SkyCoord.search_around_sky
    """
    # Alias the helper so it does not shadow this method's own name.
    from .matching import search_around_3d as _search_around_3d

    return _search_around_3d(
        searcharoundcoords, self, distlimit, storekdtree="_kdtree_3d"
    )
def skyoffset_frame(self, rotation=None):
    """Return a `~astropy.coordinates.SkyOffsetFrame` with this coordinate
    at its origin.

    Parameters
    ----------
    rotation : angle-like
        The final rotation of the frame about the ``origin``, following
        the left-hand rule: an object at a given position angle in the
        un-rotated system is sent toward the positive latitude (z)
        direction in the final frame.

    Returns
    -------
    astrframe : `~astropy.coordinates.SkyOffsetFrame`
        A sky offset frame of the same type as this |SkyCoord|'s frame
        (e.g., for an ICRS coordinate the result is a SkyOffsetICRS),
        with ``origin`` set to this object.
    """
    offset_frame = SkyOffsetFrame(origin=self, rotation=rotation)
    return offset_frame
def get_constellation(self, short_name=False, constellation_list="iau"):
    """
    Determines the constellation(s) of the coordinates this SkyCoord contains.

    Parameters
    ----------
    short_name : bool
        If True, the returned names are the IAU-sanctioned abbreviated
        names. Otherwise, full names for the constellations are used.
    constellation_list : str
        The set of constellations to use. Currently only ``'iau'`` is
        supported, meaning the 88 "modern" constellations endorsed by the IAU.

    Returns
    -------
    constellation : str or string array
        If this is a scalar coordinate, returns the name of the
        constellation. If it is an array |SkyCoord|, it returns an array of
        names.

    Notes
    -----
    To determine which constellation a point on the sky is in, this first
    precesses to B1875, and then uses the Delporte boundaries of the 88
    modern constellations, as tabulated by
    `Roman 1987 <https://cdsarc.cds.unistra.fr/viz-bin/Cat?VI/42>`_.

    See Also
    --------
    astropy.coordinates.get_constellation
    """
    from .funcs import get_constellation

    # because of issue #7028, the conversion to a PrecessedGeocentric
    # system fails in some cases. Work around is to drop the velocities.
    # they are not needed here since only position information is used
    extra_frameattrs = {nm: getattr(self, nm) for nm in self._extra_frameattr_names}
    # Rebuild an equivalent SkyCoord without differentials, preserving any
    # extra frame attributes, and delegate the lookup to that copy.
    novel = SkyCoord(
        self.realize_frame(self.data.without_differentials()), **extra_frameattrs
    )
    return get_constellation(novel, short_name, constellation_list)

    # the simpler version below can be used when gh-issue #7028 is resolved
    # return get_constellation(self, short_name, constellation_list)
# WCS pixel to/from sky conversions
def to_pixel(self, wcs, origin=0, mode="all"):
    """Convert this coordinate to pixel coordinates using a
    `~astropy.wcs.WCS` object.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        The WCS to use for the conversion.
    origin : int
        Whether to return 0- or 1-based pixel coordinates.
    mode : 'all' or 'wcs'
        Whether the transformation includes distortions (``'all'``) or
        only the core WCS transformation (``'wcs'``).

    Returns
    -------
    xp, yp : `numpy.ndarray`
        The pixel coordinates.

    See Also
    --------
    astropy.wcs.utils.skycoord_to_pixel : the implementation of this method
    """
    from astropy.wcs.utils import skycoord_to_pixel

    return skycoord_to_pixel(self, wcs=wcs, mode=mode, origin=origin)
@classmethod
def from_pixel(cls, xp, yp, wcs, origin=0, mode="all"):
    """Create a new SkyCoord from pixel coordinates using a World
    Coordinate System.

    Parameters
    ----------
    xp, yp : float or ndarray
        The pixel coordinates to convert.
    wcs : `~astropy.wcs.WCS`
        The WCS to use for the conversion.
    origin : int
        Whether the inputs are 0- or 1-based pixel coordinates.
    mode : 'all' or 'wcs'
        Whether the transformation includes distortions (``'all'``) or
        only the core WCS transformation (``'wcs'``).

    Returns
    -------
    coord : `~astropy.coordinates.SkyCoord`
        A new object with sky coordinates corresponding to the input
        ``xp`` and ``yp``.

    See Also
    --------
    to_pixel : to do the inverse operation
    astropy.wcs.utils.pixel_to_skycoord : the implementation of this method
    """
    from astropy.wcs.utils import pixel_to_skycoord

    return pixel_to_skycoord(xp, yp, wcs=wcs, cls=cls, origin=origin, mode=mode)
def contained_by(self, wcs, image=None, **kwargs):
    """Report whether this SkyCoord is contained in the given WCS footprint.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        The WCS whose footprint is tested against this coordinate.
    image : array
        Optional image associated with ``wcs``. When given, its shape
        defines the footprint; otherwise the naxis keywords on ``wcs``
        are used.
    **kwargs
        Additional arguments passed on to
        `~astropy.coordinates.SkyCoord.to_pixel`.

    Returns
    -------
    response : bool
        True means the WCS footprint contains the coordinate, False means
        it does not.
    """
    # Footprint extent: prefer the actual image shape when provided,
    # otherwise fall back to the NAXIS values stored on the WCS.
    if image is None:
        xmax, ymax = wcs._naxis
    else:
        ymax, xmax = image.shape

    with warnings.catch_warnings():
        # Conversion warnings just mean the coordinate wasn't found.
        warnings.simplefilter("ignore")
        try:
            x, y = self.to_pixel(wcs, **kwargs)
        except Exception:
            return False

    return (0 < x) & (x < xmax) & (0 < y) & (y < ymax)
def radial_velocity_correction(
    self, kind="barycentric", obstime=None, location=None
):
    """
    Compute the correction required to convert a radial velocity at a given
    time and place on the Earth's Surface to a barycentric or heliocentric
    velocity.

    Parameters
    ----------
    kind : str
        The kind of velocity correction. Must be 'barycentric' or
        'heliocentric'.
    obstime : `~astropy.time.Time` or None, optional
        The time at which to compute the correction. If `None`, the
        ``obstime`` frame attribute on the |SkyCoord| will be used.
    location : `~astropy.coordinates.EarthLocation` or None, optional
        The observer location at which to compute the correction. If
        `None`, the ``location`` frame attribute on the passed-in
        ``obstime`` will be used, and if that is None, the ``location``
        frame attribute on the |SkyCoord| will be used.

    Raises
    ------
    ValueError
        If either ``obstime`` or ``location`` are passed in (not ``None``)
        when the frame attribute is already set on this |SkyCoord|.
    TypeError
        If ``obstime`` or ``location`` aren't provided, either as arguments
        or as frame attributes.

    Returns
    -------
    vcorr : `~astropy.units.Quantity` ['speed']
        The correction with a positive sign. I.e., *add* this
        to an observed radial velocity to get the barycentric (or
        heliocentric) velocity. If m/s precision or better is needed,
        see the notes below.

    Notes
    -----
    The barycentric correction is calculated to higher precision than the
    heliocentric correction and includes additional physics (e.g time
    dilation). Use barycentric corrections if m/s precision is required.

    The algorithm here is sufficient to perform corrections at the mm/s
    level, but care is needed in application. The barycentric correction
    returned uses the optical approximation v = z * c. Strictly speaking,
    the barycentric correction is multiplicative and should be applied as::

        >>> from astropy.time import Time
        >>> from astropy.coordinates import SkyCoord, EarthLocation
        >>> from astropy.constants import c
        >>> t = Time(56370.5, format='mjd', scale='utc')
        >>> loc = EarthLocation('149d33m00.5s','-30d18m46.385s',236.87*u.m)
        >>> sc = SkyCoord(1*u.deg, 2*u.deg)
        >>> vcorr = sc.radial_velocity_correction(kind='barycentric', obstime=t, location=loc)  # doctest: +REMOTE_DATA
        >>> rv = rv + vcorr + rv * vcorr / c  # doctest: +SKIP

    Also note that this method returns the correction velocity in the
    so-called *optical convention*::

        >>> vcorr = zb * c  # doctest: +SKIP

    where ``zb`` is the barycentric correction redshift as defined in
    section 3 of Wright & Eastman (2014). The application formula given
    above follows from their equation (11) under assumption that the radial
    velocity ``rv`` has also been defined using the same optical
    convention. Note, this can be regarded as a matter of velocity
    definition and does not by itself imply any loss of accuracy, provided
    sufficient care has been taken during interpretation of the results. If
    you need the barycentric correction expressed as the full relativistic
    velocity (e.g., to provide it as the input to another software which
    performs the application), the following recipe can be used::

        >>> zb = vcorr / c  # doctest: +REMOTE_DATA
        >>> zb_plus_one_squared = (zb + 1) ** 2  # doctest: +REMOTE_DATA
        >>> vcorr_rel = c * (zb_plus_one_squared - 1) / (zb_plus_one_squared + 1)  # doctest: +REMOTE_DATA

    or alternatively using just equivalencies::

        >>> vcorr_rel = vcorr.to(u.Hz, u.doppler_optical(1*u.Hz)).to(vcorr.unit, u.doppler_relativistic(1*u.Hz))  # doctest: +REMOTE_DATA

    See also `~astropy.units.doppler_optical`,
    `~astropy.units.doppler_radio`, and
    `~astropy.units.doppler_relativistic` for more information on
    the velocity conventions.

    The default is for this method to use the builtin ephemeris for
    computing the sun and earth location. Other ephemerides can be chosen
    by setting the `~astropy.coordinates.solar_system_ephemeris` variable,
    either directly or via ``with`` statement. For example, to use the JPL
    ephemeris, do::

        >>> from astropy.coordinates import solar_system_ephemeris
        >>> sc = SkyCoord(1*u.deg, 2*u.deg)
        >>> with solar_system_ephemeris.set('jpl'):  # doctest: +REMOTE_DATA
        ...     rv += sc.radial_velocity_correction(obstime=t, location=loc)  # doctest: +SKIP
    """
    # has to be here to prevent circular imports
    from .solar_system import get_body_barycentric_posvel

    # --- location validation -------------------------------------------
    # The observer location may come from exactly one of three places:
    # the `location` argument, the passed-in `obstime`, or this SkyCoord.
    # Any ambiguity (two sources set at once) is an error.
    timeloc = getattr(obstime, "location", None)
    if location is None:
        if self.location is not None:
            location = self.location
            if timeloc is not None:
                raise ValueError(
                    "`location` cannot be in both the passed-in `obstime` and this"
                    " `SkyCoord` because it is ambiguous which is meant for the"
                    " radial_velocity_correction."
                )
        elif timeloc is not None:
            location = timeloc
        else:
            raise TypeError(
                "Must provide a `location` to radial_velocity_correction, either as"
                " a SkyCoord frame attribute, as an attribute on the passed in"
                " `obstime`, or in the method call."
            )
    elif self.location is not None or timeloc is not None:
        raise ValueError(
            "Cannot compute radial velocity correction if `location` argument is"
            " passed in and there is also a `location` attribute on this SkyCoord"
            " or the passed-in `obstime`."
        )

    # --- obstime validation --------------------------------------------
    coo_at_rv_obstime = self  # assume we need no space motion for now
    if obstime is None:
        obstime = self.obstime
        if obstime is None:
            raise TypeError(
                "Must provide an `obstime` to radial_velocity_correction, either as"
                " a SkyCoord frame attribute or in the method call."
            )
    elif self.obstime is not None and self.frame.data.differentials:
        # we do need space motion after all
        coo_at_rv_obstime = self.apply_space_motion(obstime)
    elif self.obstime is None and "s" in self.data.differentials:
        warnings.warn(
            "SkyCoord has space motion, and therefore the specified "
            "position of the SkyCoord may not be the same as "
            "the `obstime` for the radial velocity measurement. "
            "This may affect the rv correction at the order of km/s"
            "for very high proper motions sources. If you wish to "
            "apply space motion of the SkyCoord to correct for this"
            "the `obstime` attribute of the SkyCoord must be set",
            AstropyUserWarning,
        )

    # Barycentric position/velocity of the Earth (and, for heliocentric
    # corrections, shift the velocity origin to the Sun).
    pos_earth, v_origin_to_earth = get_body_barycentric_posvel("earth", obstime)
    if kind == "heliocentric":
        v_origin_to_earth -= get_body_barycentric_posvel("sun", obstime)[1]
    elif kind != "barycentric":
        raise ValueError(
            "`kind` argument to radial_velocity_correction must "
            f"be 'barycentric' or 'heliocentric', but got '{kind}'"
        )

    gcrs_p, gcrs_v = location.get_gcrs_posvel(obstime)

    # transforming to GCRS is not the correct thing to do here, since we don't want to
    # include aberration (or light deflection)? Instead, only apply parallax if necessary
    icrs_cart = coo_at_rv_obstime.icrs.cartesian
    targcart = icrs_cart.without_differentials()
    if self.data.__class__ is not UnitSphericalRepresentation:
        # SkyCoord has distances, so apply parallax by calculating
        # the direction of the target as seen by the observer.
        targcart -= pos_earth + gcrs_p
        targcart /= targcart.norm()

    if kind == "heliocentric":
        # Do a simpler correction than for barycentric ignoring time dilation and
        # gravitational redshift. This is adequate since heliocentric corrections
        # shouldn't be used if cm/s precision is required.
        return targcart.dot(v_origin_to_earth + gcrs_v)

    # Relativistic barycentric correction.
    beta_obs = (v_origin_to_earth + gcrs_v) / speed_of_light
    gamma_obs = 1 / np.sqrt(1 - beta_obs.norm() ** 2)
    gr = location.gravitational_redshift(obstime)
    # barycentric redshift according to eq 28 in Wright & Eastmann (2014),
    # neglecting Shapiro delay and effects of the star's own motion
    zb = gamma_obs * (1 + beta_obs.dot(targcart)) / (1 + gr / speed_of_light)

    # try and get terms corresponding to stellar motion.
    if icrs_cart.differentials:
        try:
            ro = self.icrs.cartesian
            beta_star = ro.differentials["s"].to_cartesian() / speed_of_light
            # ICRS unit vector at coordinate epoch
            ro = ro.without_differentials()
            ro /= ro.norm()
            zb *= (1 + beta_star.dot(ro)) / (1 + beta_star.dot(targcart))
        except u.UnitConversionError:
            warnings.warn(
                "SkyCoord contains some velocity information, but not enough to"
                " calculate the full space motion of the source, and so this"
                " has been ignored for the purposes of calculating the radial"
                " velocity correction. This can lead to errors on the order of"
                " metres/second.",
                AstropyUserWarning,
            )

    # Convert the (dimensionless) redshift back to an optical-convention
    # velocity correction.
    return (zb - 1) * speed_of_light
# Table interactions
@classmethod
def guess_from_table(cls, table, **coord_kwargs):
    r"""
    A convenience method to create and return a new SkyCoord from the data
    in an astropy Table.

    This method matches table columns that start with the case-insensitive
    names of the components of the requested frames (including
    differentials), if they are also followed by a non-alphanumeric
    character. It will also match columns that *end* with the component name
    if a non-alphanumeric character is *before* it.

    For example, the first rule means columns with names like
    ``'RA[J2000]'`` or ``'ra'`` will be interpreted as ``ra`` attributes for
    `~astropy.coordinates.ICRS` frames, but ``'RAJ2000'`` or ``'radius'``
    are *not*. Similarly, the second rule applied to the
    `~astropy.coordinates.Galactic` frame means that a column named
    ``'gal_l'`` will be used as the ``l`` component, but ``gall`` or
    ``'fill'`` will not.

    The definition of alphanumeric here is based on Unicode's definition
    of alphanumeric, except without ``_`` (which is normally considered
    alphanumeric). So for ASCII, this means the non-alphanumeric characters
    are ``<space>_!"#$%&'()*+,-./\:;<=>?@[]^`{|}~``).

    Parameters
    ----------
    table : `~astropy.table.Table` or subclass
        The table to load data from.
    **coord_kwargs
        Any additional keyword arguments are passed directly to this class's
        constructor.

    Returns
    -------
    newsc : `~astropy.coordinates.SkyCoord` or subclass
        The new instance.

    Raises
    ------
    ValueError
        If more than one match is found in the table for a component,
        unless the additional matches are also valid frame component names.
        If a "coord_kwargs" is provided for a value also found in the table.
    """
    # Resolve the target frame first so we know which representation
    # component names (e.g. ra/dec, l/b, plus "s" differentials) to
    # search for among the table columns.
    _frame_cls, _frame_kwargs = _get_frame_without_data([], coord_kwargs)
    frame = _frame_cls(**_frame_kwargs)
    coord_kwargs["frame"] = coord_kwargs.get("frame", frame)

    representation_component_names = set(
        frame.get_representation_component_names()
    ).union(set(frame.get_representation_component_names("s")))

    comp_kwargs = {}
    for comp_name in representation_component_names:
        # this matches things like 'ra[...]' but *not* 'rad'.
        # note that the "_" must be in there explicitly, because
        # "alphanumeric" usually includes underscores.
        starts_with_comp = comp_name + r"(\W|\b|_)"
        # this part matches stuff like 'center_ra', but *not*
        # 'aura'
        ends_with_comp = r".*(\W|\b|_)" + comp_name + r"\b"
        # the final regex ORs together the two patterns
        rex = re.compile(
            rf"({starts_with_comp})|({ends_with_comp})", re.IGNORECASE | re.UNICODE
        )

        # find all matches
        matches = {col_name for col_name in table.colnames if rex.match(col_name)}

        # now need to select among matches, also making sure we don't have
        # an exact match with another component
        if len(matches) == 0:  # no matches
            continue
        elif len(matches) == 1:  # only one match
            col_name = matches.pop()
        else:  # more than 1 match
            # try to sieve out other components
            matches -= representation_component_names - {comp_name}
            # if there's only one remaining match, it worked.
            if len(matches) == 1:
                col_name = matches.pop()
            else:
                raise ValueError(
                    f'Found at least two matches for component "{comp_name}":'
                    f' "{matches}". Cannot guess coordinates from a table with this'
                    " ambiguity."
                )

        comp_kwargs[comp_name] = table[col_name]

    for k, v in comp_kwargs.items():
        if k in coord_kwargs:
            # BUG FIX: the second literal was missing its f-prefix, so the
            # message previously showed the literal text "{k}" instead of
            # the conflicting keyword's name.
            raise ValueError(
                f'Found column "{v.name}" in table, but it was already provided as'
                f' "{k}" keyword to guess_from_table function.'
            )
        coord_kwargs[k] = v

    return cls(**coord_kwargs)
# Name resolve
@classmethod
def from_name(cls, name, frame="icrs", parse=False, cache=True):
    """Create a SkyCoord by resolving an object name with the CDS name
    resolver.

    The search database, sesame url, and query timeout can be set through
    configuration items in ``astropy.coordinates.name_resolve`` -- see the
    docstring for `~astropy.coordinates.get_icrs_coordinates` for more
    information.

    Parameters
    ----------
    name : str
        The name of the object to get coordinates for, e.g. ``'M42'``.
    frame : str or `BaseCoordinateFrame` class or instance
        The frame to transform the object to.
    parse : bool
        Whether to attempt extracting the coordinates from the name by
        parsing with a regex. For object catalog names with J-coordinates
        embedded (e.g. 'CRTS SSS100805 J194428-420209') this may be much
        faster than a Sesame query, but the result can differ from the
        database coordinates by a few deci-arcseconds; only use it when
        sub-arcsecond accuracy is not required.
    cache : bool, optional
        Determines whether to cache the results or not. To update or
        overwrite an existing value, pass ``cache='update'``.

    Returns
    -------
    coord : SkyCoord
        Instance of the SkyCoord class.
    """
    from .name_resolve import get_icrs_coordinates

    icrs_coord = get_icrs_coordinates(name, parse, cache=cache)
    icrs_sky_coord = cls(icrs_coord)
    # Transform only when a non-ICRS frame was requested.
    if frame not in ("icrs", icrs_coord.__class__):
        return icrs_sky_coord.transform_to(frame)
    return icrs_sky_coord
| SkyCoord |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/roots/mutation.py | {
"start": 19782,
"end": 20555
class ____(graphene.Mutation):
    """Terminates a run.

    GraphQL mutation wrapper: graphene's metaclass consumes ``Output``,
    ``Arguments``, and ``Meta`` to build the schema entry named
    ``TerminateRunMutation``.
    """

    # Payload type returned by this mutation (success / error union).
    Output = graphene.NonNull(GrapheneTerminateRunResult)

    class Arguments:
        # ID of the run to terminate.
        runId = graphene.NonNull(graphene.String)
        # Optional policy selecting safe vs. forced termination.
        terminatePolicy = graphene.Argument(GrapheneTerminateRunPolicy)

    class Meta:
        name = "TerminateRunMutation"

    @capture_error
    @require_permission_check(Permissions.TERMINATE_PIPELINE_EXECUTION)
    def mutate(
        self,
        graphene_info: ResolveInfo,
        runId: str,
        terminatePolicy: Optional[GrapheneTerminateRunPolicy] = None,
    ):
        # Default to SAFE_TERMINATE when the caller supplies no policy.
        return terminate_pipeline_execution(
            graphene_info,
            runId,
            terminatePolicy or GrapheneTerminateRunPolicy.SAFE_TERMINATE,
        )
| GrapheneTerminateRunMutation |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/unpack1.py | {
"start": 131,
"end": 942
# Type-checker test sample for unpack (star) expressions. Several
# statements below are *intentionally* invalid; the "This should generate
# an error" comments mark the expected diagnostics and must not be "fixed".
class ____: ...


# Heterogeneous list: elements are int, str, float, and a class instance;
# unpacking into b preserves the combined element type.
a = [1, "hello", 3.4, Class1()]
b = [*a]


def int_only(a: int): ...


for c in b:
    if not isinstance(c, (float, str)):
        # This should generate an error because c can
        # be an int or foo.
        int_only(c)

    if not isinstance(c, Class1):
        # This should not generate an error.
        int_only(c)


# This should generate an error
x1 = *(1, 2, 3)

x2 = 2, *(1, 2, 3)
x3 = *(1, 2, 3), 2

# Starred assignment targets collect the remaining elements into a list.
[d1, *e1, f1] = [1, 2, 3, 4]
reveal_type(e1, expected_text="list[int]")

[*d2, e2, f2] = [1, 2, 3, 4]
reveal_type(d2, expected_text="list[int]")

[d3, e3, *f3] = (1, 2, 3, 4)
reveal_type(f3, expected_text="list[int]")

[g1, g2, g3] = (1, 2, 3)

# This should generate an error.
[g1, g2, g3, g4] = (1, 2, 3)

# This should generate an error.
[g1, g2] = (1, 2, 3)
| Class2 |
python | pyparsing__pyparsing | examples/simpleBool.py | {
"start": 1225,
"end": 1617
class ____:
    """Base class for an infix boolean operator node ``(a OP b OP ...)``.

    Subclasses set ``repr_symbol`` (operator text used by ``__str__``) and
    ``eval_fn`` (a reducer such as ``all`` or ``any`` applied to the truth
    values of the operands).
    """

    repr_symbol: str = ""
    # BUG FIX: the default must be wrapped in staticmethod(). A bare lambda
    # stored as a class attribute is a function descriptor, so
    # ``self.eval_fn(...)`` would bind it and pass ``self`` as the iterable
    # argument, raising TypeError. Builtins like ``all``/``any`` assigned by
    # subclasses are unaffected because they are not function descriptors.
    eval_fn: Callable[
        [Iterable[bool]], bool
    ] = staticmethod(lambda _: False)

    def __init__(self, t):
        # t is a pyparsing infix-notation match: operands sit at even
        # indices of t[0], operator tokens at odd indices.
        self.args = t[0][0::2]

    def __str__(self) -> str:
        sep = f" {self.repr_symbol} "
        return f"({sep.join(map(str, self.args))})"

    def __bool__(self) -> bool:
        # Reduce the operands' truth values with the operator's function.
        return self.eval_fn(bool(a) for a in self.args)
| BoolBinOp |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.