language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
lepture__authlib
|
authlib/oauth2/rfc6749/requests.py
|
{
"start": 1576,
"end": 4614
}
|
class ____(OAuth2Payload):
def __init__(self, method: str, uri: str, body=None, headers=None):
InsecureTransportError.check(uri)
#: HTTP method
self.method = method
self.uri = uri
#: HTTP headers
self.headers = headers or {}
# Store body for backward compatibility but issue deprecation warning if used
if body is not None:
deprecate(
"'body' parameter in OAuth2Request is deprecated. "
"Use the payload system instead.",
version="1.8",
)
self._body = body
self.payload = None
self.client = None
self.auth_method = None
self.user = None
self.authorization_code = None
self.refresh_token = None
self.credential = None
@property
def args(self):
raise NotImplementedError()
@property
def form(self):
if self._body:
return self._body
raise NotImplementedError()
@property
def data(self):
deprecate(
"'request.data' is deprecated in favor of 'request.payload.data'",
version="1.8",
)
return self.payload.data
@property
def datalist(self) -> defaultdict[str, list]:
deprecate(
"'request.datalist' is deprecated in favor of 'request.payload.datalist'",
version="1.8",
)
return self.payload.datalist
@property
def client_id(self) -> str:
deprecate(
"'request.client_id' is deprecated in favor of 'request.payload.client_id'",
version="1.8",
)
return self.payload.client_id
@property
def response_type(self) -> str:
deprecate(
"'request.response_type' is deprecated in favor of 'request.payload.response_type'",
version="1.8",
)
return self.payload.response_type
@property
def grant_type(self) -> str:
deprecate(
"'request.grant_type' is deprecated in favor of 'request.payload.grant_type'",
version="1.8",
)
return self.payload.grant_type
@property
def redirect_uri(self):
deprecate(
"'request.redirect_uri' is deprecated in favor of 'request.payload.redirect_uri'",
version="1.8",
)
return self.payload.redirect_uri
@property
def scope(self) -> str:
deprecate(
"'request.scope' is deprecated in favor of 'request.payload.scope'",
version="1.8",
)
return self.payload.scope
@property
def state(self):
deprecate(
"'request.state' is deprecated in favor of 'request.payload.state'",
version="1.8",
)
return self.payload.state
@property
def body(self):
deprecate(
"'request.body' is deprecated. Use the payload system instead.",
version="1.8",
)
return self._body
|
OAuth2Request
|
python
|
PrefectHQ__prefect
|
src/prefect/server/utilities/messaging/__init__.py
|
{
"start": 684,
"end": 917
}
|
class ____(Protocol):
"""
A protocol representing a message sent to a message broker.
"""
@property
def data(self) -> Union[str, bytes]: ...
@property
def attributes(self) -> Mapping[str, Any]: ...
|
Message
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/operators/appflow.py
|
{
"start": 5956,
"end": 7214
}
|
class ____(AppflowBaseOperator):
"""
Execute an AppFlow run as is.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AppflowRunOperator`
:param flow_name: The flow name
:param poll_interval: how often in seconds to check the query status
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region: aws region to use
:param wait_for_completion: whether to wait for the run to end to return
"""
def __init__(
self,
flow_name: str,
poll_interval: int = 20,
wait_for_completion: bool = True,
**kwargs,
) -> None:
super().__init__(
flow_name=flow_name,
flow_update=False,
source_field=None,
filter_date=None,
poll_interval=poll_interval,
wait_for_completion=wait_for_completion,
**kwargs,
)
|
AppflowRunOperator
|
python
|
django__django
|
tests/servers/tests.py
|
{
"start": 12968,
"end": 13625
}
|
class ____(LiveServerBase):
def test_fixtures_loaded(self):
"""
Fixtures are properly loaded and visible to the live server thread.
"""
with self.urlopen("/model_view/") as f:
self.assertCountEqual(f.read().splitlines(), [b"jane", b"robert"])
def test_database_writes(self):
"""
Data written to the database by a view can be read.
"""
with self.urlopen("/create_model_instance/"):
pass
self.assertQuerySetEqual(
Person.objects.order_by("pk"),
["jane", "robert", "emily"],
lambda b: b.name,
)
|
LiveServerDatabase
|
python
|
numba__numba
|
numba/tests/test_exceptions.py
|
{
"start": 1192,
"end": 2987
}
|
class ____(Exception):
def __init__(self, arg, value0):
super(UDENoArgSuper, self).__init__()
self.deferarg = arg
self.value0 = value0
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
same = True
same |= self.args == other.args
same |= self.deferarg == other.deferarg
same |= self.value0 == other.value0
return same
def __hash__(self):
return hash((super(UDENoArgSuper).__hash__(), self.deferarg,
self.value0))
def raise_class(exc):
def raiser(i):
if i == 1:
raise exc
elif i == 2:
raise ValueError
elif i == 3:
# The exception type is looked up on a module (issue #1624)
raise np.linalg.LinAlgError
return i
return raiser
def raise_instance(exc, arg):
def raiser(i):
if i == 1:
raise exc(arg, 1)
elif i == 2:
raise ValueError(arg, 2)
elif i == 3:
raise np.linalg.LinAlgError(arg, 3)
return i
return raiser
def raise_instance_runtime_args(exc):
def raiser(i, arg):
if i == 1:
raise exc(arg, 1)
elif i == 2:
raise ValueError(arg, 2)
elif i == 3:
raise np.linalg.LinAlgError(arg, 3)
return i
return raiser
def reraise():
raise
def outer_function(inner):
def outer(i):
if i == 3:
raise OtherError("bar", 3)
return inner(i)
return outer
def assert_usecase(i):
assert i == 1, "bar"
def ude_bug_usecase():
raise UDEArgsToSuper() # oops user forgot args to exception ctor
def raise_runtime_value(arg):
raise ValueError(arg)
|
UDENoArgSuper
|
python
|
dask__dask
|
dask/array/tests/test_dispatch.py
|
{
"start": 8283,
"end": 8980
}
|
class ____:
__array_ufunc__ = None
def __mul__(self, other):
return 42
__rmul__ = __mul__
@pytest.mark.parametrize("arr", [da.from_array([1, 2]), np.asarray([1, 2])])
def test_delegation_unknown_scalar(arr):
s = UnknownScalar()
assert arr * s == 42
assert s * arr == 42
with pytest.raises(
TypeError, match="operand 'UnknownScalar' does not support ufuncs"
):
np.multiply(s, arr)
def test_delegation_specific_cases():
a = da.from_array(["a", "b", ".", "d"])
# Fixes GH6631
assert_eq(a == ".", [False, False, True, False])
assert_eq("." == a, [False, False, True, False])
# Fixes GH6611
assert "b" in a
|
UnknownScalar
|
python
|
pyqtgraph__pyqtgraph
|
pyqtgraph/GraphicsScene/exportDialog.py
|
{
"start": 254,
"end": 467
}
|
class ____(QtWidgets.QListWidgetItem):
def __init__(self, expClass, *args, **kwargs):
QtWidgets.QListWidgetItem.__init__(self, *args, **kwargs)
self.expClass = expClass
|
FormatExportListWidgetItem
|
python
|
tensorflow__tensorflow
|
tensorflow/compiler/tests/pooling_ops_3d_test.py
|
{
"start": 1453,
"end": 14947
}
|
class ____(xla_test.XLATestCase):
def _VerifyValues(self, pool_func, input_sizes, window, strides, padding,
expected):
"""Verifies the output values of the pooling function.
Args:
pool_func: Function to be called: co.MaxPool, co.AvgPool.
input_sizes: Input tensor dimensions.
window: Tuple of kernel dims: planes, rows, cols.
strides: Tuple of strides for dims: planes, rows, cols.
padding: Padding type.
expected: An array containing the expected operation outputs.
"""
total_size = 1
for s in input_sizes:
total_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x = np.arange(1.0, total_size + 1, dtype=np.float32)
x = x.reshape(input_sizes)
with self.session() as sess, self.test_scope():
inputs = array_ops.placeholder(dtypes.float32)
t = pool_func(
inputs,
ksize=[1] + window + [1],
strides=[1] + strides + [1],
padding=padding)
vals = sess.run(t, {inputs: x})
# Verifies values.
actual = vals.flatten()
self.assertAllClose(expected, actual)
def testAvgPool3dValidPadding(self):
expected_output = [20.5, 21.5, 22.5]
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 3, 3, 3],
window=[2, 2, 2],
strides=[2, 2, 2],
padding="VALID",
expected=expected_output)
def testAvgPool3dSamePadding(self):
expected_output = [20.5, 21.5, 22.5, 26.5, 27.5, 28.5]
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 2, 2, 4, 3],
window=[2, 2, 2],
strides=[2, 2, 2],
padding="SAME",
expected=expected_output)
def testAvgPool3dSamePaddingDifferentStrides(self):
expected_output = [1.5, 4.5, 7.5, 17.5, 20.5, 23.5, 33.5, 36.5, 39.5]
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 5, 8, 1, 1],
window=[1, 2, 3],
strides=[2, 3, 1],
padding="SAME",
expected=expected_output)
def testMaxPool3dValidPadding(self):
expected_output = [40.0, 41.0, 42.0]
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 3, 3, 3, 3],
window=[2, 2, 2],
strides=[2, 2, 2],
padding="VALID",
expected=expected_output)
def testMaxPool3dSamePadding(self):
expected_output = [31., 32., 33., 34., 35., 36.]
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 2, 2, 3, 3],
window=[2, 2, 2],
strides=[2, 2, 2],
padding="SAME",
expected=expected_output)
def testMaxPool3dSamePaddingDifferentStrides(self):
expected_output = [2., 5., 8., 18., 21., 24., 34., 37., 40.]
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 5, 8, 1, 1],
window=[1, 2, 3],
strides=[2, 3, 1],
padding="SAME",
expected=expected_output)
# Test pooling on a larger input, with different stride and kernel
# size for the 'z' dimension.
# Simulate max pooling in numpy to get the expected output.
input_data = np.arange(1, 5 * 27 * 27 * 64 + 1).reshape((5, 27, 27, 64))
input_data = np.pad(input_data, [[0, 0], [0, 1], [0, 1], [0, 0]],
mode="constant")
expected_output = input_data[:, 1::2, 1::2, :]
expected_output[:, -1, :, :] = input_data[:, -2, 1::2, :]
expected_output[:, :, -1, :] = input_data[:, 1::2, -2, :]
expected_output[:, -1, -1, :] = input_data[:, -2, -2, :]
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 5, 27, 27, 64],
window=[1, 2, 2],
strides=[1, 2, 2],
padding="SAME",
expected=expected_output.flatten())
def testKernelSmallerThanStride(self):
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 3, 3, 3, 1],
window=[1, 1, 1],
strides=[2, 2, 2],
padding="SAME",
expected=[1, 3, 7, 9, 19, 21, 25, 27])
self._VerifyValues(
nn_ops.max_pool3d,
input_sizes=[1, 7, 7, 7, 1],
window=[2, 2, 2],
strides=[3, 3, 3],
padding="VALID",
expected=[58, 61, 79, 82, 205, 208, 226, 229])
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 3, 3, 3, 1],
window=[1, 1, 1],
strides=[2, 2, 2],
padding="SAME",
expected=[1, 3, 7, 9, 19, 21, 25, 27])
self._VerifyValues(
nn_ops.avg_pool3d,
input_sizes=[1, 7, 7, 7, 1],
window=[2, 2, 2],
strides=[3, 3, 3],
padding="VALID",
expected=[29.5, 32.5, 50.5, 53.5, 176.5, 179.5, 197.5, 200.5])
def _VerifyGradient(self,
pool_func,
pool_grad_func,
input_sizes,
ksize,
strides,
padding,
pool_grad_grad_func=None):
"""Verifies the output values of the pooling gradient function.
Args:
pool_func: Forward pooling function
pool_grad_func: Pooling gradient function for pool_grad_func
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
pool_grad_grad_func: Second-order gradient function, if available.
"""
ksize = [1] + ksize + [1]
strides = [1] + strides + [1]
total_size = np.prod(input_sizes)
x = np.arange(1, total_size + 1, dtype=np.float32).reshape(input_sizes)
with self.session() as sess:
# Use the forward pool function to compute some corresponding outputs
# (needed for the CPU device, and we need the shape in both cases).
with ops.device("CPU"):
inputs = array_ops.placeholder(dtypes.float32, shape=input_sizes)
outputs = pool_func(
inputs,
ksize=ksize,
strides=strides,
padding=padding)
output_vals = np.array(sess.run(outputs, {inputs: x}))
output_gradient_vals = np.arange(
1, output_vals.size + 1, dtype=np.float32)
output_gradient_vals = output_gradient_vals.reshape(output_vals.shape)
output_grad_grad_vals = np.arange(1, x.size + 1, dtype=np.float32)
output_grad_grad_vals = output_grad_grad_vals.reshape(x.shape)
# Use the Tensorflow CPU pooling gradient to compute the expected input
# gradients.
with ops.device("CPU"):
output_gradients = array_ops.placeholder(
dtypes.float32, shape=output_vals.shape)
expected_input_gradients = pool_grad_func(
inputs,
outputs,
output_gradients,
ksize=ksize,
strides=strides,
padding=padding)
expected_input_gradient_vals = sess.run(
expected_input_gradients,
{inputs: x,
output_gradients: output_gradient_vals})
output_grad_gradients = array_ops.placeholder(
dtypes.float32, shape=expected_input_gradient_vals.shape)
if pool_grad_grad_func is not None:
expected_grad_gradients = pool_grad_grad_func(
inputs,
outputs,
output_grad_gradients,
ksize=ksize,
strides=strides,
padding=padding,
data_format="NDHWC")
expected_grad_gradients_vals = sess.run(expected_grad_gradients, {
inputs: x,
output_grad_gradients: output_grad_grad_vals
})
# Run the gradient op on the XLA device
with self.test_scope():
outputs = array_ops.placeholder(dtypes.float32, shape=output_vals.shape)
actual_input_gradients = pool_grad_func(
inputs,
outputs,
output_gradients,
ksize=ksize,
strides=strides,
padding=padding)
if pool_grad_grad_func is not None:
actual_grad_gradients = pool_grad_grad_func(
inputs,
outputs,
output_grad_gradients,
ksize=ksize,
strides=strides,
padding=padding,
data_format="NDHWC")
actual = sess.run(actual_input_gradients, {
inputs: x,
outputs: output_vals,
output_gradients: output_gradient_vals
})
# Compare the Tensorflow and XLA results.
self.assertAllClose(
expected_input_gradient_vals.flatten(),
actual.flatten(),
rtol=1e-5,
atol=1e-6)
self.assertShapeEqual(actual, inputs)
if pool_grad_grad_func is not None:
actual_grad_gradients_vals = sess.run(
actual_grad_gradients, {
inputs: x,
outputs: output_vals,
output_grad_gradients: output_grad_grad_vals
})
# Compare the Tensorflow and XLA results.
self.assertAllClose(
expected_grad_gradients_vals,
actual_grad_gradients_vals,
rtol=1e-4,
atol=1e-6)
self.assertShapeEqual(actual_grad_gradients_vals, outputs)
def testMaxPoolGradValidPadding1_1_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops.max_pool3d_grad,
input_sizes=[1, 3, 3, 3, 1],
ksize=[1, 1, 1],
strides=[1, 1, 1],
padding="VALID",
pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)
def testMaxPoolGradValidPadding2_1_6_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops.max_pool3d_grad,
input_sizes=[2, 3, 3, 6, 3],
ksize=[2, 2, 2],
strides=[1, 1, 1],
padding="VALID",
pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)
def testMaxPoolGradValidPadding2_1_7_3d(self):
# TODO(b/73062247): the bfloat16 implementation of MaxPool3DGradGrad does
# not have enough precision for this test case to pass if
# pool_grad_grad_func is passed.
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops.max_pool3d_grad,
input_sizes=[2, 3, 5, 7, 3],
ksize=[2, 2, 2],
strides=[1, 1, 1],
padding="VALID")
def testMaxPoolGradValidPadding2_2_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops.max_pool3d_grad,
input_sizes=[2, 2, 2, 2, 3],
ksize=[2, 2, 2],
strides=[2, 2, 2],
padding="VALID",
pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)
def testMaxPoolGradSamePadding1_1_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops.max_pool3d_grad,
input_sizes=[2, 3, 2, 4, 1],
ksize=[1, 1, 1],
strides=[1, 1, 1],
padding="SAME",
pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)
def testMaxPoolGradSamePadding2_1_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops.max_pool3d_grad,
input_sizes=[2, 3, 2, 4, 1],
ksize=[2, 2, 2],
strides=[1, 1, 1],
padding="SAME",
pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)
def testMaxPoolGradSamePadding2_2_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops.max_pool3d_grad,
input_sizes=[2, 5, 2, 4, 3],
ksize=[2, 2, 2],
strides=[2, 2, 2],
padding="SAME",
pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)
def testMaxPoolGradSamePadding3_1_3d(self):
self._VerifyGradient(
nn_ops.max_pool3d,
gen_nn_ops.max_pool3d_grad,
input_sizes=[1, 3, 3, 7, 1],
ksize=[3, 3, 3],
strides=[1, 1, 1],
padding="SAME",
pool_grad_grad_func=gen_nn_ops.max_pool3d_grad_grad)
def testAvgPoolGradValidPadding1_1_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[2, 3, 3, 3, 3],
ksize=[1, 1, 1],
strides=[1, 1, 1],
padding="VALID")
def testAvgPoolGradValidPadding2_1_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[2, 3, 3, 3, 3],
ksize=[2, 2, 2],
strides=[1, 1, 1],
padding="VALID")
def testAvgPoolGradValidPadding2_2_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[2, 2, 2, 2, 3],
ksize=[2, 2, 2],
strides=[2, 2, 2],
padding="VALID")
def testAvgPoolGradSamePadding1_1_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[2, 3, 2, 4, 3],
ksize=[1, 1, 1],
strides=[1, 1, 1],
padding="SAME")
def testAvgPoolGradSamePadding2_1_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[1, 2, 2, 2, 1],
ksize=[2, 2, 2],
strides=[1, 1, 1],
padding="SAME")
def testAvgPoolGradSamePadding2_2_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[2, 5, 2, 4, 3],
ksize=[2, 2, 2],
strides=[2, 2, 2],
padding="SAME")
def testAvgPoolGradSamePadding3_1_3d(self):
self._VerifyGradient(
nn_ops.avg_pool3d,
_AvgPoolGrad,
input_sizes=[1, 3, 6, 7, 1],
ksize=[3, 3, 3],
strides=[1, 1, 1],
padding="SAME")
if __name__ == "__main__":
test.main()
|
Pooling3DTest
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-dbt/prefect_dbt/cloud/jobs.py
|
{
"start": 36998,
"end": 43782
}
|
class ____(JobBlock):
"""
Block that holds the information and methods to interact with a dbt Cloud job.
Attributes:
dbt_cloud_credentials: The credentials to use to authenticate with dbt Cloud.
job_id: The id of the dbt Cloud job.
timeout_seconds: The number of seconds to wait for the job to complete.
interval_seconds:
The number of seconds to wait between polling for job completion.
trigger_job_run_options: The options to use when triggering a job run.
Examples:
Load a configured dbt Cloud job block.
```python
from prefect_dbt.cloud import DbtCloudJob
dbt_cloud_job = DbtCloudJob.load("BLOCK_NAME")
```
Triggers a dbt Cloud job, waits for completion, and fetches the results.
```python
from prefect import flow
from prefect_dbt.cloud import DbtCloudCredentials, DbtCloudJob
@flow
def dbt_cloud_job_flow():
dbt_cloud_credentials = DbtCloudCredentials.load("dbt-token")
dbt_cloud_job = DbtCloudJob.load(
dbt_cloud_credentials=dbt_cloud_credentials,
job_id=154217
)
dbt_cloud_job_run = dbt_cloud_job.trigger()
dbt_cloud_job_run.wait_for_completion()
dbt_cloud_job_run.fetch_result()
return dbt_cloud_job_run
dbt_cloud_job_flow()
```
"""
_block_type_name = "dbt Cloud Job"
_logo_url = "https://images.ctfassets.net/gm98wzqotmnx/5zE9lxfzBHjw3tnEup4wWL/9a001902ed43a84c6c96d23b24622e19/dbt-bit_tm.png?h=250" # noqa
_documentation_url = "https://docs.prefect.io/integrations/prefect-dbt" # noqa
dbt_cloud_credentials: DbtCloudCredentials = Field(
default=...,
description="The dbt Cloud credentials to use to authenticate with dbt Cloud.",
) # noqa: E501
job_id: int = Field(
default=..., description="The id of the dbt Cloud job.", title="Job ID"
)
timeout_seconds: int = Field(
default=900,
description="The number of seconds to wait for the job to complete.",
)
interval_seconds: int = Field(
default=10,
description="The number of seconds to wait between polling for job completion.",
)
trigger_job_run_options: TriggerJobRunOptions = Field(
default_factory=TriggerJobRunOptions,
description="The options to use when triggering a job run.",
)
@sync_compatible
async def get_job(self, order_by: Optional[str] = None) -> Dict[str, Any]:
"""
Retrieve information about a dbt Cloud job.
Args:
order_by: The field to order the results by.
Returns:
The job data.
"""
try:
async with self.dbt_cloud_credentials.get_administrative_client() as client:
response = await client.get_job(
job_id=self.job_id,
order_by=order_by,
)
except HTTPStatusError as ex:
raise DbtCloudGetJobFailed(extract_user_message(ex)) from ex
return response.json()["data"]
@sync_compatible
async def trigger(
self, trigger_job_run_options: Optional[TriggerJobRunOptions] = None
) -> DbtCloudJobRun:
"""
Triggers a dbt Cloud job.
Returns:
A representation of the dbt Cloud job run.
"""
try:
trigger_job_run_options = (
trigger_job_run_options or self.trigger_job_run_options
)
async with self.dbt_cloud_credentials.get_administrative_client() as client:
response = await client.trigger_job_run(
job_id=self.job_id, options=trigger_job_run_options
)
except HTTPStatusError as ex:
raise DbtCloudJobRunTriggerFailed(extract_user_message(ex)) from ex
run_data = response.json()["data"]
run_id = run_data.get("id")
run = DbtCloudJobRun(
dbt_cloud_job=self,
run_id=run_id,
)
self.logger.info(
f"dbt Cloud job {self.job_id} run {run_id} successfully triggered. "
f"You can view the status of this run at "
f"https://{self.dbt_cloud_credentials.domain}/#/accounts/"
f"{self.dbt_cloud_credentials.account_id}/projects/"
f"{run_data['project_id']}/runs/{run_id}/"
)
return run
@flow
async def run_dbt_cloud_job(
dbt_cloud_job: DbtCloudJob,
targeted_retries: int = 3,
) -> Dict[str, Any]:
"""
Flow that triggers and waits for a dbt Cloud job run, retrying a
subset of failed nodes if necessary.
Args:
dbt_cloud_job: Block that holds the information and
methods to interact with a dbt Cloud job.
targeted_retries: The number of times to retry failed steps.
Examples:
```python
from prefect import flow
from prefect_dbt.cloud import DbtCloudCredentials, DbtCloudJob
from prefect_dbt.cloud.jobs import run_dbt_cloud_job
@flow
def run_dbt_cloud_job_flow():
dbt_cloud_credentials = DbtCloudCredentials.load("dbt-token")
dbt_cloud_job = DbtCloudJob(
dbt_cloud_credentials=dbt_cloud_credentials, job_id=154217
)
return run_dbt_cloud_job(dbt_cloud_job=dbt_cloud_job)
run_dbt_cloud_job_flow()
```
"""
logger = get_run_logger()
run = await task(dbt_cloud_job.trigger.aio)(dbt_cloud_job)
# Always try waiting for completion at least once
try:
await task(run.wait_for_completion.aio)(run)
result = await task(run.fetch_result.aio)(run)
return result
except DbtCloudJobRunFailed:
if targeted_retries <= 0:
raise DbtCloudJobRunFailed(
f"dbt Cloud job {run.run_id} failed after {targeted_retries} retries."
)
# Continue with retries if targeted_retries > 0
remaining_retries = targeted_retries
while remaining_retries > 0:
logger.info(
f"Retrying job run with ID: {run.run_id} {remaining_retries} more times"
)
run = await task(run.retry_failed_steps.aio)(run)
remaining_retries -= 1
try:
await task(run.wait_for_completion.aio)(run)
result = await task(run.fetch_result.aio)(run)
return result
except DbtCloudJobRunFailed:
if remaining_retries <= 0:
break
raise DbtCloudJobRunFailed(
f"dbt Cloud job {run.run_id} failed after {targeted_retries} retries."
)
|
DbtCloudJob
|
python
|
pytorch__pytorch
|
test/dynamo/test_misc.py
|
{
"start": 421911,
"end": 432800
}
|
class ____(torch._inductor.test_case.TestCase):
@parametrize_pytree_module
def test_tracing_pytree(self, pytree):
def fn(xs):
flat_xs, spec = pytree.tree_flatten(xs)
res = [x.clone() for x in flat_xs]
if pytree.__name__ == "optree":
# The treespec argument comes first in OpTree / JAX PyTree
return pytree.tree_unflatten(spec, res)
return pytree.tree_unflatten(res, spec)
xs = [torch.tensor(i) for i in range(3)]
counter = CompileCounter()
torch.compile(fn, backend=counter, fullgraph=True)(xs)
self.assertEqual(counter.frame_count, 1)
self.assertEqual(counter.op_count, 3)
@parametrize_pytree_module
def test_tracing_nested_pytree(self, pytree):
def fn(xs):
flat_xs, spec = pytree.tree_flatten(xs)
res = [x.clone() for x in flat_xs]
if pytree.__name__ == "optree":
# The treespec argument comes first in OpTree / JAX PyTree
return pytree.tree_unflatten(spec, res)
return pytree.tree_unflatten(res, spec)
xs = [torch.tensor(i) for i in range(3)]
xsl = [xs, xs, xs, xs]
counter = CompileCounter()
comp_out = torch.compile(fn, backend=counter, fullgraph=True)(xsl)
real_out = fn(xsl)
self.assertEqual(comp_out, real_out)
self.assertEqual(counter.frame_count, 1)
self.assertEqual(counter.op_count, 12)
@parametrize_pytree_module
def test_tracing_nested_tuples(self, pytree):
def fn(xs):
flat_xs, spec = pytree.tree_flatten(xs)
res = [x.clone() for x in flat_xs]
if pytree.__name__ == "optree":
# The treespec argument comes first in OpTree / JAX PyTree
return pytree.tree_unflatten(spec, res)
return pytree.tree_unflatten(res, spec)
xs = [torch.tensor(i) for i in range(3)]
xsl = (xs, xs, xs, xs)
counter = CompileCounter()
comp_out = torch.compile(fn, backend=counter, fullgraph=True)(xsl)
real_out = fn(xsl)
self.assertEqual(comp_out, real_out)
self.assertEqual(counter.frame_count, 1)
self.assertEqual(counter.op_count, 12)
@parametrize_pytree_module
def test_tracing_nested_dicts(self, pytree):
def fn(xs):
flat_xs, spec = pytree.tree_flatten(xs)
res = [x.clone() for x in flat_xs]
if pytree.__name__ == "optree":
# The treespec argument comes first in OpTree / JAX PyTree
return pytree.tree_unflatten(spec, res)
return pytree.tree_unflatten(res, spec)
xs = [torch.tensor(i) for i in range(3)]
xsl = {
"a": xs,
"b": xs,
"c": xs,
}
counter = CompileCounter()
comp_out = torch.compile(fn, backend=counter, fullgraph=True)(xsl)
real_out = fn(xsl)
self.assertEqual(comp_out, real_out)
self.assertEqual(counter.frame_count, 1)
self.assertEqual(counter.op_count, 9)
@parametrize_pytree_module
def test_tracing_nested_mixed_all(self, pytree):
def fn(xs):
flat_xs, spec = pytree.tree_flatten(xs)
res = [x.clone() for x in flat_xs]
if pytree.__name__ == "optree":
# The treespec argument comes first in OpTree / JAX PyTree
return pytree.tree_unflatten(spec, res)
return pytree.tree_unflatten(res, spec)
xs = [torch.tensor(i) for i in range(3)]
xsa = (xs, xs)
xsb = {"aa": xsa, "ab": xs}
xsl = {
"a": xs,
"b": xsa,
"c": xsb,
}
counter = CompileCounter()
comp_out = torch.compile(fn, backend=counter, fullgraph=True)(xsl)
real_out = fn(xsl)
self.assertEqual(comp_out, real_out)
self.assertEqual(counter.frame_count, 1)
self.assertEqual(counter.op_count, 18)
@parametrize_pytree_module
def test_tracing_nested_tensor_subclass(self, pytree):
from torch.testing._internal.two_tensor import TwoTensor
from torch.utils.checkpoint import checkpoint
def fn(xs):
nested_xs = [[xs]]
flat_xs, spec = pytree.tree_flatten(xs)
return flat_xs[0].clone()
# use checkpoint to trigger a "sourceless" tensor subclass
def checkpoint_fn(xs):
return checkpoint(fn, xs, use_reentrant=True)
xs = TwoTensor(torch.ones(2, 2), torch.ones(2, 2))
counter = CompileCounter()
torch.compile(checkpoint_fn, backend=counter, fullgraph=True)(xs)
self.assertEqual(counter.frame_count, 1)
self.assertEqual(counter.op_count, 2)
@parametrize_pytree_module
def test_pytree_tree_leaves(self, pytree):
def fn(x):
tree = {
"a": [x, x - 1],
"b": x + 2,
"c": (
x,
3.0,
collections.deque([0.0, -x, 1, 2], maxlen=3),
),
"d": collections.OrderedDict(
{
"e": torch.return_types.qr((2 * x, None)),
"f": MyTuple(x, x + 1, torch.zeros(4, 3)),
},
),
}
leaves = pytree.tree_leaves(tree)
return leaves
x = torch.randn(3, 2)
expected = fn(x)
fn_opt = torch.compile(fullgraph=True)(fn)
actual = fn_opt(x)
self.assertEqual(actual, expected)
@parametrize_pytree_module
def test_pytree_tree_flatten_unflatten(self, pytree):
def fn(x, y):
tree = {
"a": [x, x - 1],
"b": x + 2,
"c": (
x,
3.0,
collections.deque([0.0, -x, 1, 2], maxlen=3),
),
"d": collections.OrderedDict(
{
"e": torch.return_types.qr((2 * x, None)),
"f": MyTuple(x, x + 1, torch.zeros(4, 3)),
},
),
}
leaves, treespec = pytree.tree_flatten(tree)
new_leaves = [
x - 1,
y,
x * y,
3.0,
y - 2,
1,
torch.zeros(2, 2),
2 * y,
-y,
x + y,
x - y,
torch.ones(3, 2),
1,
]
if pytree.__name__ == "optree":
# `None` is a internal node rather than leaf in default OpTree / JAX PyTree
new_leaves.pop()
# The treespec argument comes first in OpTree / JAX PyTree
new_tree = pytree.tree_unflatten(treespec, new_leaves)
else:
new_tree = pytree.tree_unflatten(new_leaves, treespec)
return leaves, new_tree
x = torch.randn(3, 2)
y = torch.randn(3, 2)
expected = fn(x, y)
fn_opt = torch.compile(fullgraph=True)(fn)
actual = fn_opt(x, y)
self.assertEqual(actual, expected)
@parametrize_pytree_module
def test_pytree_tree_map(self, pytree):
def fn(x, y):
tree1 = {
"a": [x, x - 1],
"b": x + 2,
"c": (
x,
3.0,
collections.deque([0.0, -x, 1, 2], maxlen=3),
),
"d": collections.OrderedDict(
{
"e": torch.return_types.qr((2 * x, None)),
"f": MyTuple(x, x + 1, torch.zeros(4, 3)),
},
),
}
tree2 = collections.OrderedDict(
[
("c", (y, 3.0, collections.deque([1, -y, 10.0]))),
("a", [y, y + 1]),
("b", y + 2),
(
"d",
{
"f": MyTuple(torch.ones(4, 3), -y, y + 1),
"e": torch.return_types.qr((2 * y, None)),
},
),
],
)
return pytree.tree_map(lambda u, v: (u, v), tree1, tree2)
x = torch.randn(3, 2)
y = torch.randn(3, 2)
expected = fn(x, y)
fn_opt = torch.compile(fullgraph=True)(fn)
actual = fn_opt(x, y)
self.assertEqual(actual, expected)
@parametrize_pytree_module
def test_pytree_tree_map_dict_order(self, pytree):
def fn(tree):
new_tree = pytree.tree_map(lambda x: x, tree)
return list(new_tree.keys()), list(new_tree.values())
x = torch.randn(3, 2)
fn_opt = torch.compile(fullgraph=True)(fn)
tree1 = {"b": x + 2, "a": x, "c": x - 1}
expected1 = fn(tree1)
actual1 = fn_opt(tree1)
self.assertEqual(actual1, expected1)
tree2 = collections.OrderedDict([("b", x + 2), ("a", x), ("c", x - 1)])
expected2 = fn(tree2)
actual2 = fn_opt(tree2)
self.assertEqual(actual2, expected2)
tree3 = collections.defaultdict(int, {"b": x + 2, "a": x, "c": x - 1})
expected3 = fn(tree3)
actual3 = fn_opt(tree3)
self.assertEqual(actual3, expected3)
@parametrize_pytree_module
def test_pytree_tree_map_only(self, pytree):
if not callable(getattr(pytree, "tree_map_only", None)):
# OpTree and JAX PyTree do not have `tree_map_only`
return
def fn(xs):
def mapper(x):
return x.clone()
y = pytree.tree_map_only(torch.Tensor, mapper, xs)
return y
xs = [torch.tensor(i) for i in range(3)] + ["hi"]
xsa = (xs, xs)
xsb = {"aa": xsa, "ab": xs}
counter = CompileCounter()
comp_out = torch.compile(fn, backend=counter, fullgraph=True)(xsb)
real_out = fn(xsb)
self.assertEqual(comp_out, real_out)
self.assertEqual(counter.frame_count, 1)
self.assertEqual(counter.op_count, 9)
def test_pytree_register_constant_with_side_effect(self):
class Foo:
pass
class Bar:
def __eq__(self, other):
return super().__eq__(other)
def __hash__(self):
return 0
python_pytree.register_constant(Bar)
@torch.compile(backend="eager", fullgraph=True)
def fn(x, obj):
obj.attr = {3: Bar()}
return x + 1
inp = torch.ones(3)
self.assertEqual(fn(inp, Foo()), inp + 1)
|
MiscTestsPyTree
|
python
|
joke2k__faker
|
faker/providers/ssn/he_IL/__init__.py
|
{
"start": 41,
"end": 830
}
|
class ____(SsnProvider):
def ssn(self) -> str:
"""
Returns an Israeli identity number, known as Teudat Zehut ("tz").
https://en.wikipedia.org/wiki/Israeli_identity_card
"""
newID = str(self.generator.random.randrange(111111, 99999999))
newID = newID.zfill(8)
theSum = 0
indexRange = [0, 2, 4, 6]
for i in indexRange:
digit = newID[i]
num = int(digit)
theSum = theSum + num
num = int(newID[i + 1]) * 2
if num > 9:
num = int(str(num)[0]) + int(str(num)[1])
theSum = theSum + num
lastDigit = theSum % 10
if lastDigit != 0:
lastDigit = 10 - lastDigit
return str(newID) + str(lastDigit)
|
Provider
|
python
|
scipy__scipy
|
benchmarks/benchmarks/optimize.py
|
{
"start": 16991,
"end": 20664
}
|
class ____(Benchmark):
    """
    Benchmark the global optimizers using the go_benchmark_functions
    suite
    """
    timeout = 180

    # All benchmark problem classes from go_benchmark_functions, excluding the
    # abstract 'Benchmark' base and the 'Problem*' helpers.
    _functions = dict([
        item for item in inspect.getmembers(gbf, inspect.isclass)
        if (issubclass(item[1], gbf.Benchmark) and
            item[0] not in ('Benchmark') and
            not item[0].startswith('Problem'))
    ])

    # Which functions actually run: none unless SCIPY_XSLOW is set; a
    # comma-separated subset via SCIPY_GLOBAL_BENCH; otherwise everything.
    if not is_xslow():
        _enabled_functions = []
    elif 'SCIPY_GLOBAL_BENCH' in os.environ:
        _enabled_functions = [x.strip() for x in
                              os.environ['SCIPY_GLOBAL_BENCH'].split(',')]
    else:
        _enabled_functions = list(_functions.keys())

    params = [
        _enabled_functions,
        ["success%", "<nfev>", "average time"],
        ['DE', 'basinh.', 'DA', 'DIRECT', 'SHGO'],
    ]
    param_names = ["test function", "result type", "solver"]

    def __init__(self):
        self.enabled = is_xslow()
        # Trials per (function, solver) pair; overridable via env var.
        try:
            self.numtrials = int(os.environ['SCIPY_GLOBAL_BENCH_NUMTRIALS'])
        except (KeyError, ValueError):
            self.numtrials = 100

        # JSON file that caches results across benchmark invocations.
        self.dump_fn = os.path.join(os.path.dirname(__file__),
                                    '..',
                                    'global-bench-results.json',)
        self.results = {}

    def setup(self, name, ret_value, solver):
        if name not in self._enabled_functions:
            # asv convention: NotImplementedError marks the combo as skipped.
            raise NotImplementedError("skipped")

        # load json backing file
        with open(self.dump_fn) as f:
            self.results = json.load(f)

    def teardown(self, name, ret_value, solver):
        if not self.enabled:
            return
        # Persist (possibly updated) results back to the backing file.
        with open(self.dump_fn, 'w') as f:
            json.dump(self.results, f, indent=2, sort_keys=True)

    def track_all(self, name, ret_value, solver):
        """Return the tracked metric, running the solver only on a cache miss."""
        if name in self.results and solver in self.results[name]:
            # have we done the function, and done the solver?
            # if so, then just return the ret_value
            av_results = self.results[name]
            if ret_value == 'success%':
                return (100 * av_results[solver]['nsuccess']
                        / av_results[solver]['ntrials'])
            elif ret_value == '<nfev>':
                return av_results[solver]['mean_nfev']
            elif ret_value == 'average time':
                return av_results[solver]['mean_time']
            else:
                raise ValueError()

        klass = self._functions[name]
        f = klass()
        try:
            b = _BenchOptimizers.from_funcobj(name, f)
            with np.errstate(all='ignore'):
                b.bench_run_global(methods=[solver],
                                   numtrials=self.numtrials)

            av_results = b.average_results()

            # Cache the fresh results so later metrics reuse them.
            if name not in self.results:
                self.results[name] = {}
            self.results[name][solver] = av_results[solver]

            if ret_value == 'success%':
                return (100 * av_results[solver]['nsuccess']
                        / av_results[solver]['ntrials'])
            elif ret_value == '<nfev>':
                return av_results[solver]['mean_nfev']
            elif ret_value == 'average time':
                return av_results[solver]['mean_time']
            else:
                raise ValueError()
        except Exception:
            # Record the traceback instead of crashing the whole benchmark run.
            print("".join(traceback.format_exc()))
            self.results[name] = "".join(traceback.format_exc())

    def setup_cache(self):
        if not self.enabled:
            return

        # create the logfile to start with
        with open(self.dump_fn, 'w') as f:
            json.dump({}, f, indent=2)
|
BenchGlobal
|
python
|
ray-project__ray
|
rllib/models/torch/modules/multi_head_attention.py
|
{
"start": 459,
"end": 2467
}
|
class ____(nn.Module):
    """A multi-head attention layer described in [1]."""

    def __init__(
        self, in_dim: int, out_dim: int, num_heads: int, head_dim: int, **kwargs
    ):
        """
        in_dim: Dimension of input
        out_dim: Dimension of output
        num_heads: Number of attention heads
        head_dim: Output dimension of each attention head
        """
        super().__init__(**kwargs)

        # No bias or non-linearity.
        self._num_heads = num_heads
        self._head_dim = head_dim
        # Single projection producing Q, K and V concatenated along the last dim.
        self._qkv_layer = SlimFC(
            in_size=in_dim, out_size=3 * num_heads * head_dim, use_bias=False
        )
        self._linear_layer = SlimFC(
            in_size=num_heads * head_dim, out_size=out_dim, use_bias=False
        )

    def forward(self, inputs: TensorType) -> TensorType:
        # assumes inputs is (batch, seq_len, in_dim) — TODO confirm against callers
        L = list(inputs.size())[1]  # length of segment
        H = self._num_heads  # number of attention heads
        D = self._head_dim  # attention head dimension

        qkv = self._qkv_layer(inputs)
        queries, keys, values = torch.chunk(input=qkv, chunks=3, dim=-1)
        queries = queries[:, -L:]  # only query based on the segment

        # Reshape to (batch, seq, heads, head_dim) for per-head attention.
        queries = torch.reshape(queries, [-1, L, H, D])
        keys = torch.reshape(keys, [-1, L, H, D])
        values = torch.reshape(values, [-1, L, H, D])

        # Scaled dot-product scores: (batch, query_pos, key_pos, head).
        score = torch.einsum("bihd,bjhd->bihd".replace("bihd", "bijh") if False else "bihd,bjhd->bijh", queries, keys)
        score = score / D**0.5

        # causal mask of the same length as the sequence
        mask = sequence_mask(torch.arange(1, L + 1), dtype=score.dtype)
        mask = mask[None, :, :, None]
        mask = mask.float()

        # Masked-out positions get a large negative value so softmax ≈ 0 there.
        masked_score = score * mask + 1e30 * (mask - 1.0)
        wmat = nn.functional.softmax(masked_score, dim=2)

        out = torch.einsum("bijh,bjhd->bihd", wmat, values)
        # Merge the head and head-dim axes back into one feature axis.
        shape = list(out.size())[:2] + [H * D]
        # temp = torch.cat(temp2, [H * D], dim=0)
        out = torch.reshape(out, shape)
        return self._linear_layer(out)
|
MultiHeadAttention
|
python
|
gevent__gevent
|
src/greentest/3.10/test_context.py
|
{
"start": 449,
"end": 10625
}
|
class ____(unittest.TestCase):
    """Behavioral tests for contextvars.ContextVar, Context and Token."""

    def test_context_var_new_1(self):
        # Constructor requires exactly one positional str name.
        with self.assertRaisesRegex(TypeError, 'takes exactly 1'):
            contextvars.ContextVar()

        with self.assertRaisesRegex(TypeError, 'must be a str'):
            contextvars.ContextVar(1)

        c = contextvars.ContextVar('aaa')
        self.assertEqual(c.name, 'aaa')

        # name is a read-only attribute.
        with self.assertRaises(AttributeError):
            c.name = 'bbb'

        self.assertNotEqual(hash(c), hash('aaa'))

    @isolated_context
    def test_context_var_repr_1(self):
        c = contextvars.ContextVar('a')
        self.assertIn('a', repr(c))

        c = contextvars.ContextVar('a', default=123)
        self.assertIn('123', repr(c))

        # Self-referential default must not cause infinite recursion in repr.
        lst = []
        c = contextvars.ContextVar('a', default=lst)
        lst.append(c)
        self.assertIn('...', repr(c))
        self.assertIn('...', repr(lst))

        t = c.set(1)
        self.assertIn(repr(c), repr(t))
        self.assertNotIn(' used ', repr(t))
        c.reset(t)
        # Token repr reflects that it has been consumed by reset().
        self.assertIn(' used ', repr(t))

    def test_context_subclassing_1(self):
        # None of the three contextvars types may be subclassed.
        with self.assertRaisesRegex(TypeError, 'not an acceptable base type'):
            class MyContextVar(contextvars.ContextVar):
                # Potentially we might want ContextVars to be subclassable.
                pass

        with self.assertRaisesRegex(TypeError, 'not an acceptable base type'):
            class MyContext(contextvars.Context):
                pass

        with self.assertRaisesRegex(TypeError, 'not an acceptable base type'):
            class MyToken(contextvars.Token):
                pass

    def test_context_new_1(self):
        # Context() accepts no arguments at all (but **{} is fine).
        with self.assertRaisesRegex(TypeError, 'any arguments'):
            contextvars.Context(1)
        with self.assertRaisesRegex(TypeError, 'any arguments'):
            contextvars.Context(1, a=1)
        with self.assertRaisesRegex(TypeError, 'any arguments'):
            contextvars.Context(a=1)
        contextvars.Context(**{})

    def test_context_typerrors_1(self):
        # Mapping operations require ContextVar keys.
        ctx = contextvars.Context()

        with self.assertRaisesRegex(TypeError, 'ContextVar key was expected'):
            ctx[1]
        with self.assertRaisesRegex(TypeError, 'ContextVar key was expected'):
            1 in ctx
        with self.assertRaisesRegex(TypeError, 'ContextVar key was expected'):
            ctx.get(1)

    def test_context_get_context_1(self):
        ctx = contextvars.copy_context()
        self.assertIsInstance(ctx, contextvars.Context)

    def test_context_run_1(self):
        ctx = contextvars.Context()

        with self.assertRaisesRegex(TypeError, 'missing 1 required'):
            ctx.run()

    def test_context_run_2(self):
        # run() forwards positional and keyword arguments to the callable.
        ctx = contextvars.Context()

        def func(*args, **kwargs):
            kwargs['spam'] = 'foo'
            args += ('bar',)
            return args, kwargs

        for f in (func, functools.partial(func)):
            # partial doesn't support FASTCALL

            self.assertEqual(ctx.run(f), (('bar',), {'spam': 'foo'}))
            self.assertEqual(ctx.run(f, 1), ((1, 'bar'), {'spam': 'foo'}))

            self.assertEqual(
                ctx.run(f, a=2),
                (('bar',), {'a': 2, 'spam': 'foo'}))

            self.assertEqual(
                ctx.run(f, 11, a=2),
                ((11, 'bar'), {'a': 2, 'spam': 'foo'}))

            a = {}
            self.assertEqual(
                ctx.run(f, 11, **a),
                ((11, 'bar'), {'spam': 'foo'}))
            self.assertEqual(a, {})

    def test_context_run_3(self):
        # Exceptions raised inside run() propagate to the caller.
        ctx = contextvars.Context()

        def func(*args, **kwargs):
            1 / 0

        with self.assertRaises(ZeroDivisionError):
            ctx.run(func)
        with self.assertRaises(ZeroDivisionError):
            ctx.run(func, 1, 2)
        with self.assertRaises(ZeroDivisionError):
            ctx.run(func, 1, 2, a=123)

    @isolated_context
    def test_context_run_4(self):
        # Values set while running in ctx1 are visible in ctx1 afterwards.
        ctx1 = contextvars.Context()
        ctx2 = contextvars.Context()
        var = contextvars.ContextVar('var')

        def func2():
            self.assertIsNone(var.get(None))

        def func1():
            self.assertIsNone(var.get(None))
            var.set('spam')
            # Nested run in a different context does not see 'spam'.
            ctx2.run(func2)
            self.assertEqual(var.get(None), 'spam')

            cur = contextvars.copy_context()
            self.assertEqual(len(cur), 1)
            self.assertEqual(cur[var], 'spam')
            return cur

        returned_ctx = ctx1.run(func1)
        self.assertEqual(ctx1, returned_ctx)
        self.assertEqual(returned_ctx[var], 'spam')
        self.assertIn(var, returned_ctx)

    def test_context_run_5(self):
        # Even when the callable raises, its sets stay confined to ctx.
        ctx = contextvars.Context()
        var = contextvars.ContextVar('var')

        def func():
            self.assertIsNone(var.get(None))
            var.set('spam')
            1 / 0

        with self.assertRaises(ZeroDivisionError):
            ctx.run(func)

        self.assertIsNone(var.get(None))

    def test_context_run_6(self):
        ctx = contextvars.Context()
        c = contextvars.ContextVar('a', default=0)

        def fun():
            # Default is visible via the var but not stored in the Context.
            self.assertEqual(c.get(), 0)
            self.assertIsNone(ctx.get(c))

            c.set(42)
            self.assertEqual(c.get(), 42)
            self.assertEqual(ctx.get(c), 42)

        ctx.run(fun)

    def test_context_run_7(self):
        # Re-entering an already-entered Context is an error.
        ctx = contextvars.Context()

        def fun():
            with self.assertRaisesRegex(RuntimeError, 'is already entered'):
                ctx.run(fun)

        ctx.run(fun)

    @isolated_context
    def test_context_getset_1(self):
        c = contextvars.ContextVar('c')
        with self.assertRaises(LookupError):
            c.get()

        self.assertIsNone(c.get(None))

        t0 = c.set(42)
        self.assertEqual(c.get(), 42)
        self.assertEqual(c.get(None), 42)
        # Token records the previous value (MISSING when the var was unset).
        self.assertIs(t0.old_value, t0.MISSING)
        self.assertIs(t0.old_value, contextvars.Token.MISSING)
        self.assertIs(t0.var, c)

        t = c.set('spam')
        self.assertEqual(c.get(), 'spam')
        self.assertEqual(c.get(None), 'spam')
        self.assertEqual(t.old_value, 42)
        c.reset(t)
        self.assertEqual(c.get(), 42)
        self.assertEqual(c.get(None), 42)

        c.set('spam2')
        # A token may be consumed only once.
        with self.assertRaisesRegex(RuntimeError, 'has already been used'):
            c.reset(t)
        self.assertEqual(c.get(), 'spam2')

        ctx1 = contextvars.copy_context()
        self.assertIn(c, ctx1)

        c.reset(t0)
        with self.assertRaisesRegex(RuntimeError, 'has already been used'):
            c.reset(t0)
        self.assertIsNone(c.get(None))

        # The earlier snapshot still holds the value from copy time.
        self.assertIn(c, ctx1)
        self.assertEqual(ctx1[c], 'spam2')
        self.assertEqual(ctx1.get(c, 'aa'), 'spam2')
        self.assertEqual(len(ctx1), 1)
        self.assertEqual(list(ctx1.items()), [(c, 'spam2')])
        self.assertEqual(list(ctx1.values()), ['spam2'])
        self.assertEqual(list(ctx1.keys()), [c])
        self.assertEqual(list(ctx1), [c])

        ctx2 = contextvars.copy_context()
        self.assertNotIn(c, ctx2)
        with self.assertRaises(KeyError):
            ctx2[c]
        self.assertEqual(ctx2.get(c, 'aa'), 'aa')
        self.assertEqual(len(ctx2), 0)
        self.assertEqual(list(ctx2), [])

    @isolated_context
    def test_context_getset_2(self):
        # A token can only reset the var that created it.
        v1 = contextvars.ContextVar('v1')
        v2 = contextvars.ContextVar('v2')

        t1 = v1.set(42)
        with self.assertRaisesRegex(ValueError, 'by a different'):
            v2.reset(t1)

    @isolated_context
    def test_context_getset_3(self):
        c = contextvars.ContextVar('c', default=42)
        ctx = contextvars.Context()

        def fun():
            # Defaults don't appear in Context mapping views.
            self.assertEqual(c.get(), 42)
            with self.assertRaises(KeyError):
                ctx[c]
            self.assertIsNone(ctx.get(c))
            self.assertEqual(ctx.get(c, 'spam'), 'spam')
            self.assertNotIn(c, ctx)
            self.assertEqual(list(ctx.keys()), [])

            t = c.set(1)
            self.assertEqual(list(ctx.keys()), [c])
            self.assertEqual(ctx[c], 1)

            c.reset(t)
            self.assertEqual(list(ctx.keys()), [])
            with self.assertRaises(KeyError):
                ctx[c]

        ctx.run(fun)

    @isolated_context
    def test_context_getset_4(self):
        # Tokens are bound to the Context they were created in.
        c = contextvars.ContextVar('c', default=42)
        ctx = contextvars.Context()

        tok = ctx.run(c.set, 1)

        with self.assertRaisesRegex(ValueError, 'different Context'):
            c.reset(tok)

    @isolated_context
    def test_context_getset_5(self):
        # Mutations inside a copied context don't leak to the outer one.
        c = contextvars.ContextVar('c', default=42)
        c.set([])

        def fun():
            c.set([])
            c.get().append(42)
            self.assertEqual(c.get(), [42])

        contextvars.copy_context().run(fun)
        self.assertEqual(c.get(), [])

    def test_context_copy_1(self):
        ctx1 = contextvars.Context()
        c = contextvars.ContextVar('c', default=42)

        def ctx1_fun():
            c.set(10)

            # A copied context snapshots values; later sets diverge.
            ctx2 = ctx1.copy()
            self.assertEqual(ctx2[c], 10)

            c.set(20)
            self.assertEqual(ctx1[c], 20)
            self.assertEqual(ctx2[c], 10)

            ctx2.run(ctx2_fun)
            self.assertEqual(ctx1[c], 20)
            self.assertEqual(ctx2[c], 30)

        def ctx2_fun():
            self.assertEqual(c.get(), 10)
            c.set(30)
            self.assertEqual(c.get(), 30)

        ctx1.run(ctx1_fun)

    @isolated_context
    def test_context_threads_1(self):
        # Each thread sees its own independent value for the var.
        cvar = contextvars.ContextVar('cvar')

        def sub(num):
            for i in range(10):
                cvar.set(num + i)
                time.sleep(random.uniform(0.001, 0.05))
                self.assertEqual(cvar.get(), num + i)
            return num

        tp = concurrent.futures.ThreadPoolExecutor(max_workers=10)
        try:
            results = list(tp.map(sub, range(10)))
        finally:
            tp.shutdown()
        self.assertEqual(results, list(range(10)))
# HAMT Tests
|
ContextTest
|
python
|
jina-ai__jina
|
jina/enums.py
|
{
"start": 5381,
"end": 5867
}
|
class ProtocolType(BetterEnum):
    """
    Gateway communication protocol
    """

    GRPC = 0
    HTTP = 1
    WEBSOCKET = 2

    @classmethod
    def from_string_list(cls, string_list: List[Union[str, 'ProtocolType']]):
        """
        Returns a list of Enums from a list of strings or enums

        :param string_list: list of strings or enums
        :return: a list of Enums
        """
        converted = []
        for item in string_list:
            # Strings get parsed; anything else is assumed to be an enum already.
            if isinstance(item, str):
                converted.append(cls.from_string(item))
            else:
                converted.append(item)
        return converted
|
ProtocolType
|
python
|
pytorch__pytorch
|
test/distributed/fsdp/test_fsdp_checkpoint.py
|
{
"start": 9459,
"end": 9940
}
|
class CheckpointModule(nn.Module):
    """Four stacked 100x100 linear layers with optional activation checkpointing.

    Args:
        checkpoint: when True, run the stack through
            ``torch.utils.checkpoint.checkpoint`` to trade compute for memory.
        use_reentrant: forwarded to ``checkpoint`` to pick the reentrant or
            non-reentrant implementation.
    """

    def __init__(self, checkpoint: bool = False, use_reentrant: bool = True):
        super().__init__()
        layers = [nn.Linear(100, 100) for _ in range(4)]
        self.seq = nn.Sequential(*layers)
        self.checkpoint = checkpoint
        self.use_reentrant = use_reentrant

    def forward(self, x):
        if self.checkpoint:
            return checkpoint(self.seq, x, use_reentrant=self.use_reentrant)
        return self.seq(x)
|
CheckpointModule
|
python
|
django__django
|
tests/known_related_objects/models.py
|
{
"start": 300,
"end": 495
}
|
class ____(models.Model):
    """A pool within a tournament, run by a single organiser."""

    name = models.CharField(max_length=30)
    # Deleting the tournament or organiser cascades to its pools.
    tournament = models.ForeignKey(Tournament, models.CASCADE)
    organiser = models.ForeignKey(Organiser, models.CASCADE)
|
Pool
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/sensors/test_ecs.py
|
{
"start": 4242,
"end": 7105
}
|
class ____(EcsBaseTestCase):
    """Tests for the ECS cluster-state sensor's poke/terminal-state behavior."""

    @pytest.mark.parametrize(
        ("return_state", "expected"), [("ACTIVE", True), ("PROVISIONING", False), ("DEPROVISIONING", False)]
    )
    def test_default_values_poke(self, return_state, expected):
        # Default target state is ACTIVE; only that state pokes True.
        task = self.create_rendered_task(EcsClusterStateSensor, cluster_name=TEST_CLUSTER_NAME)
        with mock.patch.object(task.hook, "get_cluster_state") as m:
            m.return_value = return_state
            assert task.poke({}) == expected
            m.assert_called_once_with(cluster_name=TEST_CLUSTER_NAME)

    @pytest.mark.parametrize("return_state", ["FAILED", "INACTIVE"])
    def test_default_values_terminal_state(self, return_state):
        # Default failure states raise instead of returning False.
        task = self.create_rendered_task(EcsClusterStateSensor, cluster_name=TEST_CLUSTER_NAME)
        with mock.patch.object(task.hook, "get_cluster_state") as m:
            m.return_value = return_state
            with pytest.raises(AirflowException, match="Terminal state reached"):
                task.poke({})
            m.assert_called_once_with(cluster_name=TEST_CLUSTER_NAME)

    @pytest.mark.parametrize(
        ("target_state", "return_state", "expected"),
        [
            (EcsClusterStates.ACTIVE, "ACTIVE", True),
            (EcsClusterStates.ACTIVE, "DEPROVISIONING", False),
            (EcsClusterStates.DEPROVISIONING, "ACTIVE", False),
            (EcsClusterStates.DEPROVISIONING, "DEPROVISIONING", True),
        ],
    )
    def test_custom_values_poke(self, target_state, return_state, expected):
        # A custom target_state pokes True only on an exact match.
        task = self.create_rendered_task(
            EcsClusterStateSensor, cluster_name=TEST_CLUSTER_NAME, target_state=target_state
        )
        with mock.patch.object(task.hook, "get_cluster_state") as m:
            m.return_value = return_state
            assert task.poke({}) == expected
            m.assert_called_once_with(cluster_name=TEST_CLUSTER_NAME)

    @pytest.mark.parametrize(
        ("failure_states", "return_state"),
        [
            ({EcsClusterStates.ACTIVE}, "ACTIVE"),
            ({EcsClusterStates.PROVISIONING, EcsClusterStates.DEPROVISIONING}, "DEPROVISIONING"),
            ({EcsClusterStates.PROVISIONING, EcsClusterStates.DEPROVISIONING}, "PROVISIONING"),
        ],
    )
    def test_custom_values_terminal_state(self, failure_states, return_state):
        # Custom failure_states override the defaults and trigger the exception.
        task = self.create_rendered_task(
            EcsClusterStateSensor,
            cluster_name=TEST_CLUSTER_NAME,
            target_state=EcsClusterStates.FAILED,
            failure_states=failure_states,
        )
        with mock.patch.object(task.hook, "get_cluster_state") as m:
            m.return_value = return_state
            with pytest.raises(AirflowException, match="Terminal state reached"):
                task.poke({})
            m.assert_called_once_with(cluster_name=TEST_CLUSTER_NAME)
|
TestEcsClusterStateSensor
|
python
|
pypa__warehouse
|
warehouse/manage/views/organizations.py
|
{
"start": 20869,
"end": 26152
}
|
class ____:
    """Views for creating and managing an organization's Stripe subscription."""

    def __init__(self, organization, request):
        self.organization = organization
        self.request = request
        self.billing_service = request.find_service(IBillingService, context=None)
        self.subscription_service = request.find_service(
            ISubscriptionService, context=None
        )
        self.organization_service = request.find_service(
            IOrganizationService, context=None
        )

    @property
    def customer_id(self):
        """Return the org's Stripe customer id, creating the customer lazily."""
        if self.organization.customer is None:
            customer = self.billing_service.create_customer(
                name=self.organization.customer_name(
                    self.request.registry.settings["site.name"]
                ),
                description=self.organization.description,
            )
            stripe_customer = self.subscription_service.add_stripe_customer(
                customer_id=customer["id"],
            )
            # Link the new Stripe customer record to this organization.
            self.organization_service.add_organization_stripe_customer(
                organization_id=self.organization.id,
                stripe_customer_id=stripe_customer.id,
            )
            return customer["id"]
        return self.organization.customer.customer_id

    @property
    def price_id(self):
        """Return the default subscription price id, synced with Stripe."""
        # Get or create default subscription price with subscription service.
        default_subscription_price = (
            self.subscription_service.get_or_create_default_subscription_price()
        )
        # Synchronize product and price with billing service.
        self.billing_service.sync_product(
            default_subscription_price.subscription_product
        )
        self.billing_service.sync_price(default_subscription_price)
        return default_subscription_price.price_id

    @property
    def return_url(self):
        # Where Stripe sends the user back; honors an optional ?next= param.
        return urljoin(
            self.request.application_url,
            self.request.GET.get(
                "next", self.request.route_path("manage.organizations")
            ),
        )

    def create_subscription(self):
        """Redirect to a Stripe checkout session (or its local mock)."""
        # Create checkout session.
        checkout_session = self.billing_service.create_checkout_session(
            customer_id=self.customer_id,
            price_ids=[self.price_id],
            success_url=self.return_url,
            cancel_url=self.return_url,
        )
        create_subscription_url = checkout_session["url"]
        if isinstance(self.billing_service, MockStripeBillingService):
            # Use local mock of billing UI.
            create_subscription_url = self.request.route_path(
                "mock.billing.checkout-session",
                organization_name=self.organization.normalized_name,
            )
        return HTTPSeeOther(create_subscription_url)

    def manage_subscription(self):
        """Redirect to the Stripe billing portal (or its local mock)."""
        portal_session = self.billing_service.create_portal_session(
            customer_id=self.customer_id,
            return_url=self.return_url,
        )
        manage_subscription_url = portal_session["url"]
        if isinstance(self.billing_service, MockStripeBillingService):
            # Use local mock of billing UI.
            manage_subscription_url = self.request.route_path(
                "mock.billing.portal-session",
                organization_name=self.organization.normalized_name,
            )
        return HTTPSeeOther(manage_subscription_url)

    @view_config(
        route_name="manage.organization.activate_subscription",
        renderer="warehouse:templates/manage/organization/activate_subscription.html",
    )
    def activate_subscription(self):
        """Record ToS agreement, then send the user on to the subscription flow."""
        form = OrganizationActivateBillingForm(self.request.POST)
        if self.request.method == "POST" and form.validate():
            self.organization_service.record_tos_engagement(
                self.organization.id,
                self.request.registry.settings.get("terms.revision"),
                TermsOfServiceEngagement.Agreed,
            )
            route = self.request.route_path(
                "manage.organization.subscription",
                organization_name=self.organization.normalized_name,
            )
            return HTTPSeeOther(route)
        return {"organization": self.organization, "form": form}

    @view_config(route_name="manage.organization.subscription")
    def create_or_manage_subscription(self):
        """Dispatch to create or manage depending on existing subscriptions."""
        # Organizations must be enabled.
        if not self.request.organization_access:
            raise HTTPNotFound()
        if not self.organization.manageable_subscription:
            # Create subscription if there are no manageable subscription.
            # This occurs if no subscription exists, or all subscriptions have reached
            # a terminal state of Canceled.
            return self.create_subscription()
        else:
            # Manage subscription if there is an existing subscription.
            return self.manage_subscription()
@view_defaults(
route_name="manage.organization.teams",
context=Organization,
renderer="warehouse:templates/manage/organization/teams.html",
uses_session=True,
require_active_organization=True,
require_csrf=True,
require_methods=False,
permission=Permissions.OrganizationTeamsManage,
has_translations=True,
require_reauth=True,
)
|
ManageOrganizationBillingViews
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-number-of-people-that-can-be-caught-in-tag.py
|
{
"start": 65,
"end": 631
}
|
class Solution(object):
    def catchMaximumAmountofPeople(self, team, dist):
        """
        :type team: List[int]
        :type dist: int
        :rtype: int
        """
        # Two-pointer greedy: it_idx scans for "it" players (1s),
        # people_idx scans for catchable people (0s); pair them when
        # they are within `dist` of each other.
        caught = 0
        it_idx = people_idx = 0
        n = len(team)
        while it_idx < n and people_idx < n:
            if team[it_idx] != 1 or it_idx + dist < people_idx:
                # Not an "it" player, or too far behind — advance.
                it_idx += 1
            elif team[people_idx] != 0 or people_idx + dist < it_idx:
                # Not a catchable person, or too far behind — advance.
                people_idx += 1
            else:
                # Within range: catch and move both pointers.
                caught += 1
                it_idx += 1
                people_idx += 1
        return caught
# Time: O(n)
# Space: O(1)
# greedy with sliding window solution
|
Solution
|
python
|
PrefectHQ__prefect
|
tests/test_exceptions.py
|
{
"start": 3464,
"end": 4285
}
|
class ____:
    """Tests for SignatureMismatchError construction and pickling."""

    def test_from_bad_params(self):
        # The factory formats expected vs. provided parameter names.
        expected = (
            "Function expects parameters ['dog', 'cat'] but was provided with"
            " parameters ['puppy', 'kitty']"
        )
        signature_mismatch_error = SignatureMismatchError.from_bad_params(
            ["dog", "cat"], ["puppy", "kitty"]
        )
        assert str(signature_mismatch_error) == expected

    def test_pickle_roundtrip(self):
        # Message and args must survive a cloudpickle round trip.
        signature_mismatch_error = SignatureMismatchError.from_bad_params(
            ["dog", "cat"], ["puppy", "kitty"]
        )
        pickled = cloudpickle.dumps(signature_mismatch_error)
        unpickled = cloudpickle.loads(pickled)
        assert str(signature_mismatch_error) == str(unpickled)
        assert signature_mismatch_error.args == unpickled.args
|
TestSignatureMismatchError
|
python
|
getsentry__sentry
|
src/social_auth/backends/__init__.py
|
{
"start": 6663,
"end": 8528
}
|
class OAuthBackend(SocialAuthBackend):
    """OAuth authentication backend base class.

    EXTRA_DATA defines a set of name that will be stored in
    extra_data field. It must be a list of tuples with
    name and alias.

    Also settings will be inspected to get more values names that should be
    stored on extra_data field. Setting name is created from current backend
    name (all uppercase) plus _EXTRA_DATA.

    access_token is always stored.
    """

    EXTRA_DATA: list[tuple[str, str]] | None = None
    ID_KEY = "id"

    def get_user_id(self, details, response):
        """OAuth providers return an unique user id in response"""
        return response[self.ID_KEY]

    @classmethod
    def extra_data(cls, user, uid, response, details=None):
        """Return access_token and extra defined names to store in
        extra_data field.

        Entries come from ``cls.EXTRA_DATA`` plus the
        ``<BACKEND>_EXTRA_DATA`` setting; each entry is a name, a
        ``(name, alias)`` pair, or a ``(name, alias, discard)`` triple.
        Malformed entries raise ``BackendError``.
        """
        data = {"access_token": response.get("access_token", "")}
        name = cls.name.replace("-", "_").upper()
        names = (cls.EXTRA_DATA or []) + setting(name + "_EXTRA_DATA", [])
        for entry in names:
            if isinstance(entry, str):
                entry = (entry,)
            try:
                if len(entry) == 3:
                    name, alias, discard = entry
                elif len(entry) == 2:
                    (name, alias), discard = entry, False
                elif len(entry) == 1:
                    (name,), (alias,), discard = entry, entry, False
                else:
                    # Fixed: the original used `"..." % entry` with no %s
                    # placeholder, which raised TypeError ("not all arguments
                    # converted") instead of the intended ValueError message.
                    raise ValueError(f"invalid tuple for EXTRA_DATA entry: {entry}")

                value = response.get(name)
                # discard=True drops entries whose response value is falsy.
                if discard and not value:
                    continue
                data[alias] = value
            except (TypeError, ValueError):
                raise BackendError(f"invalid entry: {entry}")
        return data
|
OAuthBackend
|
python
|
pydantic__pydantic
|
pydantic/experimental/pipeline.py
|
{
"start": 22852,
"end": 23582
}
|
class ____(Protocol):
    """Structural type for any object that reports a length via ``len()``."""

    def __len__(self) -> int: ...
# Output-type variables used by the pipeline's constraint transforms; each is
# bounded by the protocol the corresponding constraint requires.
_NewOutGt = TypeVar('_NewOutGt', bound=annotated_types.SupportsGt)
_NewOutGe = TypeVar('_NewOutGe', bound=annotated_types.SupportsGe)
_NewOutLt = TypeVar('_NewOutLt', bound=annotated_types.SupportsLt)
_NewOutLe = TypeVar('_NewOutLe', bound=annotated_types.SupportsLe)
_NewOutLen = TypeVar('_NewOutLen', bound=_SupportsLen)
_NewOutDiv = TypeVar('_NewOutDiv', bound=annotated_types.SupportsDiv)
_NewOutMod = TypeVar('_NewOutMod', bound=annotated_types.SupportsMod)
_NewOutDatetime = TypeVar('_NewOutDatetime', bound=datetime.datetime)
_NewOutInterval = TypeVar('_NewOutInterval', bound=_SupportsRange)
# In/out variables for composing two pipeline steps.
_OtherIn = TypeVar('_OtherIn')
_OtherOut = TypeVar('_OtherOut')
|
_SupportsLen
|
python
|
pandas-dev__pandas
|
pandas/tests/reshape/concat/test_dataframe.py
|
{
"start": 158,
"end": 9154
}
|
class ____:
    """Tests for pd.concat with DataFrame inputs: dtypes, keys, axes, indexes."""

    @pytest.mark.xfail(reason="GH#62888 the `mi[2][1] is 1` check fails")
    def test_concat_multiindex_level_bool_and_numeric(self):
        # GH#21108, GH#45101
        left = DataFrame([123, 456], columns=["data"], index=[True, False])
        right = DataFrame(
            [55, 983, 69, 112, 0], columns=["data"], index=[1, 2, 3, 4, 99]
        )
        result = concat({"One": left, "Two": right})
        # in particular, the first two entries should not be cast to ints, the
        # other 1 should not cast to True
        mi = pd.MultiIndex.from_arrays(
            [
                ["One"] * 2 + ["Two"] * 5,
                np.array([True, False, 1, 2, 3, 4, 99], dtype=object),
            ],
        )
        assert mi[0][1] is True
        assert type(mi[2][1]) is int
        expected = DataFrame({"data": [123, 456, 55, 983, 69, 112, 0]}, index=mi)
        tm.assert_frame_equal(result, expected)

    def test_concat_multiple_frames_dtypes(self):
        # GH#2759
        # Mixed float32/float64 frames keep their original dtypes on axis=1.
        df1 = DataFrame(data=np.ones((10, 2)), columns=["foo", "bar"], dtype=np.float64)
        df2 = DataFrame(data=np.ones((10, 2)), dtype=np.float32)
        results = concat((df1, df2), axis=1).dtypes
        expected = Series(
            [np.dtype("float64")] * 2 + [np.dtype("float32")] * 2,
            index=["foo", "bar", 0, 1],
        )
        tm.assert_series_equal(results, expected)

    def test_concat_tuple_keys(self):
        # GH#14438
        # Tuple keys should expand into MultiIndex levels.
        df1 = DataFrame(np.ones((2, 2)), columns=list("AB"))
        df2 = DataFrame(np.ones((3, 2)) * 2, columns=list("AB"))
        results = concat((df1, df2), keys=[("bee", "bah"), ("bee", "boo")])
        expected = DataFrame(
            {
                "A": {
                    ("bee", "bah", 0): 1.0,
                    ("bee", "bah", 1): 1.0,
                    ("bee", "boo", 0): 2.0,
                    ("bee", "boo", 1): 2.0,
                    ("bee", "boo", 2): 2.0,
                },
                "B": {
                    ("bee", "bah", 0): 1.0,
                    ("bee", "bah", 1): 1.0,
                    ("bee", "boo", 0): 2.0,
                    ("bee", "boo", 1): 2.0,
                    ("bee", "boo", 2): 2.0,
                },
            }
        )
        tm.assert_frame_equal(results, expected)

    def test_concat_named_keys(self):
        # GH#14252
        # A named keys Index (or explicit names=) names the outer level.
        df = DataFrame({"foo": [1, 2], "bar": [0.1, 0.2]})
        index = Index(["a", "b"], name="baz")
        concatted_named_from_keys = concat([df, df], keys=index)
        expected_named = DataFrame(
            {"foo": [1, 2, 1, 2], "bar": [0.1, 0.2, 0.1, 0.2]},
            index=pd.MultiIndex.from_product((["a", "b"], [0, 1]), names=["baz", None]),
        )
        tm.assert_frame_equal(concatted_named_from_keys, expected_named)

        index_no_name = Index(["a", "b"], name=None)
        concatted_named_from_names = concat([df, df], keys=index_no_name, names=["baz"])
        tm.assert_frame_equal(concatted_named_from_names, expected_named)

        concatted_unnamed = concat([df, df], keys=index_no_name)
        expected_unnamed = DataFrame(
            {"foo": [1, 2, 1, 2], "bar": [0.1, 0.2, 0.1, 0.2]},
            index=pd.MultiIndex.from_product((["a", "b"], [0, 1]), names=[None, None]),
        )
        tm.assert_frame_equal(concatted_unnamed, expected_unnamed)

    def test_concat_axis_parameter(self):
        # GH#14369
        # String axis aliases ("index"/"rows"/"columns") match 0/1.
        df1 = DataFrame({"A": [0.1, 0.2]}, index=range(2))
        df2 = DataFrame({"A": [0.3, 0.4]}, index=range(2))

        # Index/row/0 DataFrame
        expected_index = DataFrame({"A": [0.1, 0.2, 0.3, 0.4]}, index=[0, 1, 0, 1])

        concatted_index = concat([df1, df2], axis="index")
        tm.assert_frame_equal(concatted_index, expected_index)

        concatted_row = concat([df1, df2], axis="rows")
        tm.assert_frame_equal(concatted_row, expected_index)

        concatted_0 = concat([df1, df2], axis=0)
        tm.assert_frame_equal(concatted_0, expected_index)

        # Columns/1 DataFrame
        expected_columns = DataFrame(
            [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=["A", "A"]
        )

        concatted_columns = concat([df1, df2], axis="columns")
        tm.assert_frame_equal(concatted_columns, expected_columns)

        concatted_1 = concat([df1, df2], axis=1)
        tm.assert_frame_equal(concatted_1, expected_columns)

        series1 = Series([0.1, 0.2])
        series2 = Series([0.3, 0.4])

        # Index/row/0 Series
        expected_index_series = Series([0.1, 0.2, 0.3, 0.4], index=[0, 1, 0, 1])

        concatted_index_series = concat([series1, series2], axis="index")
        tm.assert_series_equal(concatted_index_series, expected_index_series)

        concatted_row_series = concat([series1, series2], axis="rows")
        tm.assert_series_equal(concatted_row_series, expected_index_series)

        concatted_0_series = concat([series1, series2], axis=0)
        tm.assert_series_equal(concatted_0_series, expected_index_series)

        # Columns/1 Series
        expected_columns_series = DataFrame(
            [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=[0, 1]
        )

        concatted_columns_series = concat([series1, series2], axis="columns")
        tm.assert_frame_equal(concatted_columns_series, expected_columns_series)

        concatted_1_series = concat([series1, series2], axis=1)
        tm.assert_frame_equal(concatted_1_series, expected_columns_series)

        # Testing ValueError
        with pytest.raises(ValueError, match="No axis named"):
            concat([series1, series2], axis="something")

    def test_concat_numerical_names(self):
        # GH#15262, GH#12223
        # Integer-valued MultiIndex level names must be preserved.
        df = DataFrame(
            {"col": range(9)},
            dtype="int32",
            index=(
                pd.MultiIndex.from_product(
                    [["A0", "A1", "A2"], ["B0", "B1", "B2"]], names=[1, 2]
                )
            ),
        )
        result = concat((df.iloc[:2, :], df.iloc[-2:, :]))
        expected = DataFrame(
            {"col": [0, 1, 7, 8]},
            dtype="int32",
            index=pd.MultiIndex.from_tuples(
                [("A0", "B0"), ("A0", "B1"), ("A2", "B1"), ("A2", "B2")], names=[1, 2]
            ),
        )
        tm.assert_frame_equal(result, expected)

    def test_concat_astype_dup_col(self):
        # GH#23049
        # astype must work on a frame with duplicate column labels.
        df = DataFrame([{"a": "b"}])
        df = concat([df, df], axis=1)

        result = df.astype("category")
        expected = DataFrame(
            np.array(["b", "b"]).reshape(1, 2), columns=["a", "a"]
        ).astype("category")
        tm.assert_frame_equal(result, expected)

    def test_concat_dataframe_keys_bug(self, sort):
        t1 = DataFrame(
            {"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
        )
        t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})

        # it works
        result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
        assert list(result.columns) == [("t1", "value"), ("t2", "value")]

    def test_concat_bool_with_int(self):
        # GH#42092 we may want to change this to return object, but that
        # would need a deprecation
        df1 = DataFrame(Series([True, False, True, True], dtype="bool"))
        df2 = DataFrame(Series([1, 0, 1], dtype="int64"))
        result = concat([df1, df2])
        expected = concat([df1.astype("int64"), df2])
        tm.assert_frame_equal(result, expected)

    def test_concat_duplicates_in_index_with_keys(self):
        # GH#42651
        # Duplicate index values must survive, but the level keeps unique labels.
        index = [1, 1, 3]
        data = [1, 2, 3]

        df = DataFrame(data=data, index=index)
        result = concat([df], keys=["A"], names=["ID", "date"])
        mi = pd.MultiIndex.from_product([["A"], index], names=["ID", "date"])
        expected = DataFrame(data=data, index=mi)
        tm.assert_frame_equal(result, expected)
        tm.assert_index_equal(result.index.levels[1], Index([1, 3], name="date"))

    def test_outer_sort_columns(self):
        # GH#47127
        # sort=True orders mixed int/str column labels with ints first.
        df1 = DataFrame({"A": [0], "B": [1], 0: 1})
        df2 = DataFrame({"A": [100]})
        result = concat([df1, df2], ignore_index=True, join="outer", sort=True)
        expected = DataFrame({0: [1.0, np.nan], "A": [0, 100], "B": [1.0, np.nan]})
        tm.assert_frame_equal(result, expected)

    def test_inner_sort_columns(self):
        # GH#47127
        df1 = DataFrame({"A": [0], "B": [1], 0: 1})
        df2 = DataFrame({"A": [100], 0: 2})
        result = concat([df1, df2], ignore_index=True, join="inner", sort=True)
        expected = DataFrame({0: [1, 2], "A": [0, 100]})
        tm.assert_frame_equal(result, expected)

    def test_sort_columns_one_df(self):
        # GH#47127
        # Sorting applies even with a single input frame.
        df1 = DataFrame({"A": [100], 0: 2})
        result = concat([df1], ignore_index=True, join="inner", sort=True)
        expected = DataFrame({0: [2], "A": [100]})
        tm.assert_frame_equal(result, expected)
|
TestDataFrameConcat
|
python
|
apache__airflow
|
dev/breeze/src/airflow_breeze/commands/main_command.py
|
{
"start": 2897,
"end": 12565
}
|
class MainGroupWithAliases(BreezeGroup):
    """Click group that additionally resolves removed top-level commands."""

    def get_command(self, ctx: Context, cmd_name: str):
        # Commands that still exist resolve normally and win over aliases.
        found = click.Group.get_command(self, ctx, cmd_name)
        if found is not None:
            return found

        if cmd_name != "static-checks":
            # Unknown command: let click report it.
            return None

        # `static-checks` was removed in favor of `prek`; tell the user how
        # to migrate and exit with an error.
        print_removed(
            "static-checks",
            "prek",
            "\nYou can install prek with:\n"
            "\n[special]uv tool install prek[/]\n\n"
            "Followed by (in airflow repo):\n\n"
            "[special]prek install -f[/]\n",
        )
        sys.exit(1)
@click.group(
cls=MainGroupWithAliases,
invoke_without_command=True,
context_settings={"help_option_names": ["-h", "--help"]},
)
@option_answer
@option_auth_manager
@option_backend
@option_builder
@option_db_reset
@option_docker_host
@option_dry_run
@option_forward_credentials
@option_github_repository
@option_all_integration
@option_max_time
@option_mysql_version
@option_postgres_version
@option_python
@option_project_name
@option_standalone_dag_processor
@option_use_uv
@option_uv_http_timeout
@option_verbose
@click.pass_context
def main(ctx: click.Context, **kwargs: dict[str, Any]):
from airflow_breeze.commands.developer_commands import shell
check_for_rosetta_environment()
check_for_python_emulation()
generate_provider_dependencies_if_needed()
if not ctx.invoked_subcommand:
ctx.forward(shell, extra_args={})
def check_for_python_emulation():
try:
system_machine = subprocess.check_output(["uname", "-m"], text=True).strip()
python_machine = platform.uname().machine
if system_machine != python_machine:
from airflow_breeze.utils.console import get_console
get_console().print(
f"\n\n[error]Your Python architecture is {python_machine} and "
f"system architecture is {system_machine}[/]"
)
get_console().print(
"[warning]This is very bad and your Python is 10x slower as it is emulated[/]"
)
get_console().print(
"[warning]You likely installed your Python wrongly and you should "
"remove it and reinstall from scratch[/]\n"
)
from inputimeout import TimeoutOccurred, inputimeout
try:
user_status = inputimeout(
prompt="Are you REALLY sure you want to continue? "
"(answer with y otherwise we exit in 20s)\n",
timeout=20,
)
if user_status.upper() not in ["Y", "YES"]:
sys.exit(1)
except TimeoutOccurred:
from airflow_breeze.utils.console import get_console
get_console().print("\nNo answer, exiting...")
sys.exit(1)
except FileNotFoundError:
pass
except subprocess.CalledProcessError:
pass
except PermissionError:
pass
def check_for_rosetta_environment():
if sys.platform != "darwin" or platform.processor() == "i386":
return
from inputimeout import TimeoutOccurred, inputimeout
try:
runs_in_rosetta = subprocess.check_output(
["sysctl", "-n", "sysctl.proc_translated"],
text=True,
stderr=subprocess.DEVNULL,
).strip()
if runs_in_rosetta == "1":
from airflow_breeze.utils.console import get_console
get_console().print(
"\n\n[error]You are starting breeze in `rosetta 2` emulated environment on Mac[/]\n"
)
get_console().print(
"[warning]This is very bad and your Python is 10x slower as it is emulated[/]\n"
)
get_console().print(
"You have emulated Python interpreter (Intel rather than ARM). You should check:\n\n"
' * Your IDE (PyCharm/VSCode/Intellij): the "About" window should show `aarch64` '
'not `x86_64` in "Runtime version".\n'
' * Your python: run "python -c '
'import platform; print(platform.uname().machine)"). '
"It should show `arm64` not `x86_64`.\n"
' * Your `brew`: run "brew config" and it should show `arm` in CPU line not `x86`.\n\n'
"If you have mixed Intel/ARM binaries installed you should likely nuke and "
"reinstall your development environment (including brew and Python) from scratch!\n\n"
)
user_status = inputimeout(
prompt="Are you REALLY sure you want to continue? (answer with y otherwise we exit in 20s)\n",
timeout=20,
)
if user_status.upper() not in ["Y", "YES"]:
sys.exit(1)
except TimeoutOccurred:
get_console().print("\nNo answer, exiting...")
sys.exit(1)
except subprocess.CalledProcessError:
pass
except PermissionError:
pass
@main.command(
name="cleanup",
help="Cleans the cache of parameters, docker cache and optionally built CI/PROD images.",
)
@click.option(
"--all",
is_flag=True,
help="Also remove currently downloaded Breeze images.",
)
@option_verbose
@option_dry_run
@option_answer
def cleanup(all: bool):
if all:
get_console().print(
"\n[info]Removing cache of parameters, clean up docker cache "
"and remove locally downloaded images[/]"
)
else:
get_console().print("[info]Removing cache of parameters, and cleans up docker cache[/]")
if all:
docker_images_command_to_execute = [
"docker",
"images",
"--filter",
"label=org.apache.airflow.image",
"--format",
"{{.Repository}}:{{.Tag}}",
]
command_result = run_command(docker_images_command_to_execute, text=True, capture_output=True)
images = command_result.stdout.splitlines() if command_result and command_result.stdout else []
if images:
get_console().print("[info]Removing images:[/]")
for image in images:
get_console().print(f"[info] * {image}[/]")
get_console().print()
docker_rmi_command_to_execute = [
"docker",
"rmi",
"--force",
]
docker_rmi_command_to_execute.extend(images)
given_answer = user_confirm("Are you sure with the removal?")
if given_answer == Answer.YES:
run_command(docker_rmi_command_to_execute, check=False)
elif given_answer == Answer.QUIT:
sys.exit(0)
else:
get_console().print("[info]No locally downloaded images to remove[/]\n")
get_console().print("Removing networks created by breeze")
given_answer = user_confirm("Are you sure with the removal of docker networks created by breeze?")
if given_answer == Answer.YES:
remove_docker_networks()
get_console().print("Removing volumes created by breeze")
given_answer = user_confirm("Are you sure with the removal of docker volumes created by breeze?")
if given_answer == Answer.YES:
remove_docker_volumes()
get_console().print("Pruning docker images")
given_answer = user_confirm("Are you sure with the removal of docker images?")
if given_answer == Answer.YES:
system_prune_command_to_execute = ["docker", "system", "prune", "-f"]
run_command(
system_prune_command_to_execute,
check=False,
)
elif given_answer == Answer.QUIT:
sys.exit(0)
get_console().print(f"Removing build cache dir {BUILD_CACHE_PATH}")
given_answer = user_confirm("Are you sure with the removal?")
if given_answer == Answer.YES:
if not get_dry_run():
shutil.rmtree(BUILD_CACHE_PATH, ignore_errors=True)
get_console().print("Uninstalling airflow and removing configuration")
given_answer = user_confirm("Are you sure with the uninstall / remove?")
if given_answer == Answer.YES:
if not get_dry_run():
shutil.rmtree(AIRFLOW_HOME_PATH, ignore_errors=True)
AIRFLOW_HOME_PATH.mkdir(exist_ok=True, parents=True)
run_command(["uv", "pip", "uninstall", "apache-airflow"], check=False)
elif given_answer == Answer.QUIT:
sys.exit(0)
to_be_excluded_from_deletion = (
# dirs
".idea/", # Pycharm config
".vscode/", # VSCode config
".venv/",
"files/",
"logs/",
# files
".bash_history",
".bash_aliases",
)
get_console().print(
"Removing build file and git untracked files. This also removes files ignored in .gitignore.\n"
f"The following files will not be removed: `{to_be_excluded_from_deletion}`."
)
given_answer = user_confirm("Are you sure with the removal of build files?")
if given_answer == Answer.YES:
system_prune_command_to_execute = ["git", "clean", "-fdx"]
for excluded_object in to_be_excluded_from_deletion:
system_prune_command_to_execute.extend(["-e", excluded_object])
run_command(
system_prune_command_to_execute,
check=False,
)
elif given_answer == Answer.QUIT:
sys.exit(0)
|
MainGroupWithAliases
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 518327,
"end": 519530
}
|
class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("field", "pull_requests")
field = sgqlc.types.Field(
sgqlc.types.non_null("ProjectV2FieldConfiguration"), graphql_name="field"
)
pull_requests = sgqlc.types.Field(
"PullRequestConnection",
graphql_name="pullRequests",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"order_by",
sgqlc.types.Arg(
PullRequestOrder,
graphql_name="orderBy",
default={"field": "CREATED_AT", "direction": "ASC"},
),
),
)
),
)
|
ProjectV2ItemFieldPullRequestValue
|
python
|
numpy__numpy
|
numpy/random/tests/test_smoke.py
|
{
"start": 2411,
"end": 2557
}
|
class ____:
bit_generator: type[np.random.BitGenerator]
advance: int
seed: list[int]
rg: Generator
seed_vector_bits: int
|
RNGData
|
python
|
celery__celery
|
t/unit/app/test_log.py
|
{
"start": 3973,
"end": 10490
}
|
class ____:
def setup_logger(self, *args, **kwargs):
self.app.log.setup_logging_subsystem(*args, **kwargs)
return logging.root
def setup_method(self):
self.get_logger = lambda n=None: get_logger(n) if n else logging.root
signals.setup_logging.receivers[:] = []
self.app.log.already_setup = False
def test_get_logger_sets_parent(self):
logger = get_logger('celery.test_get_logger')
assert logger.parent.name == base_logger.name
def test_get_logger_root(self):
logger = get_logger(base_logger.name)
assert logger.parent is logging.root
def test_setup_logging_subsystem_misc(self, restore_logging):
self.app.log.setup_logging_subsystem(loglevel=None)
def test_setup_logging_subsystem_misc2(self, restore_logging):
self.app.conf.worker_hijack_root_logger = True
self.app.log.setup_logging_subsystem()
def test_get_default_logger(self):
assert self.app.log.get_default_logger()
def test_configure_logger(self):
logger = self.app.log.get_default_logger()
self.app.log._configure_logger(logger, sys.stderr, None, '', False)
self.app.log._configure_logger(None, sys.stderr, None, '', False)
logger.handlers[:] = []
def test_setup_logging_subsystem_colorize(self, restore_logging):
self.app.log.setup_logging_subsystem(colorize=None)
self.app.log.setup_logging_subsystem(colorize=True)
@pytest.mark.masked_modules('billiard.util')
def test_setup_logging_subsystem_no_mputil(self, restore_logging, mask_modules):
self.app.log.setup_logging_subsystem()
def test_setup_logger(self, restore_logging):
logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
root=False, colorize=True)
logger.handlers = []
self.app.log.already_setup = False
logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
root=False, colorize=None)
# setup_logger logs to stderr without logfile argument.
assert (conftest.get_logger_handlers(logger)[0].stream is
sys.__stderr__)
def test_setup_logger_no_handlers_stream(self, restore_logging):
l = self.get_logger()
l.handlers = []
with conftest.stdouts() as (stdout, stderr):
l = self.setup_logger(logfile=sys.stderr,
loglevel=logging.INFO, root=False)
l.info('The quick brown fox...')
assert 'The quick brown fox...' in stderr.getvalue()
@patch('os.fstat')
def test_setup_logger_no_handlers_file(self, *args):
_, tempfile = mkstemp(suffix='unittest', prefix='celery')
with patch('builtins.open') as osopen:
with conftest.restore_logging_context_manager():
files = defaultdict(StringIO)
def open_file(filename, *args, **kwargs):
f = files[filename]
f.fileno = Mock()
f.fileno.return_value = 99
return f
osopen.side_effect = open_file
l = self.get_logger()
l.handlers = []
l = self.setup_logger(
logfile=tempfile, loglevel=logging.INFO, root=False,
)
assert isinstance(conftest.get_logger_handlers(l)[0],
logging.FileHandler)
assert tempfile in files
def test_redirect_stdouts(self, restore_logging):
logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
root=False)
try:
with conftest.wrap_logger(logger) as sio:
self.app.log.redirect_stdouts_to_logger(
logger, loglevel=logging.ERROR,
)
logger.error('foo')
assert 'foo' in sio.getvalue()
self.app.log.redirect_stdouts_to_logger(
logger, stdout=False, stderr=False,
)
finally:
sys.stdout, sys.stderr = sys.__stdout__, sys.__stderr__
def test_logging_proxy(self, restore_logging):
logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
root=False)
with conftest.wrap_logger(logger) as sio:
p = LoggingProxy(logger, loglevel=logging.ERROR)
p.close()
p.write('foo')
assert 'foo' not in sio.getvalue()
p.closed = False
p.write('\n')
assert sio.getvalue() == ''
write_res = p.write('foo ')
assert sio.getvalue() == 'foo \n'
assert write_res == 4
lines = ['baz', 'xuzzy']
p.writelines(lines)
for line in lines:
assert line in sio.getvalue()
p.flush()
p.close()
assert not p.isatty()
with conftest.stdouts() as (stdout, stderr):
with in_sighandler():
p.write('foo')
assert stderr.getvalue()
def test_logging_proxy_bytes(self, restore_logging):
logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
root=False)
with conftest.wrap_logger(logger) as sio:
p = LoggingProxy(logger, loglevel=logging.ERROR)
p.close()
p.write(b'foo')
assert 'foo' not in str(sio.getvalue())
p.closed = False
p.write(b'\n')
assert str(sio.getvalue()) == ''
write_res = p.write(b'foo ')
assert str(sio.getvalue()) == 'foo \n'
assert write_res == 4
p.flush()
p.close()
assert not p.isatty()
with conftest.stdouts() as (stdout, stderr):
with in_sighandler():
p.write(b'foo')
assert stderr.getvalue()
def test_logging_proxy_recurse_protection(self, restore_logging):
logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,
root=False)
p = LoggingProxy(logger, loglevel=logging.ERROR)
p._thread.recurse_protection = True
try:
assert p.write('FOOFO') == 0
finally:
p._thread.recurse_protection = False
|
test_default_logger
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/links/test_vertex_ai.py
|
{
"start": 1566,
"end": 2626
}
|
class ____:
def test_class_attributes(self):
assert VertexAIRayClusterLink.key == EXPECTED_VERTEX_AI_RAY_CLUSTER_LINK_KEY
assert VertexAIRayClusterLink.name == EXPECTED_VERTEX_AI_RAY_CLUSTER_LINK_NAME
assert VertexAIRayClusterLink.format_str == EXPECTED_VERTEX_AI_RAY_CLUSTER_LINK_FORMAT_STR
def test_persist(self):
mock_context = mock.MagicMock()
mock_context["ti"] = mock.MagicMock(location=TEST_LOCATION, project_id=TEST_PROJECT_ID)
mock_context["task"] = mock.MagicMock()
VertexAIRayClusterLink.persist(
context=mock_context,
location=TEST_LOCATION,
cluster_id=TEST_CLUSTER_ID,
project_id=TEST_PROJECT_ID,
)
mock_context["ti"].xcom_push.assert_called_once_with(
key=EXPECTED_VERTEX_AI_RAY_CLUSTER_LINK_KEY,
value={
"location": TEST_LOCATION,
"cluster_id": TEST_CLUSTER_ID,
"project_id": TEST_PROJECT_ID,
},
)
|
TestVertexAIRayClusterLink
|
python
|
PyCQA__flake8
|
tests/unit/test_base_formatter.py
|
{
"start": 5400,
"end": 5777
}
|
class ____(base.BaseFormatter):
"""Subclass for testing after_init."""
def after_init(self):
"""Define method to verify operation."""
self.post_initialized = True
def test_after_init_is_always_called():
"""Verify after_init is called."""
formatter = AfterInitFormatter(options())
assert formatter.post_initialized is True
|
AfterInitFormatter
|
python
|
pyqtgraph__pyqtgraph
|
pyqtgraph/Transform3D.py
|
{
"start": 101,
"end": 1935
}
|
class ____(QtGui.QMatrix4x4):
"""
Extension of QMatrix4x4 with some helpful methods added.
"""
def __init__(self, *args):
if len(args) == 1:
if isinstance(args[0], (list, tuple, np.ndarray)):
args = [x for y in args[0] for x in y]
if len(args) != 16:
raise TypeError("Single argument to Transform3D must have 16 elements.")
elif isinstance(args[0], QtGui.QMatrix4x4):
args = list(args[0].copyDataTo())
QtGui.QMatrix4x4.__init__(self, *args)
def matrix(self, nd=3):
if nd == 3:
return np.array(self.copyDataTo()).reshape(4,4)
elif nd == 2:
m = np.array(self.copyDataTo()).reshape(4,4)
m[2] = m[3]
m[:,2] = m[:,3]
return m[:3,:3]
else:
raise Exception("Argument 'nd' must be 2 or 3")
def map(self, obj):
"""
Extends QMatrix4x4.map() to allow mapping (3, ...) arrays of coordinates
"""
if isinstance(obj, np.ndarray) and obj.shape[0] in (2,3):
if obj.ndim >= 2:
return fn.transformCoordinates(self, obj)
elif obj.ndim == 1:
v = QtGui.QMatrix4x4.map(self, Vector(obj))
return np.array([v.x(), v.y(), v.z()])[:obj.shape[0]]
elif isinstance(obj, (list, tuple)):
v = QtGui.QMatrix4x4.map(self, Vector(obj))
return type(obj)([v.x(), v.y(), v.z()])[:len(obj)]
else:
retval = QtGui.QMatrix4x4.map(self, obj)
if not isinstance(retval, type(obj)):
return type(obj)(retval)
return retval
def inverted(self):
inv, b = QtGui.QMatrix4x4.inverted(self)
return Transform3D(inv), b
|
Transform3D
|
python
|
Textualize__textual
|
src/textual/css/query.py
|
{
"start": 1284,
"end": 1587
}
|
class ____(QueryError):
"""Query result was not of the correct type."""
QueryType = TypeVar("QueryType", bound="Widget")
"""Type variable used to type generic queries."""
ExpectType = TypeVar("ExpectType")
"""Type variable used to further restrict queries."""
@rich.repr.auto(angular=True)
|
WrongType
|
python
|
django__django
|
django/contrib/gis/db/models/lookups.py
|
{
"start": 11436,
"end": 11572
}
|
class ____(DistanceLookupFromFunction):
lookup_name = "distance_gte"
op = ">="
@BaseSpatialField.register_lookup
|
DistanceGTELookup
|
python
|
cython__cython
|
docs/examples/userguide/extension_types/dict_animal.py
|
{
"start": 15,
"end": 217
}
|
class ____:
number_of_legs: cython.int
__dict__: dict
def __cinit__(self, number_of_legs: cython.int):
self.number_of_legs = number_of_legs
dog = Animal(4)
dog.has_tail = True
|
Animal
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/declarative/test_tm_future_annotations_sync.py
|
{
"start": 109625,
"end": 115232
}
|
class ____(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = "default"
def test_mapped_column_omit_fn(self, decl_base):
class MixinOne:
name: Mapped[str]
x: Mapped[int]
y: Mapped[int] = mapped_column()
class A(MixinOne, decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
eq_(A.__table__.c.keys(), ["id", "name", "x", "y"])
self.assert_compile(select(A), "SELECT a.id, a.name, a.x, a.y FROM a")
def test_mapped_column_omit_fn_fixed_table(self, decl_base):
class MixinOne:
name: Mapped[str]
x: Mapped[int]
y: Mapped[int]
a = Table(
"a",
decl_base.metadata,
Column("id", Integer, primary_key=True),
Column("name", String(50), nullable=False),
Column("data", String(50)),
Column("x", Integer),
Column("y", Integer),
)
class A(MixinOne, decl_base):
__table__ = a
id: Mapped[int]
self.assert_compile(
select(A), "SELECT a.id, a.name, a.data, a.x, a.y FROM a"
)
def test_mc_duplication_plain(self, decl_base):
class MixinOne:
name: Mapped[str] = mapped_column()
class A(MixinOne, decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
class B(MixinOne, decl_base):
__tablename__ = "b"
id: Mapped[int] = mapped_column(primary_key=True)
is_not(A.__table__.c.name, B.__table__.c.name)
def test_mc_duplication_declared_attr(self, decl_base):
class MixinOne:
@declared_attr
def name(cls) -> Mapped[str]:
return mapped_column()
class A(MixinOne, decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
class B(MixinOne, decl_base):
__tablename__ = "b"
id: Mapped[int] = mapped_column(primary_key=True)
is_not(A.__table__.c.name, B.__table__.c.name)
def test_relationship_requires_declared_attr(self, decl_base):
class Related(decl_base):
__tablename__ = "related"
id: Mapped[int] = mapped_column(primary_key=True)
class HasRelated:
related_id: Mapped[int] = mapped_column(ForeignKey(Related.id))
related: Mapped[Related] = relationship()
with expect_raises_message(
sa_exc.InvalidRequestError,
r"Mapper properties \(i.e. deferred,column_property\(\), "
r"relationship\(\), etc.\) must be declared",
):
class A(HasRelated, decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
def test_relationship_duplication_declared_attr(self, decl_base):
class Related(decl_base):
__tablename__ = "related"
id: Mapped[int] = mapped_column(primary_key=True)
class HasRelated:
related_id: Mapped[int] = mapped_column(ForeignKey(Related.id))
@declared_attr
def related(cls) -> Mapped[Related]:
return relationship()
class A(HasRelated, decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
class B(HasRelated, decl_base):
__tablename__ = "b"
id: Mapped[int] = mapped_column(primary_key=True)
self.assert_compile(
select(A).join(A.related),
"SELECT a.id, a.related_id FROM a "
"JOIN related ON related.id = a.related_id",
)
self.assert_compile(
select(B).join(B.related),
"SELECT b.id, b.related_id FROM b "
"JOIN related ON related.id = b.related_id",
)
@testing.variation("use_directive", [True, False])
@testing.variation("use_annotation", [True, False])
def test_supplemental_declared_attr(
self, decl_base, use_directive, use_annotation
):
"""test #9957"""
class User(decl_base):
__tablename__ = "user"
id: Mapped[int] = mapped_column(primary_key=True)
branch_id: Mapped[int] = mapped_column(ForeignKey("thing.id"))
class Mixin:
id: Mapped[int] = mapped_column(primary_key=True)
@declared_attr
def users(self) -> Mapped[List[User]]:
return relationship(User)
if use_directive:
if use_annotation:
@declared_attr.directive
def user_ids(self) -> AssociationProxy[List[int]]:
return association_proxy("users", "id")
else:
@declared_attr.directive
def user_ids(self):
return association_proxy("users", "id")
else:
if use_annotation:
@declared_attr
def user_ids(self) -> AssociationProxy[List[int]]:
return association_proxy("users", "id")
else:
@declared_attr
def user_ids(self):
return association_proxy("users", "id")
class Thing(Mixin, decl_base):
__tablename__ = "thing"
t1 = Thing()
t1.users.extend([User(id=1), User(id=2)])
eq_(t1.user_ids, [1, 2])
|
MixinTest
|
python
|
Lightning-AI__lightning
|
src/lightning/pytorch/plugins/precision/transformer_engine.py
|
{
"start": 765,
"end": 1940
}
|
class ____(Precision, FabricTEPrecision):
"""Plugin for training with fp8 precision via nvidia's
`Transformer Engine <https://docs.nvidia.com/deeplearning/transformer-engine>`__.
.. warning:: This is an :ref:`experimental <versioning:Experimental API>` feature.
Args:
dtype: The weights dtype to use.
recipe: Recipe for the DelayedScaling
`configuration <https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html#transformer_engine.common.recipe.DelayedScaling>`__.
In dict format or the dataclass format.
replace_layers: Whether to replace ``Linear`` and ``LayerNorm`` layers automatically with their Transformer
Engine alternatives. Note that they don't subclass the torch equivalents so checks like
``isinstance(l, torch.nn.Linear)`` will not pass.
.. note::
Support for FP8 in the linear layers with this plugin is currently limited to tensors
with shapes where the dimensions are divisible by 8 and 16 respectively. You might want to add padding to your
inputs to conform to this restriction.
"""
|
TransformerEnginePrecision
|
python
|
huggingface__transformers
|
src/transformers/models/deformable_detr/modeling_deformable_detr.py
|
{
"start": 33076,
"end": 36963
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config: DeformableDetrConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = DeformableDetrMultiscaleDeformableAttention(
config,
num_heads=config.encoder_attention_heads,
n_points=config.encoder_n_points,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
position_embeddings: Optional[torch.Tensor] = None,
reference_points=None,
spatial_shapes=None,
spatial_shapes_list=None,
level_start_index=None,
output_attentions: bool = False,
):
"""
Args:
hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Input to the layer.
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Attention mask.
position_embeddings (`torch.FloatTensor`, *optional*):
Position embeddings, to be added to `hidden_states`.
reference_points (`torch.FloatTensor`, *optional*):
Reference points.
spatial_shapes (`torch.LongTensor`, *optional*):
Spatial shapes of the backbone feature maps.
level_start_index (`torch.LongTensor`, *optional*):
Level start index.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
# Apply Multi-scale Deformable Attention Module on the multi-scale feature maps.
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
position_embeddings=position_embeddings,
reference_points=reference_points,
spatial_shapes=spatial_shapes,
spatial_shapes_list=spatial_shapes_list,
level_start_index=level_start_index,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if self.training:
if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
|
DeformableDetrEncoderLayer
|
python
|
tensorflow__tensorflow
|
tensorflow/python/tpu/tests/tpu_embedding_v2_correctness_ragged_training_test.py
|
{
"start": 954,
"end": 1441
}
|
class ____(
tpu_embedding_v2_correctness_base_test.TPUEmbeddingCorrectnessBaseTest):
@parameterized.parameters(
['sgd', 'adagrad', 'adam', 'ftrl', 'adagrad_momentum'])
def test_embedding(self, optimizer_name):
if optimizer_name != 'sgd':
self.skip_if_oss()
self._test_embedding(
optimizer_name, training=True, sparse=False, is_high_dimensional=False)
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
|
TPUEmbeddingCorrectnessTest
|
python
|
Netflix__metaflow
|
test/core/tests/resume_recursive_switch_inside_foreach.py
|
{
"start": 63,
"end": 2271
}
|
class ____(MetaflowTest):
RESUME = True
PRIORITY = 2
ONLY_GRAPHS = ["recursive_switch_inside_foreach"]
@steps(0, ["start"], required=True)
def step_start(self):
if not is_resumed():
self.items = [
{"id": "A", "iterations": 3},
{"id": "B", "iterations": 5},
{"id": "C", "iterations": 2},
]
@steps(0, ["loop_start"], required=True)
def step_start_loop_for_item(self):
self.item_id = self.input["id"]
self.max_loops = self.input["iterations"]
self.item_loop_count = 0
@steps(0, ["loop_body"], required=True)
def step_loop_body(self):
self.item_loop_count += 1
if not is_resumed() and self.item_id == "B" and self.item_loop_count == 3:
raise ResumeFromHere()
self.should_continue = str(self.item_loop_count < self.max_loops)
@steps(0, ["loop_exit"], required=True)
def step_exit_item_loop(self):
assert_equals(self.max_loops, self.item_loop_count)
self.result = (
f"Item {self.item_id} finished after {self.item_loop_count} iterations."
)
@steps(0, ["join-foreach"], required=True)
def step_join(self, inputs):
self.results = sorted([inp.result for inp in inputs])
@steps(1, ["end"], required=True)
def step_end(self):
pass
def check_results(self, flow, checker):
run = checker.get_run()
if run is not None:
expected = [
"Item A finished after 3 iterations.",
"Item B finished after 5 iterations.",
"Item C finished after 2 iterations.",
]
checker.assert_artifact("join", "results", expected)
exit_steps = run["exit_item_loop"]
exit_steps_by_id = {s.data.item_id: s for s in exit_steps}
assert_equals(3, len(list(exit_steps)))
# Branch 'B' failed and was re-executed from the start of the branch.
# Its exit step is a new task and should NOT have an 'origin-task-id'.
assert "origin-task-id" not in exit_steps_by_id["B"].metadata_dict
|
ResumeRecursiveSwitchInsideForeachFlowTest
|
python
|
fluentpython__example-code
|
19-dyn-attr-prop/oscon/explore1.py
|
{
"start": 1059,
"end": 1870
}
|
class ____:
"""A read-only façade for navigating a JSON-like object
using attribute notation
"""
# BEGIN EXPLORE1
def __init__(self, mapping):
self.__data = {}
for key, value in mapping.items():
if keyword.iskeyword(key): # <1>
key += '_'
self.__data[key] = value
# END EXPLORE1
def __getattr__(self, name):
if hasattr(self.__data, name):
return getattr(self.__data, name)
else:
return FrozenJSON.build(self.__data[name])
@classmethod
def build(cls, obj):
if isinstance(obj, abc.Mapping):
return cls(obj)
elif isinstance(obj, abc.MutableSequence):
return [cls.build(item) for item in obj]
else: # <8>
return obj
|
FrozenJSON
|
python
|
getsentry__sentry
|
tests/sentry/db/test_transactions.py
|
{
"start": 5011,
"end": 5165
}
|
class ____(CaseMixin, TransactionTestCase):
def test_collect_transaction_queries(self) -> None:
return
|
TestDjangoTransactionTestCaseTransactions
|
python
|
keon__algorithms
|
tests/test_graph.py
|
{
"start": 9729,
"end": 10359
}
|
class ____(unittest.TestCase):
def test_cycle_detection_with_cycle(self):
graph = {'A': ['B', 'C'],
'B': ['D'],
'C': ['F'],
'D': ['E', 'F'],
'E': ['B'],
'F': []}
self.assertTrue(cycle_detection.contains_cycle(graph))
def test_cycle_detection_with_no_cycle(self):
graph = {'A': ['B', 'C'],
'B': ['D', 'E'],
'C': ['F'],
'D': ['E'],
'E': [],
'F': []}
self.assertFalse(cycle_detection.contains_cycle(graph))
|
TestCycleDetection
|
python
|
django__django
|
django/contrib/postgres/aggregates/statistics.py
|
{
"start": 904,
"end": 964
}
|
class ____(StatAggregate):
function = "REGR_AVGX"
|
RegrAvgX
|
python
|
astropy__astropy
|
astropy/io/fits/tests/test_header.py
|
{
"start": 2075,
"end": 103493
}
|
class ____(FitsTestCase):
"""Test Header and Card objects."""
def test_rename_keyword(self):
"""Test renaming keyword with rename_keyword."""
header = fits.Header([("A", "B", "C"), ("D", "E", "F")])
header.rename_keyword("A", "B")
assert "A" not in header
assert "B" in header
assert header[0] == "B"
assert header["B"] == "B"
assert header.comments["B"] == "C"
@pytest.mark.parametrize("key", ["A", "a"])
def test_indexing_case(self, key):
"""Check that indexing is case insensitive"""
header = fits.Header([("A", "B", "C"), ("D", "E", "F")])
assert key in header
assert header[key] == "B"
assert header.get(key) == "B"
assert header.index(key) == 0
assert header.comments[key] == "C"
assert header.count(key) == 1
header.remove(key, ignore_missing=False)
def test_card_constructor_default_args(self):
"""Test Card constructor with default argument values."""
c = fits.Card()
assert c.keyword == ""
def test_card_from_bytes(self):
"""
Test loading a Card from a `bytes` object (assuming latin-1 encoding).
"""
c = fits.Card.fromstring(b"ABC = 'abc'")
assert c.keyword == "ABC"
assert c.value == "abc"
def test_string_value_card(self):
"""Test Card constructor with string value"""
c = fits.Card("abc", "<8 ch")
assert str(c) == _pad("ABC = '<8 ch '")
c = fits.Card("nullstr", "")
assert str(c) == _pad("NULLSTR = ''")
def test_boolean_value_card(self):
"""Test Card constructor with boolean value"""
c = fits.Card("abc", True)
assert str(c) == _pad("ABC = T")
c = fits.Card.fromstring("ABC = F")
assert c.value is False
def test_long_integer_value_card(self):
"""Test Card constructor with long integer value"""
c = fits.Card("long_int", -467374636747637647347374734737437)
assert str(c) == _pad("LONG_INT= -467374636747637647347374734737437")
def test_floating_point_value_card(self):
"""Test Card constructor with floating point value"""
c = fits.Card("floatnum", -467374636747637647347374734737437.0)
if str(c) != _pad("FLOATNUM= -4.6737463674763E+32") and str(c) != _pad(
"FLOATNUM= -4.6737463674763E+032"
):
assert str(c) == _pad("FLOATNUM= -4.6737463674763E+32")
def test_floating_point_string_representation_card(self):
    """
    Ensures Card formats float values with the correct precision, avoiding
    comment truncation

    Regression test for https://github.com/astropy/astropy/issues/14507
    """
    keyword = "HIERARCH ABC DEF GH IJKLMN"
    comment = "[m] abcdef ghijklm nopqrstu vw xyzab"
    # Each value must be rendered with its natural repr so the comment is
    # not pushed off the end of the card.
    for value in (0.009125, 8.95, -99.9):
        card = fits.Card(keyword, value, comment)
        prefix = f"{keyword} = {value} / {comment}"
        assert str(card)[: len(prefix)] == prefix
def test_complex_value_card(self):
    """Test Card constructor with complex value"""
    c = fits.Card("abc", (1.2345377437887837487e88 + 6324767364763746367e-33j))
    # Platforms differ in exponent width (two vs. three digits); accept
    # either rendering before falling back to the canonical form.
    f1 = _pad("ABC = (1.23453774378878E+88, 6.32476736476374E-15)")
    f2 = _pad("ABC = (1.2345377437887E+088, 6.3247673647637E-015)")
    f3 = _pad("ABC = (1.23453774378878E+88, 6.32476736476374E-15)")
    if str(c) != f1 and str(c) != f2:
        assert str(c) == f3
def test_card_image_constructed_too_long(self):
    """Test that over-long cards truncate the comment"""
    # card image constructed from key/value/comment is too long
    # (non-string value); the comment is truncated to fit 80 columns.
    c = fits.Card("abc", 9, "abcde" * 20)
    with pytest.warns(fits.verify.VerifyWarning):
        assert (
            str(c) == "ABC = 9 "
            "/ abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeab"
        )
    # A maximum-length string value leaves no room for any comment.
    c = fits.Card("abc", "a" * 68, "abcdefg")
    with pytest.warns(fits.verify.VerifyWarning):
        assert str(c) == f"ABC = '{'a' * 68}'"
def test_constructor_filter_illegal_data_structures(self):
    """Test that Card constructor raises exceptions on bad arguments"""
    # A tuple keyword and a container value are both rejected.
    with pytest.raises(ValueError):
        fits.Card(("abc",), {"value": (2, 3)})
    with pytest.raises(ValueError):
        fits.Card("key", [], "comment")
def test_keyword_too_long(self):
    """Test that long Card keywords are allowed, but with a warning"""
    # Keywords over 8 characters are not rejected outright; they warn and
    # are written as HIERARCH cards instead.
    with pytest.warns(
        UserWarning,
        match=(
            r"^Keyword name 'abcdefghi' is greater than 8 characters or contains "
            r"characters not allowed by the FITS standard; a HIERARCH card will be created\.$"
        ),
    ):
        fits.Card("abcdefghi", "long")
def test_illegal_characters_in_key(self):
    """
    Test that Card constructor allows illegal characters in the keyword,
    but creates a HIERARCH card.
    """
    # This test used to check that a ValueError was raised, because a
    # keyword like 'abc+' was simply not allowed. Now it should create a
    # HIERARCH card.
    with pytest.warns(AstropyUserWarning) as warning_records:
        card = fits.Card("abc+", 9)
    assert len(warning_records) == 1
    assert card.image == _pad("HIERARCH abc+ = 9")
def test_add_history(self):
    """add_history should reuse trailing blank cards instead of growing."""
    header = fits.Header(
        [
            ("A", "B", "C"),
            ("HISTORY", 1),
            ("HISTORY", 2),
            ("HISTORY", 3),
            ("", "", ""),
            ("", "", ""),
        ]
    )
    header.add_history(4)
    # One of the blanks should get used, so the length shouldn't change
    assert len(header) == 6
    assert header.cards[4].value == 4
    assert header["HISTORY"] == [1, 2, 3, 4]
    assert repr(header["HISTORY"]) == "1\n2\n3\n4"
    # 'after' places the new HISTORY card directly behind keyword A.
    header.add_history(0, after="A")
    assert len(header) == 6
    assert header.cards[1].value == 0
    assert header["HISTORY"] == [0, 1, 2, 3, 4]
def test_add_blank(self):
    """add_blank appends a new blank card rather than reusing blanks."""
    header = fits.Header(
        [("A", "B", "C"), ("", 1), ("", 2), ("", 3), ("", "", ""), ("", "", "")]
    )
    header.add_blank(4)
    # This time a new blank should be added, and the existing blanks don't
    # get used... (though this is really kinda sketchy--there's a
    # distinction between truly blank cards, and cards with blank keywords
    # that isn't currently made in the code)
    assert len(header) == 7
    assert header.cards[6].value == 4
    assert header[""] == [1, 2, 3, "", "", 4]
    assert repr(header[""]) == "1\n2\n3\n\n\n4"
    header.add_blank(0, after="A")
    assert len(header) == 8
    assert header.cards[1].value == 0
    assert header[""] == [0, 1, 2, 3, "", "", 4]
    # Blank ('') and single-space (' ') keys address the same blank cards.
    header[""] = 5
    header[" "] = 6
    assert header[""] == [0, 1, 2, 3, "", "", 4, 5, 6]
    assert header[" "] == [0, 1, 2, 3, "", "", 4, 5, 6]
def test_update(self):
    # A minimal mapping-like object: keys() plus a __getitem__ that
    # returns the (value, comment) tail for a keyword.
    class FakeHeader(list):
        def keys(self):
            return [row[0] for row in self]

        def __getitem__(self, key):
            return next(row[1:] for row in self if row[0] == key)

    header = fits.Header()
    header.update({"FOO": ("BAR", "BAZ")})
    header.update(FakeHeader([("A", 1), ("B", 2, "comment")]))
    assert set(header.keys()) == {"FOO", "A", "B"}
    assert header.comments["B"] == "comment"

    # test that comments are preserved
    tmphdr = fits.Header()
    tmphdr["HELLO"] = (1, "this is a comment")
    header.update(tmphdr)
    assert set(header.keys()) == {"FOO", "A", "B", "HELLO"}
    assert header.comments["HELLO"] == "this is a comment"

    # Keyword arguments work as well.
    header.update(NAXIS1=100, NAXIS2=100)
    assert set(header.keys()) == {"FOO", "A", "B", "HELLO", "NAXIS1", "NAXIS2"}
    assert tuple(header.values()) == ("BAR", 1, 2, 1, 100, 100)
def test_update_comment(self):
    """Comments survive update(), writeto(), and in-place edits on disk."""
    hdul = fits.open(self.data("arange.fits"))
    hdul[0].header.update({"FOO": ("BAR", "BAZ")})
    assert hdul[0].header["FOO"] == "BAR"
    assert hdul[0].header.comments["FOO"] == "BAZ"
    # A 3-tuple is not a valid (value, comment) pair.
    with pytest.raises(ValueError):
        hdul[0].header.update({"FOO2": ("BAR", "BAZ", "EXTRA")})
    hdul.writeto(self.temp("test.fits"))
    hdul.close()
    # Edit the comment in update mode and confirm it persisted.
    hdul = fits.open(self.temp("test.fits"), mode="update")
    hdul[0].header.comments["FOO"] = "QUX"
    hdul.close()
    hdul = fits.open(self.temp("test.fits"))
    assert hdul[0].header.comments["FOO"] == "QUX"
    hdul[0].header.add_comment(0, after="FOO")
    assert str(hdul[0].header.cards[-1]).strip() == "COMMENT 0"
    hdul.close()
def test_commentary_cards(self):
    # Commentary cards render their value unquoted.
    text = "A commentary card's value has no quotes around it."
    card = fits.Card("HISTORY", text)
    assert str(card) == _pad("HISTORY " + text)
    # Any comment passed for a commentary card is simply dropped.
    text = "A commentary card has no comment."
    card = fits.Card("COMMENT", text, "comment")
    assert str(card) == _pad("COMMENT " + text)
def test_commentary_card_created_by_fromstring(self):
    # commentary card created by fromstring()
    c = fits.Card.fromstring(
        "COMMENT card has no comments. "
        "/ text after slash is still part of the value."
    )
    # No value/comment split is performed for commentary keywords: the
    # slash is ordinary text, so everything belongs to the value.
    assert (
        c.value == "card has no comments. "
        "/ text after slash is still part of the value."
    )
    assert c.comment == ""
def test_commentary_card_will_not_parse_numerical_value(self):
    # A HISTORY card never parses its body; "(1, 2)" stays literal text.
    card = fits.Card.fromstring("HISTORY (1, 2)")
    assert str(card) == _pad("HISTORY (1, 2)")
def test_equal_sign_after_column8(self):
    # An "=" past column 8 of a commentary card is part of the string
    # value, not a value indicator.
    card = fits.Card.fromstring("HISTORY = (1, 2)")
    assert str(card) == _pad("HISTORY = (1, 2)")
def test_blank_keyword(self):
    card = fits.Card("", " / EXPOSURE INFORMATION")
    assert str(card) == _pad(" / EXPOSURE INFORMATION")
    # Round-tripping through fromstring keeps the blank keyword intact.
    card = fits.Card.fromstring(str(card))
    assert card.keyword == ""
    assert card.value == " / EXPOSURE INFORMATION"
def test_specify_undefined_value(self):
    # fits.card.UNDEFINED is the sentinel for a valueless card.
    card = fits.Card("undef", fits.card.UNDEFINED)
    assert str(card) == _pad("UNDEF =")
def test_complex_number_using_string_input(self):
    # A complex value given in string form survives a str round trip.
    card = fits.Card.fromstring("ABC = (8, 9)")
    assert str(card) == _pad("ABC = (8, 9)")
def test_fixable_non_standard_fits_card(self, capsys):
    # fixable non-standard FITS card will keep the original format
    c = fits.Card.fromstring("abc = + 2.1 e + 12")
    assert c.value == 2100000000000.0
    # Rendering the card triggers fix-up of the malformed value, reported
    # through a VerifyWarning.
    with pytest.warns(fits.verify.VerifyWarning) as w:
        assert str(c) == _pad("ABC = +2.1E+12")
        assert "Verification reported errors" in str(w[0].message)
def test_fixable_non_fsc(self):
    # fixable non-FSC: if the card is not parsable, it's value will be
    # assumed to be a string and everything after the first slash will be
    # comment
    c = fits.Card.fromstring(
        "no_quote= this card's value has no quotes / let's also try the comment"
    )
    with pytest.warns(fits.verify.VerifyWarning) as w:
        # The unquoted value is re-emitted quoted, with the embedded
        # apostrophe doubled per the FITS standard.
        assert (
            str(c) == "NO_QUOTE= 'this card''s value has no quotes' "
            "/ let's also try the comment "
        )
        assert "Verification reported errors" in str(w[0].message)
def test_undefined_value_using_string_input(self):
    # An empty value field parses to the UNDEFINED sentinel.
    card = fits.Card.fromstring("ABC = ")
    assert str(card) == _pad("ABC =")
def test_undefined_keys_values(self):
    hdr = fits.Header()
    hdr["FOO"] = "BAR"
    hdr["UNDEF"] = None
    # Undefined values read back as None through both views.
    assert list(hdr.values()) == ["BAR", None]
    assert list(hdr.items()) == [("FOO", "BAR"), ("UNDEF", None)]
def test_mislocated_equal_sign(self, capsys):
    # test mislocated "=" sign
    # An "=" before column 9 still parses; rendering fixes the layout and
    # warns about it.
    c = fits.Card.fromstring("XYZ= 100")
    assert c.keyword == "XYZ"
    assert c.value == 100
    with pytest.warns(fits.verify.VerifyWarning) as w:
        assert str(c) == _pad("XYZ = 100")
        assert "Verification reported errors" in str(w[0].message)
def test_equal_only_up_to_column_10(self, capsys):
    # the test of "=" location is only up to column 10
    # This test used to check if Astropy rewrote this card to a new format,
    # something like "HISTO = '= (1, 2)". But since ticket #109 if the
    # format is completely wrong we don't make any assumptions and the card
    # should be left alone
    c = fits.Card.fromstring("HISTO = (1, 2)")
    with pytest.warns(AstropyUserWarning, match=r"header keyword is invalid"):
        assert str(c) == _pad("HISTO = (1, 2)")
    # Likewise this card should just be left in its original form and
    # we shouldn't guess how to parse it or rewrite it.
    c = fits.Card.fromstring(" HISTORY (1, 2)")
    with pytest.warns(AstropyUserWarning, match=r"header keyword is invalid"):
        assert str(c) == _pad(" HISTORY (1, 2)")
def test_verify_invalid_equal_sign(self):
    # verification
    c = fits.Card.fromstring("ABC= a6")
    with pytest.warns(AstropyUserWarning) as w:
        c.verify()
    err_text1 = "Card 'ABC' is not FITS standard (equal sign not at column 8)"
    err_text2 = "Card 'ABC' is not FITS standard (invalid value string: 'a6'"
    # Two distinct problems are reported, surrounded by summary warnings
    # (four records total).
    assert len(w) == 4
    assert err_text1 in str(w[1].message)
    assert err_text2 in str(w[2].message)
def test_fix_invalid_equal_sign(self):
    fix_text = "Fixed 'ABC' card to meet the FITS standard."
    c = fits.Card.fromstring("ABC= a6")
    with pytest.warns(fits.verify.VerifyWarning) as w:
        c.verify("fix")
    # verify('fix') repairs the card in place and reports what it fixed.
    assert len(w) == 4
    assert fix_text in str(w[2].message)
    assert str(c) == _pad("ABC = 'a6 '")
def test_long_string_value(self):
    # test long string value
    # Values longer than one card are split across CONTINUE cards, with
    # '&' marking each continuation; the comment is carried the same way.
    c = fits.Card("abc", "long string value " * 10, "long comment " * 10)
    assert (
        str(c)
        == "ABC = 'long string value long string value long string value long string &' "
        "CONTINUE 'value long string value long string value long string value long &' "
        "CONTINUE 'string value long string value long string value &' "
        "CONTINUE '&' / long comment long comment long comment long comment long "
        "CONTINUE '&' / comment long comment long comment long comment long comment "
        "CONTINUE '' / long comment "
    )
def test_long_string_value_with_multiple_long_words(self):
    """
    Regression test for https://github.com/astropy/astropy/issues/11298
    """
    # Words longer than a card must be broken mid-word across CONTINUE
    # cards without dropping characters.
    c = fits.Card(
        "WHATEVER",
        "SuperCalibrationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_"
        "03)-AAABBBCCC.n.h5 SuperNavigationParameters_XXXX_YYYY"
        "_ZZZZZ_KK_01_02_03)-AAABBBCCC.n.xml",
    )
    assert (
        str(c)
        == "WHATEVER= 'SuperCalibrationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_03)-AAABBBCCC.n&'"
        "CONTINUE '.h5 &' "
        "CONTINUE 'SuperNavigationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_03)-AAABBBCCC.n.&'"
        "CONTINUE 'xml' "
    )
def test_long_unicode_string(self):
    """Regression test for
    https://github.com/spacetelescope/PyFITS/issues/1

    So long as a unicode string can be converted to ASCII it should have no
    different behavior in this regard from a byte string.
    """
    hdr_a = fits.Header()
    hdr_a["TEST"] = "abcdefg" * 30
    hdr_b = fits.Header()
    hdr_b["TEST"] = "abcdefg" * 30
    # Both headers must serialize identically.
    assert str(hdr_a) == str(hdr_b)
def test_long_string_repr(self):
    """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/193

    Ensure that the __repr__() for cards represented with CONTINUE cards is
    split across multiple lines (broken at each *physical* card).
    """
    header = fits.Header()
    header["TEST1"] = ("Regular value", "Regular comment")
    header["TEST2"] = ("long string value " * 10, "long comment " * 10)
    header["TEST3"] = ("Regular value", "Regular comment")
    # One repr line per physical 80-column card, CONTINUEs included.
    assert repr(header).splitlines() == [
        str(fits.Card("TEST1", "Regular value", "Regular comment")),
        "TEST2 = 'long string value long string value long string value long string &' ",
        "CONTINUE 'value long string value long string value long string value long &' ",
        "CONTINUE 'string value long string value long string value &' ",
        "CONTINUE '&' / long comment long comment long comment long comment long ",
        "CONTINUE '&' / comment long comment long comment long comment long comment ",
        "CONTINUE '' / long comment ",
        str(fits.Card("TEST3", "Regular value", "Regular comment")),
    ]
def test_blank_keyword_long_value(self):
    """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/194

    Test that a blank keyword ('') can be assigned a too-long value that is
    continued across multiple cards with blank keywords, just like COMMENT
    and HISTORY cards.
    """
    value = "long string value " * 10
    header = fits.Header()
    header[""] = value
    # The value spans three physical blank-keyword cards.
    assert len(header) == 3
    assert " ".join(header[""]) == value.rstrip()
    # Ensure that this works like other commentary keywords
    header["COMMENT"] = value
    header["HISTORY"] = value
    assert header["COMMENT"] == header["HISTORY"]
    assert header["COMMENT"] == header[""]
def check_roundtrip(self, card):
    """Helper: write *card* to a file and verify it reads back unchanged."""
    hdu = fits.PrimaryHDU()
    hdu.header.append(card)
    hdu.writeto(self.temp("test_new.fits"))
    with fits.open(self.temp("test_new.fits")) as hdul:
        reread = hdul[0].header.cards[card.keyword]
        assert reread.keyword == card.keyword
        assert reread.value == card.value
        assert reread.comment == card.comment
def test_long_string_from_file(self):
    # A CONTINUE-split value must verify cleanly and survive a file
    # round trip unchanged.
    c = fits.Card("abc", "long string value " * 10, "long comment " * 10)
    c.verify()
    assert (
        str(c)
        == "ABC = 'long string value long string value long string value long string &' "
        "CONTINUE 'value long string value long string value long string value long &' "
        "CONTINUE 'string value long string value long string value &' "
        "CONTINUE '&' / long comment long comment long comment long comment long "
        "CONTINUE '&' / comment long comment long comment long comment long comment "
        "CONTINUE '' / long comment "
    )
    self.check_roundtrip(c)
def test_word_in_long_string_too_long(self):
    # if a word in a long string is too long, it will be cut in the middle
    c = fits.Card("abc", "longstringvalue" * 10, "longcomment" * 10)
    assert (
        str(c)
        == "ABC = 'longstringvaluelongstringvaluelongstringvaluelongstringvaluelongstr&'"
        "CONTINUE 'ingvaluelongstringvaluelongstringvaluelongstringvaluelongstringvalu&'"
        "CONTINUE 'elongstringvalue&' "
        "CONTINUE '&' / longcommentlongcommentlongcommentlongcommentlongcommentlongcomme"
        "CONTINUE '' / ntlongcommentlongcommentlongcommentlongcomment "
    )
def test_long_string_value_via_fromstring(self, capsys):
    # long string value via fromstring() method
    # Pre-split CONTINUE input is re-parsed into a single logical value;
    # rendering re-normalizes the continuation layout and warns.
    c = fits.Card.fromstring(
        _pad("abc = 'longstring''s testing & ' / comments in line 1")
        + _pad(
            "continue 'continue with long string but without the "
            "ampersand at the end' /"
        )
        + _pad(
            "continue 'continue must have string value (with quotes)' "
            "/ comments with ''. "
        )
    )
    with pytest.warns(fits.verify.VerifyWarning) as w:
        assert (
            str(c)
            == "ABC = 'longstring''s testing continue with long string but without the &' "
            "CONTINUE 'ampersand at the endcontinue must have string value (with quotes)&' "
            "CONTINUE '' / comments in line 1 comments with ''. "
        )
        assert "Verification reported errors" in str(w[0].message)
def test_long_string_value_with_quotes(self):
    # Escaped quotes at and after the CONTINUE split point must survive a
    # Card -> image -> Card round trip.
    for suffix in ("''", "''xxx", "'' xxx"):
        testval = "x" * 100 + suffix
        card = fits.Card("TEST", testval)
        card = fits.Card.fromstring(card.image)
        assert card.value == testval
def test_continue_card_with_equals_in_value(self):
    """
    Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/117
    """
    # An '=' inside a CONTINUE'd string value must not be mistaken for a
    # value indicator.
    c = fits.Card.fromstring(
        _pad(
            "EXPR = '/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits * &'"
        )
        + _pad("CONTINUE '5.87359e-12 * MWAvg(Av=0.12)&'")
        + _pad("CONTINUE '&' / pysyn expression")
    )
    assert c.keyword == "EXPR"
    assert (
        c.value == "/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits "
        "* 5.87359e-12 * MWAvg(Av=0.12)"
    )
    assert c.comment == "pysyn expression"
def test_final_continue_card_lacks_ampersand(self):
    """
    Regression test for https://github.com/astropy/astropy/issues/3282
    """
    header = fits.Header()
    header["SVALUE"] = "A" * 69
    # The last CONTINUE card must not end with a spurious '&'.
    assert repr(header).splitlines()[-1] == _pad("CONTINUE 'AA'")
def test_final_continue_card_ampersand_removal_on_long_comments(self):
    """
    Regression test for https://github.com/astropy/astropy/issues/3282
    """
    # Literal '&' characters inside the comment text must be preserved;
    # only the continuation marker is managed by the writer.
    c = fits.Card("TEST", "long value" * 10, "long comment &" * 10)
    assert (
        str(c)
        == "TEST = 'long valuelong valuelong valuelong valuelong valuelong valuelong &' "
        "CONTINUE 'valuelong valuelong valuelong value&' "
        "CONTINUE '&' / long comment &long comment &long comment &long comment &long "
        "CONTINUE '&' / comment &long comment &long comment &long comment &long comment "
        "CONTINUE '' / &long comment & "
    )
def test_hierarch_card_creation(self):
    # Test automatic upgrade to hierarch card
    with pytest.warns(
        AstropyUserWarning, match="HIERARCH card will be created"
    ) as w:
        c = fits.Card(
            "ESO INS SLIT2 Y1FRML",
            "ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)",
        )
    assert len(w) == 1
    assert (
        str(c) == "HIERARCH ESO INS SLIT2 Y1FRML= "
        "'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'"
    )
    # Test manual creation of hierarch card
    # (no warning when HIERARCH is spelled out explicitly)
    c = fits.Card("hierarch abcdefghi", 10)
    assert str(c) == _pad("HIERARCH abcdefghi = 10")
    c = fits.Card(
        "HIERARCH ESO INS SLIT2 Y1FRML",
        "ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)",
    )
    assert (
        str(c) == "HIERARCH ESO INS SLIT2 Y1FRML= "
        "'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'"
    )
def test_hierarch_with_abbrev_value_indicator(self):
    """Regression test for
    https://github.com/spacetelescope/PyFITS/issues/5
    """
    # '=' with no surrounding spaces is still accepted as the value
    # indicator on a HIERARCH card.
    card = fits.Card.fromstring("HIERARCH key.META_4='calFileVersion'")
    assert card.keyword == "key.META_4"
    assert card.value == "calFileVersion"
    assert card.comment == ""
def test_hierarch_not_warn(self):
    """Check that compressed image headers do not issue HIERARCH warnings."""
    filename = fits.util.get_testdata_filepath("compressed_image.fits")
    with fits.open(filename) as hdul:
        header = hdul[1].header
    # Explicit 'HIERARCH' prefix: no warning should be emitted at all.
    with warnings.catch_warnings(record=True) as warning_list:
        header["HIERARCH LONG KEYWORD"] = 42
    assert len(warning_list) == 0
    assert header["LONG KEYWORD"] == 42
    assert header["HIERARCH LONG KEYWORD"] == 42
    # Check that it still warns if we do not use HIERARCH
    with pytest.warns(
        fits.verify.VerifyWarning, match=r"greater than 8 characters"
    ):
        header["LONG KEYWORD2"] = 1
    assert header["LONG KEYWORD2"] == 1
def test_hierarch_keyword_whitespace(self):
    """
    Regression test for
    https://github.com/spacetelescope/PyFITS/issues/6

    Make sure any leading or trailing whitespace around HIERARCH
    keywords is stripped from the actual keyword value.
    """
    # Parsing from a string and direct construction must agree.
    cards = (
        fits.Card.fromstring("HIERARCH key.META_4 = 'calFileVersion'"),
        fits.Card("HIERARCH key.META_4", "calFileVersion"),
    )
    for card in cards:
        assert card.keyword == "key.META_4"
        assert card.value == "calFileVersion"
        assert card.comment == ""
def test_hierarch_key_with_long_value(self):
    # regression test for gh-3746
    # A HIERARCH card whose value needs CONTINUE splitting.
    long_key = "A VERY LONG KEY HERE"
    long_value = (
        "A VERY VERY VERY VERY LONG STRING THAT SOMETHING MAY BE MAD"
        " ABOUT PERSISTING BECAUSE ASTROPY CAN'T HANDLE THE TRUTH"
    )
    with pytest.warns(fits.verify.VerifyWarning, match="greater than 8"):
        card = fits.Card(long_key, long_value)
    card.verify()
    assert str(card) == (
        "HIERARCH A VERY LONG KEY HERE = 'A VERY VERY VERY VERY LONG STRING THAT &' "
        "CONTINUE 'SOMETHING MAY BE MAD ABOUT PERSISTING BECAUSE ASTROPY CAN''T &' "
        "CONTINUE 'HANDLE THE TRUTH' "
    )
    self.check_roundtrip(card)
def test_hierarch_key_with_long_value_no_spaces(self):
    # regression test for gh-3746
    # Same as above but the value has no word breaks, forcing mid-word
    # CONTINUE splits.
    long_key = "A VERY LONG KEY HERE"
    long_value = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ" * 3
    with pytest.warns(fits.verify.VerifyWarning, match="greater than 8"):
        card = fits.Card(long_key, long_value)
    card.verify()
    assert str(card) == (
        "HIERARCH A VERY LONG KEY HERE = 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRS&'"
        "CONTINUE 'TUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGH&'"
        "CONTINUE 'IJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ' "
    )
    self.check_roundtrip(card)
def test_hierarch_key_with_medium_value_and_comment(self):
    long_key = "A VERY LONG KEY HERE"
    medium_value = "ABCD EFGH IJKL MNOP QRST " * 2
    assert len(medium_value) == 50  # Just right to trigger previous bug
    comment = "random comment"
    with pytest.warns(fits.verify.VerifyWarning, match="greater than 8"):
        card = fits.Card(long_key, medium_value, comment)
    card.verify()
    # The comment ends up on the final CONTINUE card after the value.
    assert str(card) == (
        "HIERARCH A VERY LONG KEY HERE = 'ABCD EFGH IJKL MNOP QRST ABCD EFGH IJKL MNOP &'"
        + _pad("CONTINUE 'QRST &'")
        + _pad("CONTINUE '' / random comment")
    )
    self.check_roundtrip(card)
def test_verify_mixed_case_hierarch(self):
    """Regression test for
    https://github.com/spacetelescope/PyFITS/issues/7

    Assures that HIERARCH keywords with lower-case characters and other
    normally invalid keyword characters are not considered invalid.
    """
    c = fits.Card("HIERARCH WeirdCard.~!@#_^$%&", "The value", "a comment")
    # This should not raise any exceptions
    c.verify("exception")
    assert c.keyword == "WeirdCard.~!@#_^$%&"
    assert c.value == "The value"
    assert c.comment == "a comment"
    # Test also the specific case from the original bug report
    header = fits.Header(
        [
            ("simple", True),
            ("BITPIX", 8),
            ("NAXIS", 0),
            ("EXTEND", True, "May contain datasets"),
            ("HIERARCH key.META_0", "detRow"),
        ]
    )
    hdu = fits.PrimaryHDU(header=header)
    hdu.writeto(self.temp("test.fits"))
    with fits.open(self.temp("test.fits")) as hdul:
        header2 = hdul[0].header
        # The mixed-case HIERARCH card must survive the file round trip
        # byte for byte.
        assert str(header.cards[header.index("key.META_0")]) == str(
            header2.cards[header2.index("key.META_0")]
        )
def test_missing_keyword(self):
    """Test that accessing a non-existent keyword raises a KeyError."""
    header = fits.Header()
    # De-referencing header through the inline function should behave
    # identically to accessing it in the pytest.raises context below.
    pytest.raises(KeyError, lambda k: header[k], "NAXIS")
    # Test exception with message
    with pytest.raises(KeyError, match=r"Keyword 'NAXIS' not found."):
        header["NAXIS"]
def test_hierarch_card_lookup(self):
    hdr = fits.Header()
    hdr["hierarch abcdefghi"] = 10
    assert "abcdefghi" in hdr
    assert hdr["abcdefghi"] == 10
    # This used to be assert_false, but per ticket
    # https://aeon.stsci.edu/ssb/trac/pyfits/ticket/155 hierarch keywords
    # should be treated case-insensitively when performing lookups
    assert "ABCDEFGHI" in hdr
def test_hierarch_card_delete(self):
    # Deleting via the full 'hierarch ...' spelling must work.
    hdr = fits.Header()
    hdr["hierarch abcdefghi"] = 10
    del hdr["hierarch abcdefghi"]
def test_hierarch_card_insert_delete(self):
    """Insert/delete of auto-HIERARCH cards by keyword and by index."""
    header = fits.Header()
    with pytest.warns(
        fits.verify.VerifyWarning, match=r"greater than 8 characters"
    ):
        header["abcdefghi"] = 10
    header["abcdefgh"] = 10
    header["abcdefg"] = 10
    with pytest.warns(
        fits.verify.VerifyWarning, match=r"greater than 8 characters"
    ):
        header.insert(2, ("abcdefghij", 10))
    del header["abcdefghij"]
    with pytest.warns(
        fits.verify.VerifyWarning, match=r"greater than 8 characters"
    ):
        header.insert(2, ("abcdefghij", 10))
    # Deleting by numeric index removes the inserted HIERARCH card too.
    del header[2]
    assert list(header.keys())[2] == "abcdefg".upper()
def test_hierarch_create_and_update(self):
    """
    Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158

    Tests several additional use cases for working with HIERARCH cards.
    """
    msg = "a HIERARCH card will be created"

    header = fits.Header()
    with pytest.warns(VerifyWarning) as w:
        header.update({"HIERARCH BLAH BLAH": "TESTA"})
        assert len(w) == 0
        assert "BLAH BLAH" in header
        assert header["BLAH BLAH"] == "TESTA"

        header.update({"HIERARCH BLAH BLAH": "TESTB"})
        assert len(w) == 0
        # Fixed: these assertions used to read ``assert header[...], "TESTX"``
        # -- the comma made them truthiness asserts with a message, so the
        # expected value was never actually compared.
        assert header["BLAH BLAH"] == "TESTB"

        # Update without explicitly stating 'HIERARCH':
        header.update({"BLAH BLAH": "TESTC"})
        assert len(w) == 1
        assert len(header) == 1
        assert header["BLAH BLAH"] == "TESTC"

        # Test case-insensitivity
        header.update({"HIERARCH blah blah": "TESTD"})
        assert len(w) == 1
        assert len(header) == 1
        assert header["blah blah"] == "TESTD"

        header.update({"blah blah": "TESTE"})
        assert len(w) == 2
        assert len(header) == 1
        assert header["blah blah"] == "TESTE"

        # Create a HIERARCH card > 8 characters without explicitly stating
        # 'HIERARCH'
        header.update({"BLAH BLAH BLAH": "TESTA"})
        assert len(w) == 3
        assert msg in str(w[0].message)

        header.update({"HIERARCH BLAH BLAH BLAH": "TESTB"})
        assert len(w) == 3
        assert header["BLAH BLAH BLAH"] == "TESTB"

        # Update without explicitly stating 'HIERARCH':
        header.update({"BLAH BLAH BLAH": "TESTC"})
        assert len(w) == 4
        assert header["BLAH BLAH BLAH"] == "TESTC"

        # Test case-insensitivity
        header.update({"HIERARCH blah blah blah": "TESTD"})
        assert len(w) == 4
        assert header["blah blah blah"] == "TESTD"

        header.update({"blah blah blah": "TESTE"})
        assert len(w) == 5
        assert header["blah blah blah"] == "TESTE"
def test_short_hierarch_create_and_update(self):
    """
    Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158

    Tests several additional use cases for working with HIERARCH cards,
    specifically where the keyword is fewer than 8 characters, but contains
    invalid characters such that it can only be created as a HIERARCH card.
    """
    msg = "a HIERARCH card will be created"

    header = fits.Header()
    with pytest.warns(VerifyWarning) as w:
        header.update({"HIERARCH BLA BLA": "TESTA"})
        assert len(w) == 0
        assert "BLA BLA" in header
        assert header["BLA BLA"] == "TESTA"

        header.update({"HIERARCH BLA BLA": "TESTB"})
        assert len(w) == 0
        # Fixed: these assertions used to read ``assert header[...], "TESTX"``
        # -- the comma made them truthiness asserts with a message, so the
        # expected value was never actually compared.
        assert header["BLA BLA"] == "TESTB"

        # Update without explicitly stating 'HIERARCH':
        header.update({"BLA BLA": "TESTC"})
        assert len(w) == 1
        assert header["BLA BLA"] == "TESTC"

        # Test case-insensitivity
        header.update({"HIERARCH bla bla": "TESTD"})
        assert len(w) == 1
        assert len(header) == 1
        assert header["bla bla"] == "TESTD"

        header.update({"bla bla": "TESTE"})
        assert len(w) == 2
        assert len(header) == 1
        assert header["bla bla"] == "TESTE"

    header = fits.Header()
    with pytest.warns(VerifyWarning) as w:
        # Create a HIERARCH card containing invalid characters without
        # explicitly stating 'HIERARCH'
        # (a leftover debug ``print`` of the warning categories was removed)
        header.update({"BLA BLA": "TESTA"})
        assert len(w) == 1
        assert msg in str(w[0].message)

        header.update({"HIERARCH BLA BLA": "TESTB"})
        assert len(w) == 1
        assert header["BLA BLA"] == "TESTB"

        # Update without explicitly stating 'HIERARCH':
        header.update({"BLA BLA": "TESTC"})
        assert len(w) == 2
        assert header["BLA BLA"] == "TESTC"

        # Test case-insensitivity
        header.update({"HIERARCH bla bla": "TESTD"})
        assert len(w) == 2
        assert len(header) == 1
        assert header["bla bla"] == "TESTD"

        header.update({"bla bla": "TESTE"})
        assert len(w) == 3
        assert len(header) == 1
        assert header["bla bla"] == "TESTE"
def test_header_setitem_invalid(self):
    # A 3-tuple is not a valid (value, comment) assignment.
    header = fits.Header()
    with pytest.raises(ValueError):
        header["FOO"] = ("bar", "baz", "qux")
def test_header_setitem_1tuple(self):
    hdr = fits.Header()
    hdr["FOO"] = ("BAR",)
    hdr["FOO2"] = (None,)
    # A 1-tuple supplies only the value; the comment defaults to ''.
    assert hdr["FOO"] == "BAR"
    assert hdr["FOO2"] is None
    assert hdr[0] == "BAR"
    assert hdr.comments[0] == ""
    assert hdr.comments["FOO"] == ""
def test_header_setitem_2tuple(self):
    hdr = fits.Header()
    hdr["FOO"] = ("BAR", "BAZ")
    hdr["FOO2"] = (None, None)
    # A 2-tuple supplies (value, comment); None comment reads back as ''.
    assert hdr["FOO"] == "BAR"
    assert hdr["FOO2"] is None
    assert hdr[0] == "BAR"
    assert hdr.comments[0] == "BAZ"
    assert hdr.comments["FOO"] == "BAZ"
    assert hdr.comments["FOO2"] == ""
def test_header_set_value_to_none(self):
    """
    Setting the value of a card to None should simply give that card an
    undefined value.  Undefined value should map to None.
    """
    header = fits.Header()
    header["FOO"] = "BAR"
    assert header["FOO"] == "BAR"
    header["FOO"] = None
    assert header["FOO"] is None
    # Create a header that contains an undefined value and a defined
    # value.
    hstr = "UNDEF = \nDEFINED = 42"
    header = fits.Header.fromstring(hstr, sep="\n")
    # Explicitly add a card with an UNDEFINED value
    c = fits.Card("UNDEF2", fits.card.UNDEFINED)
    header.extend([c])
    # And now assign an undefined value to the header through setitem
    header["UNDEF3"] = fits.card.UNDEFINED
    # Tuple assignment
    header.append(("UNDEF5", None, "Undefined value"), end=True)
    header.append("UNDEF6")
    assert header["DEFINED"] == 42
    assert header["UNDEF"] is None
    assert header["UNDEF2"] is None
    assert header["UNDEF3"] is None
    assert header["UNDEF5"] is None
    assert header["UNDEF6"] is None
    # Assign an undefined value to a new card
    header["UNDEF4"] = None
    # Overwrite an existing value with None
    header["DEFINED"] = None
    # All headers now should be undefined
    for c in header.cards:
        assert c.value == fits.card.UNDEFINED
def test_set_comment_only(self):
    hdr = fits.Header([("A", "B", "C")])
    # set() with only a comment leaves the value untouched.
    hdr.set("A", comment="D")
    assert hdr["A"] == "B"
    assert hdr.comments["A"] == "D"
def test_header_iter(self):
    # Iterating a header yields its keywords in card order.
    header = fits.Header([("A", "B"), ("C", "D")])
    assert [keyword for keyword in header] == ["A", "C"]
def test_header_slice(self):
    header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
    # Slicing returns a new Header restricted to the selected cards.
    sub = header[1:]
    assert len(sub) == 2
    assert "A" not in sub
    assert "C" in sub
    assert "E" in sub
    # Negative-step slices reverse the card order.
    sub = header[::-1]
    assert len(sub) == 3
    assert sub[0] == "F"
    assert sub[1] == "D"
    assert sub[2] == "B"
    # Strided slices pick every other card.
    sub = header[::2]
    assert len(sub) == 2
    assert "A" in sub
    assert "C" not in sub
    assert "E" in sub
def test_header_slice_assignment(self):
    """
    Assigning to a slice should just assign new values to the cards
    included in the slice.
    """
    header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
    # Test assigning slice to the same value; this works similarly to numpy
    # arrays
    header[1:] = 1
    assert header[1] == 1
    assert header[2] == 1
    # Though strings are iterable they should be treated as a scalar value
    header[1:] = "GH"
    assert header[1] == "GH"
    assert header[2] == "GH"
    # Now assign via an iterable
    header[1:] = ["H", "I"]
    assert header[1] == "H"
    assert header[2] == "I"
def test_header_slice_delete(self):
    """Test deleting a slice of cards from the header."""
    hdr = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
    del hdr[1:]
    assert len(hdr) == 1
    assert hdr[0] == "B"
    # Deleting the full slice empties the header.
    del hdr[:]
    assert len(hdr) == 0
def test_wildcard_slice(self):
    """Test selecting a subsection of a header via wildcard matching."""
    hdr = fits.Header(
        [("ABC", 0), ("DEF", 1), ("ABD", 2)]  # codespell:ignore abd
    )
    # Both AB-prefixed keywords match; DEF does not.
    matched = hdr["AB*"]
    assert len(matched) == 2
    assert matched[0] == 0
    assert matched[1] == 2
def test_wildcard_with_hyphen(self):
    """
    Regression test for issue where wildcards did not work on keywords
    containing hyphens.
    """
    hdr = fits.Header([("DATE", 1), ("DATE-OBS", 2), ("DATE-FOO", 3)])
    # '*' alone matches bare DATE too; '?' and '-' require a longer key.
    assert len(hdr["DATE*"]) == 3
    assert len(hdr["DATE?*"]) == 2
    assert len(hdr["DATE-*"]) == 2
def test_wildcard_slice_assignment(self):
    """Test assigning to a header slice selected via wildcard matching."""
    header = fits.Header(
        [("ABC", 0), ("DEF", 1), ("ABD", 2)]  # codespell:ignore abd
    )
    # Test assigning slice to the same value; this works similarly to numpy
    # arrays
    header["AB*"] = 1
    assert header[0] == 1
    assert header[2] == 1
    # Though strings are iterable they should be treated as a scalar value
    header["AB*"] = "GH"
    assert header[0] == "GH"
    assert header[2] == "GH"
    # Now assign via an iterable
    header["AB*"] = ["H", "I"]
    assert header[0] == "H"
    assert header[2] == "I"
def test_wildcard_slice_deletion(self):
    """Test deleting cards from a header that match a wildcard pattern."""
    hdr = fits.Header(
        [("ABC", 0), ("DEF", 1), ("ABD", 2)]  # codespell:ignore abd
    )
    # Both AB-prefixed cards go away; only DEF remains.
    del hdr["AB*"]
    assert len(hdr) == 1
    assert hdr[0] == 1
def test_header_history(self):
    # All HISTORY values are gathered in order, regardless of position.
    hdr = fits.Header(
        [
            ("ABC", 0),
            ("HISTORY", 1),
            ("HISTORY", 2),
            ("DEF", 3),
            ("HISTORY", 4),
            ("HISTORY", 5),
        ]
    )
    assert hdr["HISTORY"] == [1, 2, 4, 5]
def test_header_clear(self):
    hdr = fits.Header([("A", "B"), ("C", "D")])
    hdr.clear()
    # clear() removes every card.
    assert "A" not in hdr
    assert "C" not in hdr
    assert len(hdr) == 0
@pytest.mark.parametrize("fitsext", [fits.ImageHDU(), fits.CompImageHDU()])
def test_header_clear_write(self, fitsext):
    """Writing an HDU whose header was cleared must fail verification."""
    hdulist = fits.HDUList([fits.PrimaryHDU(), fitsext])
    hdulist[1].header["FOO"] = "BAR"
    # Clearing removes required structural cards such as XTENSION
    hdulist[1].header.clear()
    with pytest.raises(VerifyError) as err:
        hdulist.writeto(self.temp("temp.fits"), overwrite=True)
    err_msg = "'XTENSION' card does not exist."
    assert err_msg in str(err.value)
def test_header_fromkeys(self):
    """Header.fromkeys with no value gives None values and empty comments."""
    hdr = fits.Header.fromkeys(["A", "B"])
    for keyword in ("A", "B"):
        assert keyword in hdr
        assert hdr[keyword] is None
        assert hdr.comments[keyword] == ""
def test_header_fromkeys_with_value(self):
    """Header.fromkeys applies the same value to every keyword."""
    hdr = fits.Header.fromkeys(["A", "B"], "C")
    for keyword in ("A", "B"):
        assert keyword in hdr
        assert hdr[keyword] == "C"
        assert hdr.comments[keyword] == ""
def test_header_fromkeys_with_value_and_comment(self):
    """A (value, comment) tuple passed to fromkeys sets both fields."""
    header = fits.Header.fromkeys(["A"], ("B", "C"))
    assert "A" in header
    assert header["A"] == "B"
    assert header.comments["A"] == "C"
def test_header_fromkeys_with_duplicates(self):
    """Duplicate keywords in fromkeys create multiple cards, addressable
    by (keyword, n) tuples."""
    header = fits.Header.fromkeys(["A", "B", "A"], "C")
    assert "A" in header
    assert ("A", 0) in header
    assert ("A", 1) in header
    # Only two A cards exist, so index 2 is absent
    assert ("A", 2) not in header
    assert header[0] == "C"
    assert header["A"] == "C"
    assert header[("A", 0)] == "C"
    # The second A card sits at positional index 2 (after B)
    assert header[2] == "C"
    assert header[("A", 1)] == "C"
def test_header_items(self):
    """items() yields (keyword, value) pairs in card order."""
    hdr = fits.Header([("A", "B"), ("C", "D")])
    expected = [("A", "B"), ("C", "D")]
    assert list(hdr.items()) == expected
def test_header_iterkeys(self):
    """Iterating a header yields the same sequence as keys()."""
    hdr = fits.Header([("A", "B"), ("C", "D")])
    assert all(k1 == k2 for k1, k2 in zip(hdr.keys(), hdr))
def test_header_itervalues(self):
    """values() yields card values in order."""
    hdr = fits.Header([("A", "B"), ("C", "D")])
    assert list(hdr.values()) == ["B", "D"]
def test_header_keys(self):
    """Iterating a header read from a file yields its keywords in order."""
    with fits.open(self.data("arange.fits")) as hdul:
        assert list(hdul[0].header) == [
            "SIMPLE",
            "BITPIX",
            "NAXIS",
            "NAXIS1",
            "NAXIS2",
            "NAXIS3",
            "EXTEND",
        ]
def test_header_list_like_pop(self):
    """pop() with no argument or a positional index behaves like list.pop,
    returning the popped card's *value*."""
    header = fits.Header([("A", "B"), ("C", "D"), ("E", "F"), ("G", "H")])
    # No argument pops the last card
    last = header.pop()
    assert last == "H"
    assert len(header) == 3
    assert list(header) == ["A", "C", "E"]
    mid = header.pop(1)
    assert mid == "D"
    assert len(header) == 2
    assert list(header) == ["A", "E"]
    first = header.pop(0)
    assert first == "B"
    assert len(header) == 1
    assert list(header) == ["E"]
    # Out-of-range index raises IndexError, as for lists
    pytest.raises(IndexError, header.pop, 42)
def test_header_dict_like_pop(self):
    """pop() with a keyword behaves like dict.pop, including the optional
    default argument."""
    header = fits.Header([("A", "B"), ("C", "D"), ("E", "F"), ("G", "H")])
    # At most two arguments (keyword, default) are accepted
    pytest.raises(TypeError, header.pop, "A", "B", "C")
    last = header.pop("G")
    assert last == "H"
    assert len(header) == 3
    assert list(header) == ["A", "C", "E"]
    mid = header.pop("C")
    assert mid == "D"
    assert len(header) == 2
    assert list(header) == ["A", "E"]
    first = header.pop("A")
    assert first == "B"
    assert len(header) == 1
    assert list(header) == ["E"]
    # Missing keyword with a default returns the default without changes
    default = header.pop("X", "Y")
    assert default == "Y"
    assert len(header) == 1
    # Missing keyword without a default raises KeyError
    pytest.raises(KeyError, header.pop, "X")
def test_popitem(self):
    """popitem() removes one card at a time and raises when empty."""
    hdr = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
    # Pop all three cards; each popped keyword must be gone afterwards.
    for remaining in (2, 1, 0):
        keyword, value = hdr.popitem()
        assert keyword not in hdr
        assert len(hdr) == remaining
    # An empty header can no longer pop anything.
    pytest.raises(KeyError, hdr.popitem)
def test_setdefault(self):
    """setdefault behaves like dict.setdefault: returns the existing value,
    or inserts and returns the default."""
    header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
    # Existing keywords: value returned, nothing added
    assert header.setdefault("A") == "B"
    assert header.setdefault("C") == "D"
    assert header.setdefault("E") == "F"
    assert len(header) == 3
    # Missing keyword: default inserted and returned
    assert header.setdefault("G", "H") == "H"
    assert len(header) == 4
    assert "G" in header
    # Second call finds the now-existing card; no further growth
    assert header.setdefault("G", "H") == "H"
    assert len(header) == 4
def test_update_from_dict(self):
    """
    Test adding new cards and updating existing cards from a dict using
    Header.update()
    """
    header = fits.Header([("A", "B"), ("C", "D")])
    header.update({"A": "E", "F": "G"})
    # Existing card updated in place (same position)...
    assert header["A"] == "E"
    assert header[0] == "E"
    # ...new card appended at the end
    assert "F" in header
    assert header["F"] == "G"
    assert header[-1] == "G"
    # Same as above but this time pass the update dict as keyword arguments
    header = fits.Header([("A", "B"), ("C", "D")])
    header.update(A="E", F="G")
    assert header["A"] == "E"
    assert header[0] == "E"
    assert "F" in header
    assert header["F"] == "G"
    assert header[-1] == "G"
def test_update_from_iterable(self):
    """
    Test adding new cards and updating existing cards from an iterable of
    cards and card tuples.
    """
    header = fits.Header([("A", "B"), ("C", "D")])
    # The iterable may mix plain tuples and Card objects
    header.update([("A", "E"), fits.Card("F", "G")])
    assert header["A"] == "E"
    assert header[0] == "E"
    assert "F" in header
    assert header["F"] == "G"
    assert header[-1] == "G"
def test_header_extend(self):
    """
    Test extending a header both with and without stripping cards from the
    extension header.
    """
    hdu = fits.PrimaryHDU()
    hdu2 = fits.ImageHDU()
    hdu2.header["MYKEY"] = ("some val", "some comment")
    # += strips structural cards (XTENSION etc.) from the source header
    hdu.header += hdu2.header
    assert len(hdu.header) == 5
    assert hdu.header[-1] == "some val"
    # Same thing, but using + instead of +=
    hdu = fits.PrimaryHDU()
    hdu.header = hdu.header + hdu2.header
    assert len(hdu.header) == 5
    assert hdu.header[-1] == "some val"
    # Directly append the other header in full--not usually a desirable
    # operation when the header is coming from another HDU
    hdu.header.extend(hdu2.header, strip=False)
    assert len(hdu.header) == 11
    # With strip=False the structural XTENSION card comes along
    assert list(hdu.header)[5] == "XTENSION"
    assert hdu.header[-1] == "some val"
    # MYKEY now appears twice; the duplicate is (MYKEY, 1)
    assert ("MYKEY", 1) in hdu.header
def test_header_extend_unique(self):
    """
    Test extending the header with and without unique=True.
    """
    hdu = fits.PrimaryHDU()
    hdu2 = fits.ImageHDU()
    hdu.header["MYKEY"] = ("some val", "some comment")
    hdu2.header["MYKEY"] = ("some other val", "some other comment")
    # Default: duplicate MYKEY is appended
    hdu.header.extend(hdu2.header)
    assert len(hdu.header) == 6
    assert hdu.header[-2] == "some val"
    assert hdu.header[-1] == "some other val"
    hdu = fits.PrimaryHDU()
    hdu2 = fits.ImageHDU()
    hdu.header["MYKEY"] = ("some val", "some comment")
    hdu2.header["MYKEY"] = ("some other val", "some other comment")
    # unique=True: the incoming duplicate keyword is skipped
    hdu.header.extend(hdu2.header, unique=True)
    assert len(hdu.header) == 5
    assert hdu.header[-1] == "some val"
def test_header_extend_unique_commentary(self):
    """
    Test extending header with and without unique=True and commentary
    cards in the header being added. Issue astropy/astropy#3967
    """
    # "" is the blank-keyword commentary card
    for commentary_card in ["", "COMMENT", "HISTORY"]:
        for is_unique in [True, False]:
            hdu = fits.PrimaryHDU()
            # Make sure we are testing the case we want.
            assert commentary_card not in hdu.header
            hdu2 = fits.ImageHDU()
            hdu2.header[commentary_card] = "My text"
            hdu.header.extend(hdu2.header, unique=is_unique)
            # Either way the commentary card must come across exactly once
            assert len(hdu.header) == 5
            assert hdu.header[commentary_card][0] == "My text"
def test_header_extend_update(self):
    """
    Test extending the header with and without update=True.
    """
    hdu = fits.PrimaryHDU()
    hdu2 = fits.ImageHDU()
    hdu.header["MYKEY"] = ("some val", "some comment")
    hdu.header["HISTORY"] = "history 1"
    hdu2.header["MYKEY"] = ("some other val", "some other comment")
    hdu2.header["HISTORY"] = "history 1"
    hdu2.header["HISTORY"] = "history 2"
    # Default extend: duplicates are simply appended
    hdu.header.extend(hdu2.header)
    assert len(hdu.header) == 9
    assert ("MYKEY", 0) in hdu.header
    assert ("MYKEY", 1) in hdu.header
    assert hdu.header[("MYKEY", 1)] == "some other val"
    # "history 1" now appears twice, plus "history 2"
    assert len(hdu.header["HISTORY"]) == 3
    assert hdu.header[-1] == "history 2"
    hdu = fits.PrimaryHDU()
    hdu.header["MYKEY"] = ("some val", "some comment")
    hdu.header["HISTORY"] = "history 1"
    # update=True: existing MYKEY is overwritten and duplicate
    # commentary values are not re-added
    hdu.header.extend(hdu2.header, update=True)
    assert len(hdu.header) == 7
    assert ("MYKEY", 0) in hdu.header
    assert ("MYKEY", 1) not in hdu.header
    assert hdu.header["MYKEY"] == "some other val"
    assert len(hdu.header["HISTORY"]) == 2
    assert hdu.header[-1] == "history 2"
def test_header_extend_update_commentary(self):
    """
    Test extending header with and without unique=True and commentary
    cards in the header being added.
    Though not quite the same as astropy/astropy#3967, update=True hits
    the same if statement as that issue.
    """
    # "" is the blank-keyword commentary card
    for commentary_card in ["", "COMMENT", "HISTORY"]:
        for is_update in [True, False]:
            hdu = fits.PrimaryHDU()
            # Make sure we are testing the case we want.
            assert commentary_card not in hdu.header
            hdu2 = fits.ImageHDU()
            hdu2.header[commentary_card] = "My text"
            hdu.header.extend(hdu2.header, update=is_update)
            # The commentary card must come across exactly once
            assert len(hdu.header) == 5
            assert hdu.header[commentary_card][0] == "My text"
def test_header_extend_exact(self):
    """
    Test that extending an empty header with the contents of an existing
    header can exactly duplicate that header, given strip=False and
    end=True.
    """
    header = fits.getheader(self.data("test0.fits"))
    header2 = fits.Header()
    # strip=False keeps structural cards; end=True preserves ordering
    header2.extend(header, strip=False, end=True)
    assert header == header2
def test_header_count(self):
    """count() reports occurrences per keyword; missing keywords raise."""
    hdr = fits.Header([("A", "B"), ("C", "D"), ("E", "F")])
    # Each unique keyword appears exactly once.
    for keyword in ("A", "C", "E"):
        assert hdr.count(keyword) == 1
    # Commentary keywords may repeat and are counted per card.
    hdr["HISTORY"] = "a"
    hdr["HISTORY"] = "b"
    assert hdr.count("HISTORY") == 2
    # Unlike list.count, an absent keyword is an error.
    pytest.raises(KeyError, hdr.count, "G")
def test_header_append_use_blanks(self):
    """
    Tests that blank cards can be appended, and that future appends will
    use blank cards when available (unless useblanks=False)
    """
    header = fits.Header([("A", "B"), ("C", "D")])
    # Append a couple blanks
    header.append()
    header.append()
    assert len(header) == 4
    assert header[-1] == ""
    assert header[-2] == ""
    # New card should fill the first blank by default
    header.append(("E", "F"))
    # Length unchanged: one blank was consumed
    assert len(header) == 4
    assert header[-2] == "F"
    assert header[-1] == ""
    # This card should not use up a blank spot
    header.append(("G", "H"), useblanks=False)
    assert len(header) == 5
    assert header[-1] == ""
    assert header[-2] == "H"
def test_header_append_keyword_only(self):
    """
    Test appending a new card with just the keyword, and no value or
    comment given.
    """
    header = fits.Header([("A", "B"), ("C", "D")])
    header.append("E")
    assert len(header) == 3
    assert list(header)[-1] == "E"
    # Keyword-only cards have a None value and empty comment
    assert header[-1] is None
    assert header.comments["E"] == ""
    # Try appending a blank--normally this can be accomplished with just
    # header.append(), but header.append('') should also work (and is maybe
    # a little more clear)
    header.append("")
    assert len(header) == 4
    assert list(header)[-1] == ""
    assert header[""] == ""
    assert header.comments[""] == ""
def test_header_insert_use_blanks(self):
    """Inserting a card consumes a trailing blank card unless
    useblanks=False."""
    header = fits.Header([("A", "B"), ("C", "D")])
    # Append a couple blanks
    header.append()
    header.append()
    # Insert a new card; should use up one of the blanks
    header.insert(1, ("E", "F"))
    assert len(header) == 4
    assert header[1] == "F"
    assert header[-1] == ""
    assert header[-2] == "D"
    # Insert a new card without using blanks
    header.insert(1, ("G", "H"), useblanks=False)
    assert len(header) == 5
    assert header[1] == "H"
    assert header[-1] == ""
def test_header_insert_before_keyword(self):
    """
    Test that a keyword name or tuple can be used to insert new keywords.
    Also tests the ``after`` keyword argument.
    Regression test for https://github.com/spacetelescope/PyFITS/issues/12
    """
    header = fits.Header(
        [("NAXIS1", 10), ("COMMENT", "Comment 1"), ("COMMENT", "Comment 3")]
    )
    # Insert before an existing keyword by name
    header.insert("NAXIS1", ("NAXIS", 2, "Number of axes"))
    assert list(header.keys())[0] == "NAXIS"
    assert header[0] == 2
    assert header.comments[0] == "Number of axes"
    # after=True inserts immediately following the named keyword
    header.insert("NAXIS1", ("NAXIS2", 20), after=True)
    assert list(header.keys())[1] == "NAXIS1"
    assert list(header.keys())[2] == "NAXIS2"
    assert header[2] == 20
    # Duplicate (commentary) keywords can be addressed by (keyword, n)
    header.insert(("COMMENT", 1), ("COMMENT", "Comment 2"))
    assert header["COMMENT"] == ["Comment 1", "Comment 2", "Comment 3"]
    header.insert(("COMMENT", 2), ("COMMENT", "Comment 4"), after=True)
    assert header["COMMENT"] == ["Comment 1", "Comment 2", "Comment 3", "Comment 4"]
    # Negative indices count from the end, as for lists
    header.insert(-1, ("TEST1", True))
    assert list(header.keys())[-2] == "TEST1"
    header.insert(-1, ("TEST2", True), after=True)
    assert list(header.keys())[-1] == "TEST2"
    assert list(header.keys())[-3] == "TEST1"
def test_remove(self):
    """Header.remove deletes a keyword, honoring ignore_missing and
    remove_all options."""
    header = fits.Header([("A", "B"), ("C", "D")])
    # When keyword is present in the header it should be removed.
    header.remove("C")
    assert len(header) == 1
    assert list(header) == ["A"]
    assert "C" not in header
    # When keyword is not present in the header and ignore_missing is
    # False, KeyError should be raised
    with pytest.raises(KeyError):
        header.remove("F")
    # When keyword is not present and ignore_missing is True, KeyError
    # will be ignored
    header.remove("F", ignore_missing=True)
    assert len(header) == 1
    # Test for removing all instances of a keyword
    header = fits.Header([("A", "B"), ("C", "D"), ("A", "F")])
    header.remove("A", remove_all=True)
    assert "A" not in header
    assert len(header) == 1
    assert list(header) == ["C"]
    assert header[0] == "D"
def test_header_comments(self):
    """repr of the comments view lists keyword/comment pairs."""
    hdr = fits.Header([("A", "B", "C"), ("DEF", "G", "H")])
    expected = " A C\n DEF H"
    assert repr(hdr.comments) == expected
def test_comment_slices_and_filters(self):
    """The comments view supports slicing and wildcard filtering just like
    the header itself."""
    header = fits.Header([("AB", "C", "D"), ("EF", "G", "H"), ("AI", "J", "K")])
    s = header.comments[1:]
    assert list(s) == ["H", "K"]
    # Negative-step slices work too
    s = header.comments[::-1]
    assert list(s) == ["K", "H", "D"]
    # Wildcard selects comments of the matching cards (AB and AI)
    s = header.comments["A*"]
    assert list(s) == ["D", "K"]
def test_comment_slice_filter_assign(self):
    """Assigning to comment slices/filters updates the underlying cards."""
    header = fits.Header([("AB", "C", "D"), ("EF", "G", "H"), ("AI", "J", "K")])
    # Scalar assignment broadcasts over the slice
    header.comments[1:] = "L"
    assert list(header.comments) == ["D", "L", "L"]
    assert header.cards[header.index("AB")].comment == "D"
    assert header.cards[header.index("EF")].comment == "L"
    assert header.cards[header.index("AI")].comment == "L"
    # Reversed slice assignment effectively reverses the comment order
    header.comments[::-1] = header.comments[:]
    assert list(header.comments) == ["L", "L", "D"]
    # Wildcard assignment pairs values with matched cards in order
    header.comments["A*"] = ["M", "N"]
    assert list(header.comments) == ["M", "L", "N"]
def test_commentary_slicing(self):
    """Slicing the list of values for a commentary keyword mirrors normal
    list slicing semantics."""
    header = fits.Header()
    indices = list(range(5))
    for idx in indices:
        header["HISTORY"] = idx
    # Just a few sample slice types; this won't get all corner cases but if
    # these all work we should be in good shape
    assert header["HISTORY"][1:] == indices[1:]
    assert header["HISTORY"][:3] == indices[:3]
    assert header["HISTORY"][:6] == indices[:6]
    assert header["HISTORY"][:-2] == indices[:-2]
    assert header["HISTORY"][::-1] == indices[::-1]
    assert header["HISTORY"][1::-1] == indices[1::-1]
    assert header["HISTORY"][1:5:2] == indices[1:5:2]
    # Same tests, but copy the values first; as it turns out this is
    # different from just directly doing an __eq__ as in the first set of
    # assertions
    header.insert(0, ("A", "B", "C"))
    header.append(("D", "E", "F"), end=True)
    assert list(header["HISTORY"][1:]) == indices[1:]
    assert list(header["HISTORY"][:3]) == indices[:3]
    assert list(header["HISTORY"][:6]) == indices[:6]
    assert list(header["HISTORY"][:-2]) == indices[:-2]
    assert list(header["HISTORY"][::-1]) == indices[::-1]
    assert list(header["HISTORY"][1::-1]) == indices[1::-1]
    assert list(header["HISTORY"][1:5:2]) == indices[1:5:2]
def test_update_commentary(self):
    """Commentary values can be updated in place, by index or slice."""
    header = fits.Header()
    header["FOO"] = "BAR"
    header["HISTORY"] = "ABC"
    header["FRED"] = "BARNEY"
    header["HISTORY"] = "DEF"
    header["HISTORY"] = "GHI"
    assert header["HISTORY"] == ["ABC", "DEF", "GHI"]
    # Single value update
    header["HISTORY"][0] = "FOO"
    assert header["HISTORY"] == ["FOO", "DEF", "GHI"]
    # Single value partial slice update (scalar broadcast over the slice)
    header["HISTORY"][1:] = "BAR"
    assert header["HISTORY"] == ["FOO", "BAR", "BAR"]
    # Multi-value update; fewer values than cards updates only the first
    header["HISTORY"][:] = ["BAZ", "QUX"]
    assert header["HISTORY"] == ["BAZ", "QUX", "BAR"]
def test_commentary_comparison(self):
    """
    Regression test for an issue found in *writing* the regression test for
    https://github.com/astropy/astropy/issues/2363, where comparison of
    the list of values for a commentary keyword did not always compare
    correctly with other iterables.
    """
    header = fits.Header()
    header["HISTORY"] = "hello world"
    header["HISTORY"] = "hello world"
    header["COMMENT"] = "hello world"
    # Two HISTORY values vs one COMMENT value: unequal
    assert header["HISTORY"] != header["COMMENT"]
    header["COMMENT"] = "hello world"
    # Now both have two identical values: equal
    assert header["HISTORY"] == header["COMMENT"]
def test_long_commentary_card(self):
    """Commentary values longer than one card are split over consecutive
    cards, via both item assignment and add_history."""
    header = fits.Header()
    header["FOO"] = "BAR"
    header["BAZ"] = "QUX"
    # 90 characters: needs two HISTORY cards (72 value chars per card)
    longval = "ABC" * 30
    header["HISTORY"] = longval
    header["FRED"] = "BARNEY"
    header["HISTORY"] = longval
    assert len(header) == 7
    assert list(header)[2] == "FRED"
    assert str(header.cards[3]) == "HISTORY " + longval[:72]
    assert str(header.cards[4]).rstrip() == "HISTORY " + longval[72:]
    # set() with after= places both continuation cards together
    header.set("HISTORY", longval, after="FOO")
    assert len(header) == 9
    assert str(header.cards[1]) == "HISTORY " + longval[:72]
    assert str(header.cards[2]).rstrip() == "HISTORY " + longval[72:]
    # Same exercise through the add_history convenience API
    header = fits.Header()
    header.update({"FOO": "BAR"})
    header.update({"BAZ": "QUX"})
    longval = "ABC" * 30
    header.add_history(longval)
    header.update({"FRED": "BARNEY"})
    header.add_history(longval)
    assert len(header.cards) == 7
    assert header.cards[2].keyword == "FRED"
    assert str(header.cards[3]) == "HISTORY " + longval[:72]
    assert str(header.cards[4]).rstrip() == "HISTORY " + longval[72:]
    header.add_history(longval, after="FOO")
    assert len(header.cards) == 9
    assert str(header.cards[1]) == "HISTORY " + longval[:72]
    assert str(header.cards[2]).rstrip() == "HISTORY " + longval[72:]
def test_totxtfile(self, home_is_temp):
    """Round-trip a header through totextfile/fromtextfile, including the
    overwrite flag and extend() with update/strip options."""
    header_filename = self.temp("header.txt")
    with fits.open(self.data("test0.fits")) as hdul:
        hdul[0].header.totextfile(header_filename)
        # Check the `overwrite` flag
        with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
            hdul[0].header.totextfile(header_filename, overwrite=False)
        hdul[0].header.totextfile(header_filename, overwrite=True)
    hdu = fits.ImageHDU()
    hdu.header.update({"MYKEY": "FOO"})
    # Default strip=True removes structural cards from the text header
    hdu.header.extend(
        hdu.header.fromtextfile(header_filename), update=True, update_first=True
    )
    # Write the hdu out and read it back in again--it should be recognized
    # as a PrimaryHDU
    hdu.writeto(self.temp("test.fits"), output_verify="ignore")
    with fits.open(self.temp("test.fits")) as hdul:
        assert isinstance(hdul[0], fits.PrimaryHDU)
    hdu = fits.ImageHDU()
    hdu.header.update({"MYKEY": "FOO"})
    hdu.header.extend(
        hdu.header.fromtextfile(header_filename),
        update=True,
        update_first=True,
        strip=False,
    )
    assert "MYKEY" in hdu.header
    assert "EXTENSION" not in hdu.header
    # With strip=False the SIMPLE card from the text file survives
    assert "SIMPLE" in hdu.header
    hdu.writeto(self.temp("test.fits"), output_verify="ignore", overwrite=True)
    with fits.open(self.temp("test.fits")) as hdul2:
        assert len(hdul2) == 2
        assert "MYKEY" in hdul2[1].header
def test_tofile(self, home_is_temp):
    """
    Repeat test_totxtfile, but with tofile()
    """
    header_filename = self.temp("header.fits")
    with fits.open(self.data("test0.fits")) as hdul:
        hdul[0].header.tofile(header_filename)
        # Check the `overwrite` flag
        with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
            hdul[0].header.tofile(header_filename, overwrite=False)
        hdul[0].header.tofile(header_filename, overwrite=True)
    hdu = fits.ImageHDU()
    hdu.header.update({"MYKEY": "FOO"})
    # Default strip=True removes structural cards from the loaded header
    hdu.header.extend(
        hdu.header.fromfile(header_filename), update=True, update_first=True
    )
    # Write the hdu out and read it back in again--it should be recognized
    # as a PrimaryHDU
    hdu.writeto(self.temp("test.fits"), output_verify="ignore")
    with fits.open(self.temp("test.fits")) as hdul:
        assert isinstance(hdul[0], fits.PrimaryHDU)
    hdu = fits.ImageHDU()
    hdu.header.update({"MYKEY": "FOO"})
    hdu.header.extend(
        hdu.header.fromfile(header_filename),
        update=True,
        update_first=True,
        strip=False,
    )
    assert "MYKEY" in hdu.header
    assert "EXTENSION" not in hdu.header
    # With strip=False the SIMPLE card from the header file survives
    assert "SIMPLE" in hdu.header
    hdu.writeto(self.temp("test.fits"), output_verify="ignore", overwrite=True)
    with fits.open(self.temp("test.fits")) as hdul2:
        assert len(hdul2) == 2
        assert "MYKEY" in hdul2[1].header
def test_fromfile(self):
    """Regression test for https://github.com/astropy/astropy/issues/8711"""
    path = self.data("scale.fits")
    header = fits.Header.fromfile(path)
    # A keyword from the file must be readable from the parsed header.
    assert header["DATASET"] == "2MASS"
def test_header_fromtextfile(self):
    """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/122
    Manually write a text file containing some header cards ending with
    newlines and ensure that fromtextfile can read them back in.
    """
    header = fits.Header()
    header["A"] = ("B", "C")
    header["B"] = ("C", "D")
    header["C"] = ("D", "E")
    # Cards are stripped of their trailing padding before writing, so each
    # line ends directly in a newline
    with open(self.temp("test.hdr"), "w") as f:
        f.write("\n".join(str(c).strip() for c in header.cards))
    header2 = fits.Header.fromtextfile(self.temp("test.hdr"))
    assert header == header2
def test_header_fromtextfile_with_end_card(self):
    """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154
    Make sure that when a Header is read from a text file that the END card
    is ignored.
    """
    header = fits.Header([("A", "B", "C"), ("D", "E", "F")])
    # We don't use header.totextfile here because it writes each card with
    # trailing spaces to pad them out to 80 characters. But this bug only
    # presents itself when each card ends immediately with a newline, and
    # no trailing spaces
    with open(self.temp("test.hdr"), "w") as f:
        f.write("\n".join(str(c).strip() for c in header.cards))
        f.write("\nEND")
    new_header = fits.Header.fromtextfile(self.temp("test.hdr"))
    # END must not appear as an actual card in the parsed header
    assert "END" not in new_header
    assert header == new_header
def test_append_end_card(self):
    """
    Regression test 2 for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154
    Manually adding an END card to a header should simply result in a
    ValueError (as was the case in PyFITS 3.0 and earlier).
    """
    header = fits.Header([("A", "B", "C"), ("D", "E", "F")])

    # Helper so that item assignment can be passed to pytest.raises
    def setitem(k, v):
        header[k] = v

    # Every public mutation path must reject an explicit END card
    pytest.raises(ValueError, setitem, "END", "")
    pytest.raises(ValueError, header.append, "END")
    pytest.raises(ValueError, header.append, "END", end=True)
    pytest.raises(ValueError, header.insert, len(header), "END")
    pytest.raises(ValueError, header.set, "END")
def test_invalid_end_cards(self):
    """
    Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/217
    This tests the case where the END card looks like a normal card like
    'END = ' and other similar oddities. As long as a card starts with END
    and looks like it was intended to be the END card we allow it, but with
    a warning.
    """
    horig = fits.PrimaryHDU(data=np.arange(100)).header

    def invalid_header(end, pad):
        # Build up a goofy invalid header
        # Start from a seemingly normal header
        s = horig.tostring(sep="", endcard=False, padding=False)
        # append the bogus end card
        s += end
        # add additional padding if requested
        if pad:
            s += " " * _pad_length(len(s))
        # This will differ between Python versions
        if isinstance(s, bytes):
            return BytesIO(s)
        else:
            return StringIO(s)

    # Basic case motivated by the original issue; it's as if the END card
    # was appended by software that doesn't know to treat it specially, and
    # it is given an = after it
    s = invalid_header("END =", True)
    with pytest.warns(
        AstropyUserWarning, match="Unexpected bytes trailing END keyword: ' ='"
    ) as w:
        h = fits.Header.fromfile(s)
        assert h == horig
    assert len(w) == 1
    # A case similar to the last but with more spaces between END and the
    # =, as though the '= ' value indicator were placed like that of a
    # normal card
    s = invalid_header("END     = ", True)
    with pytest.warns(
        AstropyUserWarning, match="Unexpected bytes trailing END keyword: ' ='"
    ) as w:
        h = fits.Header.fromfile(s)
        assert h == horig
    assert len(w) == 1
    # END card with trailing gibberish
    s = invalid_header("END$%&%^*%*", True)
    with pytest.warns(
        AstropyUserWarning,
        match=r"Unexpected bytes trailing END keyword: '\$%&%\^\*%\*'",
    ) as w:
        h = fits.Header.fromfile(s)
        assert h == horig
    assert len(w) == 1
    # 'END' at the very end of a truncated file without padding; the way
    # the block reader works currently this can only happen if the 'END'
    # is at the very end of the file.
    s = invalid_header("END", False)
    with pytest.warns(
        AstropyUserWarning, match="Missing padding to end of the FITS block"
    ) as w:
        # Don't raise an exception on missing padding, but still produce a
        # warning that the END card is incomplete
        h = fits.Header.fromfile(s, padding=False)
        assert h == horig
    assert len(w) == 1
def test_invalid_characters(self):
    """
    Test header with invalid characters
    """
    # Generate invalid file with non-ASCII character
    h = fits.Header()
    h["FOO"] = "BAR"
    h["COMMENT"] = "hello"
    hdul = fits.PrimaryHDU(header=h, data=np.arange(5))
    hdul.writeto(self.temp("test.fits"))

    # Patch the raw bytes on disk to contain latin-1 accented characters
    with open(self.temp("test.fits"), "rb") as f:
        out = f.read()
    out = out.replace(b"hello", "héllo".encode("latin1"))
    out = out.replace(b"BAR", "BÀR".encode("latin1"))
    with open(self.temp("test2.fits"), "wb") as f2:
        f2.write(out)

    # Reading back should warn once and replace non-ASCII bytes with '?'
    with pytest.warns(
        AstropyUserWarning,
        match="non-ASCII characters are present in the FITS file",
    ) as w:
        h = fits.getheader(self.temp("test2.fits"))
        assert h["FOO"] == "B?R"
        assert h["COMMENT"] == "h?llo"
    assert len(w) == 1
def test_unnecessary_move(self):
    """
    Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/125
    Ensures that a header is not modified when setting the position of a
    keyword that's already in its correct position.
    """
    header = fits.Header([("A", "B"), ("B", "C"), ("C", "D")])
    # Each set() below requests a position B/C already occupies, so the
    # header's _modified flag must stay False throughout
    header.set("B", before=2)
    assert list(header) == ["A", "B", "C"]
    assert not header._modified
    header.set("B", after=0)
    assert list(header) == ["A", "B", "C"]
    assert not header._modified
    header.set("B", before="C")
    assert list(header) == ["A", "B", "C"]
    assert not header._modified
    header.set("B", after="A")
    assert list(header) == ["A", "B", "C"]
    assert not header._modified
    header.set("B", before=2)
    assert list(header) == ["A", "B", "C"]
    assert not header._modified
    # 123 is well past the end, and C is already at the end, so it's in the
    # right place already
    header.set("C", before=123)
    assert list(header) == ["A", "B", "C"]
    assert not header._modified
    header.set("C", after=123)
    assert list(header) == ["A", "B", "C"]
    assert not header._modified
def test_invalid_float_cards(self):
    """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137"""
    # Create a header containing two of the problematic cards in the test
    # case where this came up: floats with lowercase exponents
    hstr = "FOCALLEN= +1.550000000000e+002\nAPERTURE= +0.000000000000e+000"
    h = fits.Header.fromstring(hstr, sep="\n")
    # First the case that *does* work prior to fixing this issue
    assert h["FOCALLEN"] == 155.0
    assert h["APERTURE"] == 0.0
    # Now if this were reserialized, would new values for these cards be
    # written with repaired exponent signs?
    with pytest.warns(fits.verify.VerifyWarning) as w:
        assert str(h.cards["FOCALLEN"]) == _pad("FOCALLEN= +1.550000000000E+002")
    assert "Verification reported errors" in str(w[0].message)
    assert h.cards["FOCALLEN"]._modified
    with pytest.warns(fits.verify.VerifyWarning) as w:
        assert str(h.cards["APERTURE"]) == _pad("APERTURE= +0.000000000000E+000")
    assert "Verification reported errors" in str(w[0].message)
    assert h.cards["APERTURE"]._modified
    assert h._modified
    # This is the case that was specifically causing problems; generating
    # the card strings *before* parsing the values. Also, the card strings
    # really should be "fixed" before being returned to the user
    h = fits.Header.fromstring(hstr, sep="\n")
    with pytest.warns(fits.verify.VerifyWarning) as w:
        assert str(h.cards["FOCALLEN"]) == _pad("FOCALLEN= +1.550000000000E+002")
    assert "Verification reported errors" in str(w[0].message)
    assert h.cards["FOCALLEN"]._modified
    with pytest.warns(fits.verify.VerifyWarning) as w:
        assert str(h.cards["APERTURE"]) == _pad("APERTURE= +0.000000000000E+000")
    assert "Verification reported errors" in str(w[0].message)
    assert h.cards["APERTURE"]._modified
    assert h["FOCALLEN"] == 155.0
    assert h["APERTURE"] == 0.0
    assert h._modified
    # For the heck of it, try assigning the identical values and ensure
    # that the newly fixed value strings are left intact
    h["FOCALLEN"] = 155.0
    h["APERTURE"] = 0.0
    assert str(h.cards["FOCALLEN"]) == _pad("FOCALLEN= +1.550000000000E+002")
    assert str(h.cards["APERTURE"]) == _pad("APERTURE= +0.000000000000E+000")
def test_invalid_float_cards2(self, capsys):
    """
    Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/140
    """
    # The example for this test requires creating a FITS file containing a
    # slightly misformatted float value. I can't actually even find a way
    # to do that directly through Astropy--it won't let me.
    hdu = fits.PrimaryHDU()
    hdu.header["TEST"] = 5.0022221e-07
    hdu.writeto(self.temp("test.fits"))
    # Here we manually make the file invalid
    with open(self.temp("test.fits"), "rb+") as f:
        # NOTE(review): 346 is the byte offset of the exponent character in
        # the TEST card as written above; fragile if the header layout
        # changes
        f.seek(346)  # Location of the exponent 'E' symbol
        f.write(encode_ascii("e"))
    with (
        fits.open(self.temp("test.fits")) as hdul,
        pytest.warns(AstropyUserWarning) as w,
    ):
        hdul.writeto(self.temp("temp.fits"), output_verify="warn")
        assert len(w) == 5
        # The first two warnings are just the headers to the actual warning
        # message (HDU 0, Card 4).  I'm still not sure things like that
        # should be output as separate warning messages, but that's
        # something to think about...
        msg = str(w[3].message)
        assert "(invalid value string: '5.0022221e-07')" in msg
def test_leading_zeros(self):
    """
    Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137, part 2
    Ticket https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137 also showed that in
    float values like 0.001 the leading zero was unnecessarily being
    stripped off when rewriting the header. Though leading zeros should be
    removed from integer values to prevent misinterpretation as octal by
    python (for now Astropy will still maintain the leading zeros if now
    changes are made to the value, but will drop them if changes are made).
    """
    c = fits.Card.fromstring("APERTURE= +0.000000000000E+000")
    # Original value string, leading zero included, is preserved verbatim
    assert str(c) == _pad("APERTURE= +0.000000000000E+000")
    assert c.value == 0.0
    c = fits.Card.fromstring("APERTURE= 0.000000000000E+000")
    assert str(c) == _pad("APERTURE= 0.000000000000E+000")
    assert c.value == 0.0
    # Integer with a leading zero parses as decimal 17, not octal
    c = fits.Card.fromstring("APERTURE= 017")
    assert str(c) == _pad("APERTURE= 017")
    assert c.value == 17
def test_assign_boolean(self):
    """
    Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/123
    Tests assigning Python and Numpy boolean values to keyword values.
    """
    # Expected serialized card images for FITS logical T / F
    fooimg = _pad("FOO     =                    T")
    barimg = _pad("BAR     =                    F")
    # Plain Python bools
    h = fits.Header()
    h["FOO"] = True
    h["BAR"] = False
    assert h["FOO"] is True
    assert h["BAR"] is False
    assert str(h.cards["FOO"]) == fooimg
    assert str(h.cards["BAR"]) == barimg
    # NumPy bools must behave identically
    h = fits.Header()
    h["FOO"] = np.bool_(True)
    h["BAR"] = np.bool_(False)
    assert h["FOO"] is True
    assert h["BAR"] is False
    assert str(h.cards["FOO"]) == fooimg
    assert str(h.cards["BAR"]) == barimg
    # Round-trip: parsing the card images yields real Python bools
    h = fits.Header()
    h.append(fits.Card.fromstring(fooimg))
    h.append(fits.Card.fromstring(barimg))
    assert h["FOO"] is True
    assert h["BAR"] is False
    assert str(h.cards["FOO"]) == fooimg
    assert str(h.cards["BAR"]) == barimg
def test_header_method_keyword_normalization(self):
    """
    Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/149
    Basically ensures that all public Header methods are case-insensitive
    w.r.t. keywords.
    Provides a reasonably comprehensive test of several methods at once.
    """
    h = fits.Header([("abC", 1), ("Def", 2), ("GeH", 3)])
    # Keywords are normalized to upper case on input
    assert list(h) == ["ABC", "DEF", "GEH"]
    assert "abc" in h
    assert "dEf" in h
    assert h["geh"] == 3
    # Case insensitivity of wildcards
    assert len(h["g*"]) == 1
    h["aBc"] = 2
    assert h["abc"] == 2
    # ABC already existed so assigning to aBc should not have added any new
    # cards
    assert len(h) == 3
    del h["gEh"]
    assert list(h) == ["ABC", "DEF"]
    assert len(h) == 2
    assert h.get("def") == 2
    h.set("Abc", 3)
    assert h["ABC"] == 3
    h.set("gEh", 3, before="Abc")
    assert list(h) == ["GEH", "ABC", "DEF"]
    assert h.pop("abC") == 3
    assert len(h) == 2
    assert h.setdefault("def", 3) == 2
    assert len(h) == 2
    assert h.setdefault("aBc", 1) == 1
    assert len(h) == 3
    assert list(h) == ["GEH", "DEF", "ABC"]
    h.update({"GeH": 1, "iJk": 4})
    assert len(h) == 4
    assert list(h) == ["GEH", "DEF", "ABC", "IJK"]
    assert h["GEH"] == 1
    assert h.count("ijk") == 1
    assert h.index("ijk") == 3
    h.remove("Def")
    assert len(h) == 3
    assert list(h) == ["GEH", "ABC", "IJK"]
def test_end_in_comment(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/142
Tests a case where the comment of a card ends with END, and is followed
by several blank cards.
"""
data = np.arange(100).reshape(10, 10)
hdu = fits.PrimaryHDU(data=data)
hdu.header["TESTKW"] = ("Test val", "This is the END")
# Add a couple blanks after the END string
hdu.header.append()
hdu.header.append()
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits"), memmap=False) as hdul:
# memmap = False to avoid leaving open a mmap to the file when we
# access the data--this causes problems on Windows when we try to
# overwrite the file later
assert "TESTKW" in hdul[0].header
assert hdul[0].header == hdu.header
assert (hdul[0].data == data).all()
# Add blanks until the header is extended to two block sizes
while len(hdu.header) < 36:
hdu.header.append()
hdu.writeto(self.temp("test.fits"), overwrite=True)
with fits.open(self.temp("test.fits")) as hdul:
assert "TESTKW" in hdul[0].header
assert hdul[0].header == hdu.header
assert (hdul[0].data == data).all()
# Test parsing the same header when it's written to a text file
hdu.header.totextfile(self.temp("test.hdr"))
header2 = fits.Header.fromtextfile(self.temp("test.hdr"))
assert hdu.header == header2
def test_assign_unicode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/134
Assigning a unicode literal as a header value should not fail silently.
If the value can be converted to ASCII then it should just work.
Otherwise it should fail with an appropriate value error.
Also tests unicode for keywords and comments.
"""
erikku = "\u30a8\u30ea\u30c3\u30af"
def assign(keyword, val):
h[keyword] = val
h = fits.Header()
h["FOO"] = "BAR"
assert "FOO" in h
assert h["FOO"] == "BAR"
assert repr(h) == _pad("FOO = 'BAR '")
pytest.raises(ValueError, assign, erikku, "BAR")
h["FOO"] = "BAZ"
assert h["FOO"] == "BAZ"
assert repr(h) == _pad("FOO = 'BAZ '")
pytest.raises(ValueError, assign, "FOO", erikku)
h["FOO"] = ("BAR", "BAZ")
assert h["FOO"] == "BAR"
assert h.comments["FOO"] == "BAZ"
assert repr(h) == _pad("FOO = 'BAR ' / BAZ")
pytest.raises(ValueError, assign, "FOO", ("BAR", erikku))
pytest.raises(ValueError, assign, "FOO", (erikku, "BAZ"))
pytest.raises(ValueError, assign, "FOO", (erikku, erikku))
def test_assign_non_ascii(self):
"""
First regression test for
https://github.com/spacetelescope/PyFITS/issues/37
Although test_assign_unicode ensures that `str` objects containing
non-ASCII characters cannot be assigned to headers.
It should not be possible to assign bytes to a header at all.
"""
h = fits.Header()
with pytest.raises(ValueError, match="Illegal value: b'Hello'."):
h.set("TEST", b"Hello")
def test_header_strip_whitespace(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/146, and
for the solution that is optional stripping of whitespace from the end
of a header value.
By default extra whitespace is stripped off, but if
`fits.conf.strip_header_whitespace` = False it should not be
stripped.
"""
h = fits.Header()
h["FOO"] = "Bar "
assert h["FOO"] == "Bar"
c = fits.Card.fromstring("QUX = 'Bar '")
h.append(c)
assert h["QUX"] == "Bar"
assert h.cards["FOO"].image.rstrip() == "FOO = 'Bar '"
assert h.cards["QUX"].image.rstrip() == "QUX = 'Bar '"
with fits.conf.set_temp("strip_header_whitespace", False):
assert h["FOO"] == "Bar "
assert h["QUX"] == "Bar "
assert h.cards["FOO"].image.rstrip() == "FOO = 'Bar '"
assert h.cards["QUX"].image.rstrip() == "QUX = 'Bar '"
assert h["FOO"] == "Bar"
assert h["QUX"] == "Bar"
assert h.cards["FOO"].image.rstrip() == "FOO = 'Bar '"
assert h.cards["QUX"].image.rstrip() == "QUX = 'Bar '"
def test_keep_duplicate_history_in_orig_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/156
When creating a new HDU from an existing Header read from an existing
FITS file, if the original header contains duplicate HISTORY values
those duplicates should be preserved just as in the original header.
This bug occurred due to naivete in Header.extend.
"""
history = [
"CCD parameters table ...",
" reference table oref$n951041ko_ccd.fits",
" INFLIGHT 12/07/2001 25/02/2002",
" all bias frames",
] * 3
hdu = fits.PrimaryHDU()
# Add the history entries twice
for item in history:
hdu.header["HISTORY"] = item
hdu.writeto(self.temp("test.fits"))
with fits.open(self.temp("test.fits")) as hdul:
assert hdul[0].header["HISTORY"] == history
new_hdu = fits.PrimaryHDU(header=hdu.header)
assert new_hdu.header["HISTORY"] == hdu.header["HISTORY"]
new_hdu.writeto(self.temp("test2.fits"))
with fits.open(self.temp("test2.fits")) as hdul:
assert hdul[0].header["HISTORY"] == history
def test_invalid_keyword_cards(self):
"""
Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/109
Allow opening files with headers containing invalid keywords.
"""
# Create a header containing a few different types of BAD headers.
c1 = fits.Card.fromstring("CLFIND2D: contour = 0.30")
c2 = fits.Card.fromstring("Just some random text.")
c3 = fits.Card.fromstring("A" * 80)
hdu = fits.PrimaryHDU()
# This should work with some warnings
with pytest.warns(AstropyUserWarning) as w:
hdu.header.append(c1)
hdu.header.append(c2)
hdu.header.append(c3)
assert len(w) == 3
hdu.writeto(self.temp("test.fits"))
with pytest.warns(AstropyUserWarning) as w:
with fits.open(self.temp("test.fits")) as hdul:
# Merely opening the file should blast some warnings about the
# invalid keywords
assert len(w) == 3
header = hdul[0].header
assert "CLFIND2D" in header
assert "Just som" in header
assert "AAAAAAAA" in header
assert header["CLFIND2D"] == ": contour = 0.30"
assert header["Just som"] == "e random text."
assert header["AAAAAAAA"] == "A" * 72
# It should not be possible to assign to the invalid keywords
pytest.raises(ValueError, header.set, "CLFIND2D", "foo")
pytest.raises(ValueError, header.set, "Just som", "foo")
pytest.raises(ValueError, header.set, "AAAAAAAA", "foo")
def test_fix_hierarch_with_invalid_value(self, capsys):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/172
Ensures that when fixing a hierarch card it remains a hierarch card.
"""
c = fits.Card.fromstring("HIERARCH ESO DET CHIP PXSPACE = 5e6")
with pytest.warns(fits.verify.VerifyWarning) as w:
c.verify("fix")
assert "Verification reported errors" in str(w[0].message)
assert str(c) == _pad("HIERARCH ESO DET CHIP PXSPACE = 5E6")
def test_assign_inf_nan(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/11
For the time being it should not be possible to assign the floating
point values inf or nan to a header value, since this is not defined by
the FITS standard.
"""
h = fits.Header()
pytest.raises(ValueError, h.set, "TEST", float("nan"))
pytest.raises(ValueError, h.set, "TEST", np.nan)
pytest.raises(ValueError, h.set, "TEST", np.float32("nan"))
pytest.raises(ValueError, h.set, "TEST", float("inf"))
pytest.raises(ValueError, h.set, "TEST", np.inf)
def test_update_bool(self):
"""
Regression test for an issue where a value of True in a header
cannot be updated to a value of 1, and likewise for False/0.
"""
h = fits.Header([("TEST", True)])
h["TEST"] = 1
assert h["TEST"] is not True
assert isinstance(h["TEST"], int)
assert h["TEST"] == 1
h["TEST"] = np.bool_(True)
assert h["TEST"] is True
h["TEST"] = False
assert h["TEST"] is False
h["TEST"] = np.bool_(False)
assert h["TEST"] is False
h["TEST"] = 0
assert h["TEST"] is not False
assert isinstance(h["TEST"], int)
assert h["TEST"] == 0
h["TEST"] = np.bool_(False)
assert h["TEST"] is False
def test_update_numeric(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/49
Ensure that numeric values can be upcast/downcast between int, float,
and complex by assigning values that compare equal to the existing
value but are a different type.
"""
h = fits.Header()
h["TEST"] = 1
# int -> float
h["TEST"] = 1.0
assert isinstance(h["TEST"], float)
assert str(h).startswith("TEST = 1.0")
# float -> int
h["TEST"] = 1
assert isinstance(h["TEST"], int)
assert str(h).startswith("TEST = 1")
# int -> complex
h["TEST"] = 1.0 + 0.0j
assert isinstance(h["TEST"], complex)
assert str(h).startswith("TEST = (1.0, 0.0)")
# complex -> float
h["TEST"] = 1.0
assert isinstance(h["TEST"], float)
assert str(h).startswith("TEST = 1.0")
# float -> complex
h["TEST"] = 1.0 + 0.0j
assert isinstance(h["TEST"], complex)
assert str(h).startswith("TEST = (1.0, 0.0)")
# complex -> int
h["TEST"] = 1
assert isinstance(h["TEST"], int)
assert str(h).startswith("TEST = 1")
# Now the same tests but with zeros
h["TEST"] = 0
# int -> float
h["TEST"] = 0.0
assert isinstance(h["TEST"], float)
assert str(h).startswith("TEST = 0.0")
# float -> int
h["TEST"] = 0
assert isinstance(h["TEST"], int)
assert str(h).startswith("TEST = 0")
# int -> complex
h["TEST"] = 0.0 + 0.0j
assert isinstance(h["TEST"], complex)
assert str(h).startswith("TEST = (0.0, 0.0)")
# complex -> float
h["TEST"] = 0.0
assert isinstance(h["TEST"], float)
assert str(h).startswith("TEST = 0.0")
# float -> complex
h["TEST"] = 0.0 + 0.0j
assert isinstance(h["TEST"], complex)
assert str(h).startswith("TEST = (0.0, 0.0)")
# complex -> int
h["TEST"] = 0
assert isinstance(h["TEST"], int)
assert str(h).startswith("TEST = 0")
def test_newlines_in_commentary(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/51
Test data extracted from a header in an actual FITS file found in the
wild. Names have been changed to protect the innocent.
"""
# First ensure that we can't assign new keyword values with newlines in
# them
h = fits.Header()
pytest.raises(ValueError, h.set, "HISTORY", "\n")
pytest.raises(ValueError, h.set, "HISTORY", "\nabc")
pytest.raises(ValueError, h.set, "HISTORY", "abc\n")
pytest.raises(ValueError, h.set, "HISTORY", "abc\ndef")
test_cards = [
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18 "
"HISTORY File modified by user ' fred' with fv on 2013-04-23T11:16:29 "
"HISTORY File modified by user ' fred' with fv on 2013-11-04T16:59:14 "
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif"
"HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use"
"HISTORY r ' fred' with fv on 2013-11-04T16:59:14 "
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif"
"HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use"
"HISTORY r ' fred' with fv on 2013-11-04T16:59:14\nFile modified by user 'wilma' "
"HISTORY with fv on 2013-04-22T21:42:18\nFile modif\nied by user 'wilma' with fv "
"HISTORY on 2013-04-23T11:16:29\nFile modified by use\nr ' fred' with fv on 2013-1"
"HISTORY 1-04T16:59:14 "
]
for card_image in test_cards:
c = fits.Card.fromstring(card_image)
if "\n" in card_image:
pytest.raises(fits.VerifyError, c.verify, "exception")
else:
c.verify("exception")
def test_long_commentary_card_appended_to_header(self):
"""
If a HISTORY or COMMENT card with a too-long value is appended to a
header with Header.append (as opposed to assigning to hdr['HISTORY']
it fails verification.
Regression test for https://github.com/astropy/astropy/issues/11486
"""
header = fits.Header()
value = "abc" * 90
# this is what Table does when saving its history metadata key to a
# FITS file
header.append(("history", value))
assert len(header.cards) == 1
# Test Card._split() directly since this was the main problem area
key, val = header.cards[0]._split()
assert key == "HISTORY" and val == value
# Try writing adding this header to an HDU and writing it to a file
hdu = fits.PrimaryHDU(header=header)
hdu.writeto(self.temp("test.fits"), overwrite=True)
def test_header_fromstring_bytes(self):
"""
Test reading a Header from a `bytes` string.
See https://github.com/astropy/astropy/issues/8706
"""
with open(self.data("test0.fits"), "rb") as fobj:
pri_hdr_from_bytes = fits.Header.fromstring(fobj.read())
pri_hdr = fits.getheader(self.data("test0.fits"))
assert pri_hdr["NAXIS"] == pri_hdr_from_bytes["NAXIS"]
assert pri_hdr == pri_hdr_from_bytes
assert pri_hdr.tostring() == pri_hdr_from_bytes.tostring()
def test_set_keyword_with_space(self):
"""
Regression test for https://github.com/astropy/astropy/issues/10479
"""
hdr = fits.Header()
hdr["KEY2 "] = 2
hdr["KEY2 "] = 4
assert len(hdr) == 1
assert hdr["KEY2"] == 4
assert hdr["KEY2 "] == 4
def test_strip(self):
hdr = fits.getheader(self.data("tb.fits"), ext=1)
hdr["FOO"] = "bar"
hdr.strip()
assert set(hdr) == {"HISTORY", "FOO"}
hdr = fits.getheader(self.data("tb.fits"), ext=1)
hdr["FOO"] = "bar"
hdr = hdr.copy(strip=True)
assert set(hdr) == {"HISTORY", "FOO"}
def test_update_invalid_card(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5408
Tests updating the value of a card that is malformatted (with an
invalid value literal).
This tests two ways of reproducing the problem, one working with a
Card object directly, and one when reading/writing a header containing
such an invalid card.
"""
card = fits.Card.fromstring("KW = INF / Comment")
card.value = "FIXED"
assert tuple(card) == ("KW", "FIXED", "Comment")
card.verify("fix")
assert tuple(card) == ("KW", "FIXED", "Comment")
card = fits.Card.fromstring("KW = INF")
hdu = fits.PrimaryHDU()
# This is a loophole to write a header containing a malformatted card
card._verified = True
hdu.header.append(card)
hdu.header.tofile(self.temp("bogus.fits"))
with fits.open(self.temp("bogus.fits")) as hdul:
hdul[0].header["KW"] = -1
hdul.writeto(self.temp("bogus_fixed.fits"))
with fits.open(self.temp("bogus_fixed.fits")) as hdul:
assert hdul[0].header["KW"] == -1
def test_index_numpy_int(self):
header = fits.Header([("A", "FOO"), ("B", 2), ("C", "BAR")])
idx = np.int8(2)
assert header[idx] == "BAR"
header[idx] = "BAZ"
assert header[idx] == "BAZ"
header.insert(idx, ("D", 42))
assert header[idx] == 42
header.add_comment("HELLO")
header.add_comment("WORLD")
assert header["COMMENT"][np.int64(1)] == "WORLD"
header.append(("C", "BAZBAZ"))
assert header[("C", np.int16(0))] == "BAZ"
assert header[("C", np.uint32(1))] == "BAZBAZ"
def test_header_data_size(self):
"""
Tests data size calculation (w/o padding) given a Header.
"""
hdu = fits.PrimaryHDU()
header = hdu.header
assert header.data_size == 0
header["BITPIX"] = 32
header["NAXIS"] = 2
header["NAXIS1"] = 100
header["NAXIS2"] = 100
assert header.data_size == 40000
assert header.data_size_padded == 40320
|
TestHeaderFunctions
|
python
|
ApeWorX__ape
|
tests/functional/test_query.py
|
{
"start": 2223,
"end": 3265
}
|
class ____(BaseInterfaceModel):
number: int
timestamp: int
def test_column_expansion():
columns = validate_and_expand_columns(["*"], Model)
assert columns == list(Model.model_fields)
def test_column_validation(eth_tester_provider, ape_caplog):
with pytest.raises(ValueError) as exc_info:
validate_and_expand_columns(["numbr"], Model)
expected = "Unrecognized field(s) 'numbr', must be one of 'number, timestamp'."
assert exc_info.value.args[-1] == expected
ape_caplog.assert_last_log_with_retries(
lambda: validate_and_expand_columns(["numbr", "timestamp"], Model), expected
)
validate_and_expand_columns(["number", "timestamp", "number"], Model)
assert "Duplicate fields in ['number', 'timestamp', 'number']" in ape_caplog.messages[-1]
def test_specify_engine(chain, eth_tester_provider):
offset = chain.blocks.height + 1
chain.mine(3)
actual = chain.blocks.query("*", engine_to_use="__default__")
expected = offset + 3
assert len(actual) == expected
|
Model
|
python
|
bokeh__bokeh
|
src/bokeh/models/glyphs.py
|
{
"start": 11168,
"end": 12230
}
|
class ____(LRTBGlyph):
''' Render rectangular regions, given a lower-left corner coordinate, width, and height.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
__example__ = "examples/reference/models/Block.py"
_args = ('x', 'y', 'width', 'height')
x = NumberSpec(default=field("x"), help="""
The x-coordinates of each block's lower-left corner.
""")
y = NumberSpec(default=field("y"), help="""
The y-coordinates of each block's lower-left corner.
""")
width = DistanceSpec(default=1, help="""
The widths of the blocks.
""")
height = DistanceSpec(default=1, help="""
The heights of the blocks.
""")
line_props = Include(LineProps, help="""
The {prop} values for the blocks.
""")
fill_props = Include(FillProps, help="""
The {prop} values for the blocks.
""")
hatch_props = Include(HatchProps, help="""
The {prop} values for the blocks.
""")
|
Block
|
python
|
ray-project__ray
|
ci/ray_ci/bisect/test_bisector.py
|
{
"start": 246,
"end": 1613
}
|
class ____(Validator):
def __init__(self, return_value: bool) -> None:
self.return_value = return_value
def run(self, test: Test, revision: str) -> bool:
return self.return_value
@mock.patch("ci.ray_ci.bisect.bisector.Bisector._checkout_and_validate")
@mock.patch("ci.ray_ci.bisect.bisector.Bisector._get_revision_lists")
def test_run(mock_get_revision_lists, mock_checkout_and_validate):
def _mock_checkout_and_validate(revision):
return True if revision in ["1", "2", "3"] else False
mock_checkout_and_validate.side_effect = _mock_checkout_and_validate
mock_get_revision_lists.return_value = ["1", "2", "3", "4", "5"]
# Test case 1: T T T F F
assert Bisector(Test(), "1", "5", MacOSValidator(), "dir").run() == "4"
# Test case 2: T F
assert Bisector(Test(), "3", "4", MacOSValidator(), "dir").run() == "4"
# Test case 3: T F F
assert Bisector(Test(), "3", "5", MacOSValidator(), "dir").run() == "4"
@mock.patch("subprocess.check_call")
def test_checkout_and_validate(mock_check_call):
assert Bisector(
Test(), "1", "5", MockValidator(True), "dir"
)._checkout_and_validate("1")
assert not Bisector(
Test(), "1", "5", MockValidator(False), "dir"
)._checkout_and_validate("1")
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
|
MockValidator
|
python
|
allegroai__clearml
|
clearml/utilities/plotlympl/mplexporter/renderers/fake_renderer.py
|
{
"start": 77,
"end": 2132
}
|
class ____(Renderer):
"""
Fake Renderer
This is a fake renderer which simply outputs a text tree representing the
elements found in the plot(s). This is used in the unit tests for the
package.
Below are the methods your renderer must implement. You are free to do
anything you wish within the renderer (i.e. build an XML or JSON
representation, call an external API, etc.) Here the renderer just
builds a simple string representation for testing purposes.
"""
def __init__(self) -> None:
self.output = ""
def open_figure(self, fig: Any, props: Any) -> None:
self.output += "opening figure\n"
def close_figure(self, fig: Any) -> None:
self.output += "closing figure\n"
def open_axes(self, ax: Any, props: Any) -> None:
self.output += " opening axes\n"
def close_axes(self, ax: Any) -> None:
self.output += " closing axes\n"
def open_legend(self, legend: Any, props: Any) -> None:
self.output += " opening legend\n"
def close_legend(self, legend: Any) -> None:
self.output += " closing legend\n"
def draw_text(
self,
text: str,
position: Any,
coordinates: Any,
style: Any,
text_type: Optional[str] = None,
mplobj: Optional[Any] = None,
) -> None:
self.output += " draw text '{0}' {1}\n".format(text, text_type)
def draw_path(
self,
data: numpy.ndarray,
coordinates: str,
pathcodes: numpy.ndarray,
style: str,
offset: Optional[numpy.ndarray] = None,
offset_coordinates: str = "data",
mplobj: Optional[Any] = None,
) -> None:
self.output += " draw path with {0} vertices\n".format(data.shape[0])
def draw_image(
self,
imdata: Any,
extent: Any,
coordinates: Any,
style: Any,
mplobj: Optional[Any] = None,
) -> None:
self.output += " draw image of size {0}\n".format(len(imdata))
|
FakeRenderer
|
python
|
spyder-ide__spyder
|
spyder/utils/stylesheet.py
|
{
"start": 25921,
"end": 29703
}
|
class ____(BaseDockTabBarStyleSheet):
"""Style for vertical dockwidget tab bars."""
SCROLL_BUTTONS_BORDER_POS = 'bottom'
def set_stylesheet(self):
super().set_stylesheet()
# -- Main constants
css = self.get_stylesheet()
margin_size = AppStyle.MarginSize
# -- Basic style
css['QTabBar::tab'].setValues(
# No margins to top/bottom but left/right to separate tabbar from
# the dockwidget areas
margin=f'0px {2 * margin_size}px',
# Border radius is added for specific tabs (see below)
borderRadius='0px',
# Remove colored borders added by QDarkStyle
borderLeft='0px',
borderRight='0px',
# Padding for text inside tabs
padding='10px 4px',
)
# -- Style for not selected tabs
css['QTabBar::tab:!selected'].setValues(
border='0px',
backgroundColor=SpyderPalette.COLOR_BACKGROUND_4,
borderTop=f'1px solid {SpyderPalette.COLOR_BACKGROUND_4}',
borderBottom=f'1px solid {SpyderPalette.SPECIAL_TABS_SEPARATOR}',
)
css['QTabBar::tab:!selected:hover'].setValues(
backgroundColor=SpyderPalette.COLOR_BACKGROUND_5,
borderTopColor=SpyderPalette.COLOR_BACKGROUND_5,
)
# -- Style for the not selected tabs above and below the selected one.
css['QTabBar::tab:next-selected'].setValues(
borderBottomColor=SpyderPalette.COLOR_BACKGROUND_4,
)
css['QTabBar::tab:next-selected:hover'].setValues(
borderBottomColor=SpyderPalette.SPECIAL_TABS_SEPARATOR,
backgroundColor=SpyderPalette.COLOR_BACKGROUND_5
)
css['QTabBar::tab:previous-selected'].setValues(
borderTopColor=SpyderPalette.COLOR_BACKGROUND_4,
)
css['QTabBar::tab:previous-selected:hover'].setValues(
borderTopColor=SpyderPalette.SPECIAL_TABS_SEPARATOR,
backgroundColor=SpyderPalette.COLOR_BACKGROUND_5
)
# -- First and last tabs have rounded borders.
# Also, add margin to avoid them touch the top and bottom borders,
# respectively.
css['QTabBar::tab:first'].setValues(
borderTopLeftRadius=SpyderPalette.SIZE_BORDER_RADIUS,
borderTopRightRadius=SpyderPalette.SIZE_BORDER_RADIUS,
marginTop=f'{2 * margin_size}px',
)
css['QTabBar::tab:last'].setValues(
borderBottomLeftRadius=SpyderPalette.SIZE_BORDER_RADIUS,
borderBottomRightRadius=SpyderPalette.SIZE_BORDER_RADIUS,
marginBottom=f'{2 * margin_size}px',
)
# -- Last tab doesn't need to show the separator
css['QTabBar::tab:last:!selected'].setValues(
borderBottomColor=SpyderPalette.COLOR_BACKGROUND_4
)
css['QTabBar::tab:last:!selected:hover'].setValues(
borderBottomColor=SpyderPalette.COLOR_BACKGROUND_5,
backgroundColor=SpyderPalette.COLOR_BACKGROUND_5
)
# -- Make style for scroll buttons match the horizontal one
css['QTabBar QToolButton'].setValues(
marginLeft=f'{margin_size}px',
marginRight=f'{margin_size}px',
)
PANES_TABBAR_STYLESHEET = PanesTabBarStyleSheet()
HORIZONTAL_DOCK_TABBAR_STYLESHEET = HorizontalDockTabBarStyleSheet()
VERTICAL_DOCK_TABBAR_STYLESHEET = VerticalDockTabBarStyleSheet()
PREFERENCES_TABBAR_STYLESHEET = PreferencesTabBarStyleSheet()
# =============================================================================
# ---- Style for special dialogs
# =============================================================================
|
VerticalDockTabBarStyleSheet
|
python
|
PyCQA__pylint
|
tests/functional/n/not_async_context_manager.py
|
{
"start": 792,
"end": 1511
}
|
class ____(InheritExit):
def __aenter__(self):
pass
async def bad_coro():
async with 42: # [not-async-context-manager]
pass
async with ctx_manager(): # [not-async-context-manager]
pass
async with ContextManager(): # [not-async-context-manager]
pass
async with PartialAsyncContextManager(): # [not-async-context-manager]
pass
async with SecondPartialAsyncContextManager(): # [not-async-context-manager]
pass
async def good_coro():
async with UnknownBases():
pass
async with AsyncManagerMixin():
pass
async with GoodAsyncManager():
pass
async with SecondGoodAsyncManager():
pass
|
SecondGoodAsyncManager
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/metrics.py
|
{
"start": 91956,
"end": 92832
}
|
class ____(MeanMetricWrapper):
"""Computes the mean absolute error between the labels and predictions.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.MeanAbsoluteError()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.25
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.5
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.MeanAbsoluteError()])
```
"""
def __init__(self, name='mean_absolute_error', dtype=None):
super(MeanAbsoluteError, self).__init__(
mean_absolute_error, name, dtype=dtype)
|
MeanAbsoluteError
|
python
|
numba__numba
|
numba/cuda/tests/doc_examples/test_reduction.py
|
{
"start": 201,
"end": 2274
}
|
class ____(CUDATestCase):
"""
Test shared memory reduction
"""
def setUp(self):
# Prevent output from this test showing up when running the test suite
self._captured_stdout = captured_stdout()
self._captured_stdout.__enter__()
super().setUp()
def tearDown(self):
# No exception type, value, or traceback
self._captured_stdout.__exit__(None, None, None)
super().tearDown()
def test_ex_reduction(self):
# ex_reduction.import.begin
import numpy as np
from numba import cuda
from numba.types import int32
# ex_reduction.import.end
# ex_reduction.allocate.begin
# generate data
a = cuda.to_device(np.arange(1024))
nelem = len(a)
# ex_reduction.allocate.end
# ex_reduction.kernel.begin
@cuda.jit
def array_sum(data):
tid = cuda.threadIdx.x
size = len(data)
if tid < size:
i = cuda.grid(1)
# Declare an array in shared memory
shr = cuda.shared.array(nelem, int32)
shr[tid] = data[i]
# Ensure writes to shared memory are visible
# to all threads before reducing
cuda.syncthreads()
s = 1
while s < cuda.blockDim.x:
if tid % (2 * s) == 0:
# Stride by `s` and add
shr[tid] += shr[tid + s]
s *= 2
cuda.syncthreads()
# After the loop, the zeroth element contains the sum
if tid == 0:
data[tid] = shr[tid]
# ex_reduction.kernel.end
# ex_reduction.launch.begin
array_sum[1, nelem](a)
print(a[0]) # 523776
print(sum(np.arange(1024))) # 523776
# ex_reduction.launch.end
np.testing.assert_equal(a[0], sum(np.arange(1024)))
if __name__ == "__main__":
unittest.main()
|
TestReduction
|
python
|
ZoranPandovski__al-go-rithms
|
cryptography/blockchain/python/blockchain.py
|
{
"start": 829,
"end": 1682
}
|
class ____:
def __init__(self, index, data, previous_hash, reward):
self.index = index
self.data = data
self.previous_hash = previous_hash
self.timestamp = str(datetime.datetime.now())
hash_data = str(self.index) + str(self.previous_hash) + str(self.timestamp) + json.dumps(self.data)
self.hash,self.nonce,self.difficulty = proof_of_work(hash_data,-1)
self.reward = reward
def print_block_details(self):
print(f'Details for block indexed at {self.index} : ')
print(f'\tData : {self.data}')
print(f'\tTimeStamp : {self.timestamp}')
print(f'\tHash : {self.hash}')
print(f'\tPrevious Hash : {self.previous_hash}')
print(f'\tReward : {self.reward}')
print(f'\tNonce : {self.nonce}')
print(f'\tDifficulty : {self.difficulty}')
|
Block
|
python
|
mlflow__mlflow
|
tests/resources/mlflow-test-plugin/mlflow_test_plugin/dummy_dataset_source.py
|
{
"start": 224,
"end": 1535
}
|
class ____(DatasetSource):
def __init__(self, uri):
self._uri = uri
@property
def uri(self):
return self._uri
@staticmethod
def _get_source_type() -> str:
return "dummy"
def load(self) -> str:
# Ignore the "dummy" URI scheme and download the local path
from mlflow.artifacts import download_artifacts
parsed_uri = urlparse(self._uri)
return download_artifacts(parsed_uri.path)
@staticmethod
def _can_resolve(raw_source: Any) -> bool:
if not isinstance(raw_source, str):
return False
try:
parsed_source = urlparse(raw_source)
return parsed_source.scheme == "dummy"
except Exception:
return False
@classmethod
def _resolve(cls, raw_source: Any) -> DatasetSource:
return cls(raw_source)
def _to_dict(self) -> dict[Any, Any]:
return {"uri": self.uri}
@classmethod
def _from_dict(cls, source_dict: dict[Any, Any]) -> DatasetSource:
uri = source_dict.get("uri")
if uri is None:
raise MlflowException(
'Failed to parse dummy dataset source. Missing expected key: "uri"',
INVALID_PARAMETER_VALUE,
)
return cls(uri=uri)
|
DummyDatasetSource
|
python
|
realpython__materials
|
python-contact-book/source_code_step_5/rpcontacts/model.py
|
{
"start": 160,
"end": 1051
}
|
class ____:
def __init__(self):
self.model = self._createModel()
@staticmethod
def _createModel():
"""Create and set up the model."""
tableModel = QSqlTableModel()
tableModel.setTable("contacts")
tableModel.setEditStrategy(QSqlTableModel.OnFieldChange)
tableModel.select()
headers = ("ID", "Name", "Job", "Email")
for columnIndex, header in enumerate(headers):
tableModel.setHeaderData(columnIndex, Qt.Horizontal, header)
return tableModel
def addContact(self, data):
"""Add a contact to the database."""
rows = self.model.rowCount()
self.model.insertRows(rows, 1)
for column_index, field in enumerate(data):
self.model.setData(self.model.index(rows, column_index + 1), field)
self.model.submitAll()
self.model.select()
|
ContactsModel
|
python
|
getsentry__sentry
|
src/sentry_plugins/sessionstack/plugin.py
|
{
"start": 997,
"end": 6575
}
|
class ____(CorePluginMixin, Plugin2):
description = "Watch SessionStack recordings in Sentry."
title = "SessionStack"
slug = "sessionstack"
conf_title = title
conf_key = slug
required_field = "account_email"
feature_descriptions = [
FeatureDescription(
"""
Watch the SessionStack session replay of a user in a video widget embedded in the Sentry UI for an issue.
""",
IntegrationFeatures.SESSION_REPLAY,
)
]
def has_project_conf(self) -> bool:
return True
def get_custom_contexts(self):
return [SessionStackContextType]
def reset_options(self, project=None):
self.disable(project)
self.set_option("account_email", "", project)
self.set_option("api_token", "", project)
self.set_option("website_id", "", project)
self.set_option("player_url", "", project)
self.set_option("api_url", "", project)
def is_testable(self, **kwargs) -> bool:
return False
def validate_config(self, project, config, actor=None):
sessionstack_client = SessionStackClient(
account_email=config.get("account_email"),
api_token=config.get("api_token"),
website_id=config.get("website_id"),
api_url=config.get("api_url"),
player_url=config.get("player_url"),
)
try:
sessionstack_client.validate_api_access()
except UnauthorizedError:
raise PluginError(UNAUTHORIZED_ERROR)
except InvalidApiUrlError:
raise PluginError(INVALID_API_URL_ERROR)
except InvalidWebsiteIdError:
raise PluginError(INVALID_WEBSITE_ID_ERROR)
except Exception:
raise PluginError(UNEXPECTED_ERROR)
return config
def get_config(self, project, user=None, initial=None, add_additional_fields: bool = False):
account_email = self.get_option("account_email", project)
website_id = self.get_option("website_id", project)
api_url = self.get_option("api_url", project)
player_url = self.get_option("player_url", project)
configurations = [
{
"name": "account_email",
"label": "Account Email",
"default": account_email,
"type": "text",
"placeholder": 'e.g. "user@example.com"',
"required": True,
},
get_secret_field_config(
name="api_token",
label="API Token",
secret=self.get_option("api_token", project),
help="SessionStack generated API token.",
required=True,
),
{
"name": "website_id",
"label": "Website ID",
"default": website_id,
"type": "number",
"help": "ID of the corresponding website in SessionStack.",
"required": True,
},
]
if is_self_hosted():
# We only support connecting to an on-premises SessionStack from a
# self-hosted Sentry: https://docs.sessionstack.com/docs/sentry.
configurations.extend(
[
{
"name": "api_url",
"label": "SessionStack API URL",
"default": api_url,
"type": "text",
"help": "URL to SessionStack's REST API. The default "
'value is "https://api.sessionstack.com/"',
"required": False,
},
{
"name": "player_url",
"label": "SessionStack Player URL",
"default": player_url,
"type": "text",
"help": "URL to SessionStack's session player. The default "
'value is "http://app.sessionstack.com/player/"',
"required": False,
},
]
)
return configurations
def get_event_preprocessors(self, data: Mapping[str, Any]) -> Sequence[EventPreprocessor]:
context = SessionStackContextType.primary_value_for_data(data)
if not context:
return []
session_id = context.get("session_id")
if not session_id:
return []
project = Project.objects.get_from_cache(id=data.get("project"))
if not self.is_enabled(project):
return []
def preprocess_event(data: MutableMapping[str, Any]) -> MutableMapping[str, Any] | None:
sessionstack_client = SessionStackClient(
account_email=self.get_option("account_email", project),
api_token=self.get_option("api_token", project),
website_id=self.get_option("website_id", project),
api_url=self.get_option("api_url", project),
player_url=self.get_option("player_url", project),
)
session_url = sessionstack_client.get_session_url(
session_id=session_id, event_timestamp=context.get("timestamp")
)
context["session_url"] = session_url
contexts = data.get("contexts") or {}
contexts["sessionstack"] = context
data["contexts"] = contexts
return data
return [preprocess_event]
|
SessionStackPlugin
|
python
|
PrefectHQ__prefect
|
src/prefect/server/schemas/core.py
|
{
"start": 38504,
"end": 40541
}
|
class ____(ORMBaseModel):
"""An ORM representation of a work pool"""
name: NonEmptyishName = Field(
description="The name of the work pool.",
)
description: Optional[str] = Field(
default=None, description="A description of the work pool."
)
type: str = Field(description="The work pool type.")
base_job_template: Dict[str, Any] = Field(
default_factory=dict, description="The work pool's base job template."
)
is_paused: bool = Field(
default=False,
description="Pausing the work pool stops the delivery of all work.",
)
concurrency_limit: Optional[NonNegativeInteger] = Field(
default=None, description="A concurrency limit for the work pool."
)
status: Optional[WorkPoolStatus] = Field(
default=None, description="The current status of the work pool."
)
# this required field has a default of None so that the custom validator
# below will be called and produce a more helpful error message
default_queue_id: Optional[UUID] = Field(
default=None, description="The id of the pool's default queue."
)
storage_configuration: WorkPoolStorageConfiguration = Field(
default_factory=WorkPoolStorageConfiguration,
description="The storage configuration for the work pool.",
)
@field_validator("default_queue_id")
def helpful_error_for_missing_default_queue_id(cls, v: UUID | None) -> UUID:
return validate_default_queue_id_not_none(v)
@classmethod
def model_validate(
cls: Type[Self],
obj: Any,
*,
strict: Optional[bool] = None,
from_attributes: Optional[bool] = None,
context: Optional[dict[str, Any]] = None,
) -> Self:
parsed: WorkPool = super().model_validate(
obj, strict=strict, from_attributes=from_attributes, context=context
)
if from_attributes:
if obj.type == "prefect-agent":
parsed.status = None
return parsed
|
WorkPool
|
python
|
pytorch__pytorch
|
benchmarks/dynamo/pr_time_benchmarks/benchmarks/dtensor.py
|
{
"start": 1110,
"end": 1731
}
|
class ____(BenchmarkDTensorDispatch):
def __init__(self, world_size) -> None:
super().__init__(operator="detach", world_size=world_size)
def _work(self) -> None:
self.a.detach()
def main():
world_size = 256
fake_store = FakeStore()
torch.distributed.init_process_group(
"fake", store=fake_store, rank=0, world_size=world_size
)
result_path = sys.argv[1]
BenchmarkDetach(world_size).enable_instruction_count().collect_all().append_results(
result_path
)
torch.distributed.destroy_process_group()
if __name__ == "__main__":
main()
|
BenchmarkDetach
|
python
|
pydantic__pydantic
|
tests/test_json_schema.py
|
{
"start": 96657,
"end": 192893
}
|
class ____(BaseModel):
my_enum_2: MyEnum
"""
)
class Model(BaseModel):
my_model_1: module_1.MyModel
my_model_2: module_2.MyModel
assert len(Model.model_json_schema()['$defs']) == 4
assert set(Model.model_json_schema()['$defs']) == {
f'{module_1.__name__}__MyEnum',
f'{module_1.__name__}__MyModel',
f'{module_2.__name__}__MyEnum',
f'{module_2.__name__}__MyModel',
}
def test_mode_name_causes_no_conflict():
class Organization(BaseModel):
pass
class OrganizationInput(BaseModel):
pass
class OrganizationOutput(BaseModel):
pass
class Model(BaseModel):
# Ensure the validation and serialization schemas are different:
x: Organization = Field(validation_alias='x_validation', serialization_alias='x_serialization')
y: OrganizationInput
z: OrganizationOutput
assert Model.model_json_schema(mode='validation') == {
'$defs': {
'Organization': {'properties': {}, 'title': 'Organization', 'type': 'object'},
'OrganizationInput': {'properties': {}, 'title': 'OrganizationInput', 'type': 'object'},
'OrganizationOutput': {'properties': {}, 'title': 'OrganizationOutput', 'type': 'object'},
},
'properties': {
'x_validation': {'$ref': '#/$defs/Organization'},
'y': {'$ref': '#/$defs/OrganizationInput'},
'z': {'$ref': '#/$defs/OrganizationOutput'},
},
'required': ['x_validation', 'y', 'z'],
'title': 'Model',
'type': 'object',
}
assert Model.model_json_schema(mode='serialization') == {
'$defs': {
'Organization': {'properties': {}, 'title': 'Organization', 'type': 'object'},
'OrganizationInput': {'properties': {}, 'title': 'OrganizationInput', 'type': 'object'},
'OrganizationOutput': {'properties': {}, 'title': 'OrganizationOutput', 'type': 'object'},
},
'properties': {
'x_serialization': {'$ref': '#/$defs/Organization'},
'y': {'$ref': '#/$defs/OrganizationInput'},
'z': {'$ref': '#/$defs/OrganizationOutput'},
},
'required': ['x_serialization', 'y', 'z'],
'title': 'Model',
'type': 'object',
}
def test_ref_conflict_resolution_without_mode_difference():
class OrganizationInput(BaseModel):
pass
class Organization(BaseModel):
x: int
schema_with_defs, defs = GenerateJsonSchema().generate_definitions(
[
(Organization, 'validation', Organization.__pydantic_core_schema__),
(Organization, 'serialization', Organization.__pydantic_core_schema__),
(OrganizationInput, 'validation', OrganizationInput.__pydantic_core_schema__),
]
)
assert schema_with_defs == {
(Organization, 'serialization'): {'$ref': '#/$defs/Organization'},
(Organization, 'validation'): {'$ref': '#/$defs/Organization'},
(OrganizationInput, 'validation'): {'$ref': '#/$defs/OrganizationInput'},
}
assert defs == {
'OrganizationInput': {'properties': {}, 'title': 'OrganizationInput', 'type': 'object'},
'Organization': {
'properties': {'x': {'title': 'X', 'type': 'integer'}},
'required': ['x'],
'title': 'Organization',
'type': 'object',
},
}
def test_ref_conflict_resolution_with_mode_difference():
class OrganizationInput(BaseModel):
pass
class Organization(BaseModel):
x: int
@field_serializer('x')
def serialize_x(self, v: int) -> str:
return str(v)
schema_with_defs, defs = GenerateJsonSchema().generate_definitions(
[
(Organization, 'validation', Organization.__pydantic_core_schema__),
(Organization, 'serialization', Organization.__pydantic_core_schema__),
(OrganizationInput, 'validation', OrganizationInput.__pydantic_core_schema__),
]
)
assert schema_with_defs == {
(Organization, 'serialization'): {'$ref': '#/$defs/Organization-Output'},
(Organization, 'validation'): {'$ref': '#/$defs/Organization-Input'},
(OrganizationInput, 'validation'): {'$ref': '#/$defs/OrganizationInput'},
}
assert defs == {
'OrganizationInput': {'properties': {}, 'title': 'OrganizationInput', 'type': 'object'},
'Organization-Input': {
'properties': {'x': {'title': 'X', 'type': 'integer'}},
'required': ['x'],
'title': 'Organization',
'type': 'object',
},
'Organization-Output': {
'properties': {'x': {'title': 'X', 'type': 'string'}},
'required': ['x'],
'title': 'Organization',
'type': 'object',
},
}
def test_conflicting_names():
class Organization__Input(BaseModel):
pass
class Organization(BaseModel):
x: int
@field_serializer('x')
def serialize_x(self, v: int) -> str:
return str(v)
schema_with_defs, defs = GenerateJsonSchema().generate_definitions(
[
(Organization, 'validation', Organization.__pydantic_core_schema__),
(Organization, 'serialization', Organization.__pydantic_core_schema__),
(Organization__Input, 'validation', Organization__Input.__pydantic_core_schema__),
]
)
assert schema_with_defs == {
(Organization, 'serialization'): {'$ref': '#/$defs/Organization-Output'},
(Organization, 'validation'): {'$ref': '#/$defs/Organization-Input'},
(Organization__Input, 'validation'): {'$ref': '#/$defs/Organization__Input'},
}
assert defs == {
'Organization__Input': {'properties': {}, 'title': 'Organization__Input', 'type': 'object'},
'Organization-Input': {
'properties': {'x': {'title': 'X', 'type': 'integer'}},
'required': ['x'],
'title': 'Organization',
'type': 'object',
},
'Organization-Output': {
'properties': {'x': {'title': 'X', 'type': 'string'}},
'required': ['x'],
'title': 'Organization',
'type': 'object',
},
}
@pytest.mark.skip_json_schema_validation(reason='Custom type used.')
def test_schema_for_generic_field():
T = TypeVar('T')
class GenModel(Generic[T]):
def __init__(self, data: Any):
self.data = data
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, v: Any):
return v
@classmethod
def __get_pydantic_core_schema__(
cls,
source: Any,
handler: GetCoreSchemaHandler,
) -> core_schema.PlainValidatorFunctionSchema:
source_args = getattr(source, '__args__', [Any])
param = source_args[0]
metadata = {'pydantic_js_functions': [lambda _c, h: h(handler.generate_schema(param))]}
return core_schema.with_info_plain_validator_function(
GenModel,
metadata=metadata,
)
class Model(BaseModel):
data: GenModel[str]
data1: GenModel
model_config = dict(arbitrary_types_allowed=True)
assert Model.model_json_schema() == {
'title': 'Model',
'type': 'object',
'properties': {
'data': {'type': 'string', 'title': 'Data'},
'data1': {
'title': 'Data1',
},
},
'required': ['data', 'data1'],
}
class GenModelModified(GenModel, Generic[T]):
@classmethod
def __get_pydantic_json_schema__(
cls, core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
field_schema = handler(core_schema)
type = field_schema.pop('type', 'other')
field_schema.update(anyOf=[{'type': type}, {'type': 'array', 'items': {'type': type}}])
return field_schema
class ModelModified(BaseModel):
data: GenModelModified[str]
data1: GenModelModified
model_config = dict(arbitrary_types_allowed=True)
assert ModelModified.model_json_schema() == {
'title': 'ModelModified',
'type': 'object',
'properties': {
'data': {'title': 'Data', 'anyOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]},
'data1': {'title': 'Data1', 'anyOf': [{'type': 'other'}, {'type': 'array', 'items': {'type': 'other'}}]},
},
'required': ['data', 'data1'],
}
def test_namedtuple_default():
class Coordinates(NamedTuple):
x: float
y: float
class LocationBase(BaseModel):
coords: Coordinates = Coordinates(34, 42)
assert LocationBase(coords=Coordinates(1, 2)).coords == Coordinates(1, 2)
assert LocationBase.model_json_schema() == {
'$defs': {
'Coordinates': {
'maxItems': 2,
'minItems': 2,
'prefixItems': [{'title': 'X', 'type': 'number'}, {'title': 'Y', 'type': 'number'}],
'type': 'array',
}
},
'properties': {'coords': {'$ref': '#/$defs/Coordinates', 'default': [34, 42]}},
'title': 'LocationBase',
'type': 'object',
}
def test_namedtuple_modify_schema():
class Coordinates(NamedTuple):
x: float
y: float
class CustomCoordinates(Coordinates):
@classmethod
def __get_pydantic_core_schema__(cls, source: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema:
schema = handler(source)
schema['arguments_schema']['metadata']['pydantic_js_prefer_positional_arguments'] = False
return schema
class Location(BaseModel):
coords: CustomCoordinates = CustomCoordinates(34, 42)
assert Location.model_json_schema() == {
'$defs': {
'CustomCoordinates': {
'additionalProperties': False,
'properties': {'x': {'title': 'X', 'type': 'number'}, 'y': {'title': 'Y', 'type': 'number'}},
'required': ['x', 'y'],
'type': 'object',
}
},
'properties': {'coords': {'$ref': '#/$defs/CustomCoordinates', 'default': [34, 42]}},
'title': 'Location',
'type': 'object',
}
def test_advanced_generic_schema(): # noqa: C901
T = TypeVar('T')
K = TypeVar('K')
class Gen(Generic[T]):
def __init__(self, data: Any):
self.data = data
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, v: Any):
return v
@classmethod
def __get_pydantic_core_schema__(cls, source: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema:
if hasattr(source, '__args__'):
arg = source.__args__[0]
def js_func(s, h):
# ignore the schema we were given and get a new CoreSchema
s = handler.generate_schema(Optional[arg])
return h(s)
return core_schema.with_info_plain_validator_function(
Gen,
metadata={'pydantic_js_annotation_functions': [js_func]},
)
else:
return handler(source)
@classmethod
def __get_pydantic_json_schema__(
cls, core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
try:
field_schema = handler(core_schema)
except PydanticInvalidForJsonSchema:
field_schema = {}
the_type = field_schema.pop('anyOf', [{'type': 'string'}])[0]
field_schema.update(title='Gen title', anyOf=[the_type, {'type': 'array', 'items': the_type}])
return field_schema
class GenTwoParams(Generic[T, K]):
def __init__(self, x: str, y: Any):
self.x = x
self.y = y
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, v: Any):
return cls(*v)
@classmethod
def __get_pydantic_core_schema__(
cls, source: Any, handler: GetCoreSchemaHandler, **_kwargs: Any
) -> core_schema.CoreSchema:
if hasattr(source, '__args__'):
# the js_function ignores the schema we were given and gets a new Tuple CoreSchema
metadata = {'pydantic_js_functions': [lambda _c, h: h(handler(tuple[source.__args__]))]}
return core_schema.with_info_plain_validator_function(
GenTwoParams,
metadata=metadata,
)
return handler(source)
@classmethod
def __get_pydantic_json_schema__(
cls, core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
field_schema = handler(core_schema)
field_schema.pop('minItems')
field_schema.pop('maxItems')
field_schema.update(examples=[['a', 'e0add881-8b94-4368-8286-f8607928924e']])
return field_schema
class CustomType(Enum):
A = 'a'
B = 'b'
@classmethod
def __get_pydantic_json_schema__(
cls, core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler
) -> core_schema.CoreSchema:
json_schema = handler(core_schema)
json_schema.update(title='CustomType title', type='string')
return json_schema
class Model(BaseModel):
data0: Gen
data1: Gen[CustomType] = Field(title='Data1 title', description='Data 1 description')
data2: GenTwoParams[CustomType, UUID4] = Field(title='Data2 title', description='Data 2')
# check Tuple because changes in code touch that type
data3: tuple
data4: tuple[CustomType]
data5: tuple[CustomType, str]
model_config = {'arbitrary_types_allowed': True}
# insert_assert(Model.model_json_schema())
assert Model.model_json_schema() == {
'$defs': {'CustomType': {'enum': ['a', 'b'], 'title': 'CustomType title', 'type': 'string'}},
'properties': {
'data0': {
'anyOf': [{'type': 'string'}, {'items': {'type': 'string'}, 'type': 'array'}],
'title': 'Gen title',
},
'data1': {
'anyOf': [{'$ref': '#/$defs/CustomType'}, {'items': {'$ref': '#/$defs/CustomType'}, 'type': 'array'}],
'description': 'Data 1 description',
'title': 'Data1 title',
},
'data2': {
'description': 'Data 2',
'examples': [['a', 'e0add881-8b94-4368-8286-f8607928924e']],
'prefixItems': [{'$ref': '#/$defs/CustomType'}, {'format': 'uuid4', 'type': 'string'}],
'title': 'Data2 title',
'type': 'array',
},
'data3': {'items': {}, 'title': 'Data3', 'type': 'array'},
'data4': {
'maxItems': 1,
'minItems': 1,
'prefixItems': [{'$ref': '#/$defs/CustomType'}],
'title': 'Data4',
'type': 'array',
},
'data5': {
'maxItems': 2,
'minItems': 2,
'prefixItems': [{'$ref': '#/$defs/CustomType'}, {'type': 'string'}],
'title': 'Data5',
'type': 'array',
},
},
'required': ['data0', 'data1', 'data2', 'data3', 'data4', 'data5'],
'title': 'Model',
'type': 'object',
}
def test_nested_generic():
"""
Test a nested BaseModel that is also a Generic
"""
class Ref(BaseModel, Generic[T]):
uuid: str
def resolve(self) -> T: ...
class Model(BaseModel):
ref: Ref['Model']
assert Model.model_json_schema() == {
'title': 'Model',
'type': 'object',
'$defs': {
'Ref_Model_': {
'title': 'Ref[Model]',
'type': 'object',
'properties': {
'uuid': {'title': 'Uuid', 'type': 'string'},
},
'required': ['uuid'],
},
},
'properties': {
'ref': {'$ref': '#/$defs/Ref_Model_'},
},
'required': ['ref'],
}
def test_nested_generic_model():
"""
Test a nested generic model
"""
class Box(BaseModel, Generic[T]):
uuid: str
data: T
class Model(BaseModel):
box_str: Box[str]
box_int: Box[int]
assert Model.model_json_schema() == {
'title': 'Model',
'type': 'object',
'$defs': {
'Box_str_': Box[str].model_json_schema(),
'Box_int_': Box[int].model_json_schema(),
},
'properties': {
'box_str': {'$ref': '#/$defs/Box_str_'},
'box_int': {'$ref': '#/$defs/Box_int_'},
},
'required': ['box_str', 'box_int'],
}
def test_complex_nested_generic():
"""
Handle a union of a generic.
"""
class Ref(BaseModel, Generic[T]):
uuid: str
def resolve(self) -> T: ...
class Model(BaseModel):
uuid: str
model: Union[Ref['Model'], 'Model']
def resolve(self) -> 'Model': ...
Model.model_rebuild()
assert Model.model_json_schema() == {
'$defs': {
'Model': {
'title': 'Model',
'type': 'object',
'properties': {
'uuid': {'title': 'Uuid', 'type': 'string'},
'model': {
'title': 'Model',
'anyOf': [
{'$ref': '#/$defs/Ref_Model_'},
{'$ref': '#/$defs/Model'},
],
},
},
'required': ['uuid', 'model'],
},
'Ref_Model_': {
'title': 'Ref[Model]',
'type': 'object',
'properties': {'uuid': {'title': 'Uuid', 'type': 'string'}},
'required': ['uuid'],
},
},
'$ref': '#/$defs/Model',
}
def test_modify_schema_dict_keys() -> None:
class MyType:
@classmethod
def __get_pydantic_json_schema__(
cls, core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
return {'test': 'passed'}
class MyModel(BaseModel):
my_field: dict[str, MyType]
model_config = dict(arbitrary_types_allowed=True)
assert MyModel.model_json_schema() == {
'properties': {
'my_field': {'additionalProperties': {'test': 'passed'}, 'title': 'My Field', 'type': 'object'} # <----
},
'required': ['my_field'],
'title': 'MyModel',
'type': 'object',
}
def test_remove_anyof_redundancy() -> None:
class A:
@classmethod
def __get_pydantic_json_schema__(
cls, core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
return handler({'type': 'str'})
class B:
@classmethod
def __get_pydantic_json_schema__(
cls, core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
return handler({'type': 'str'})
class MyModel(BaseModel):
model_config = ConfigDict(arbitrary_types_allowed=True)
# Union of two objects should give a JSON with an `anyOf` field, but in this case
# since the fields are the same, the `anyOf` is removed.
field: Union[A, B]
assert MyModel.model_json_schema() == {
'properties': {'field': {'title': 'Field', 'type': 'string'}},
'required': ['field'],
'title': 'MyModel',
'type': 'object',
}
def test_discriminated_union():
class Cat(BaseModel):
pet_type: Literal['cat']
class Dog(BaseModel):
pet_type: Literal['dog']
class Lizard(BaseModel):
pet_type: Literal['reptile', 'lizard']
class Model(BaseModel):
pet: Union[Cat, Dog, Lizard] = Field(discriminator='pet_type')
# insert_assert(Model.model_json_schema())
assert Model.model_json_schema() == {
'$defs': {
'Cat': {
'properties': {'pet_type': {'const': 'cat', 'title': 'Pet Type', 'type': 'string'}},
'required': ['pet_type'],
'title': 'Cat',
'type': 'object',
},
'Dog': {
'properties': {'pet_type': {'const': 'dog', 'title': 'Pet Type', 'type': 'string'}},
'required': ['pet_type'],
'title': 'Dog',
'type': 'object',
},
'Lizard': {
'properties': {'pet_type': {'enum': ['reptile', 'lizard'], 'title': 'Pet Type', 'type': 'string'}},
'required': ['pet_type'],
'title': 'Lizard',
'type': 'object',
},
},
'properties': {
'pet': {
'discriminator': {
'mapping': {
'cat': '#/$defs/Cat',
'dog': '#/$defs/Dog',
'lizard': '#/$defs/Lizard',
'reptile': '#/$defs/Lizard',
},
'propertyName': 'pet_type',
},
'oneOf': [{'$ref': '#/$defs/Cat'}, {'$ref': '#/$defs/Dog'}, {'$ref': '#/$defs/Lizard'}],
'title': 'Pet',
}
},
'required': ['pet'],
'title': 'Model',
'type': 'object',
}
def test_discriminated_annotated_union():
class Cat(BaseModel):
pet_type: Literal['cat']
class Dog(BaseModel):
pet_type: Literal['dog']
class Lizard(BaseModel):
pet_type: Literal['reptile', 'lizard']
class Model(BaseModel):
pet: Annotated[Union[Cat, Dog, Lizard], Field(discriminator='pet_type')]
# insert_assert(Model.model_json_schema())
assert Model.model_json_schema() == {
'$defs': {
'Cat': {
'properties': {'pet_type': {'const': 'cat', 'title': 'Pet Type', 'type': 'string'}},
'required': ['pet_type'],
'title': 'Cat',
'type': 'object',
},
'Dog': {
'properties': {'pet_type': {'const': 'dog', 'title': 'Pet Type', 'type': 'string'}},
'required': ['pet_type'],
'title': 'Dog',
'type': 'object',
},
'Lizard': {
'properties': {'pet_type': {'enum': ['reptile', 'lizard'], 'title': 'Pet Type', 'type': 'string'}},
'required': ['pet_type'],
'title': 'Lizard',
'type': 'object',
},
},
'properties': {
'pet': {
'discriminator': {
'mapping': {
'cat': '#/$defs/Cat',
'dog': '#/$defs/Dog',
'lizard': '#/$defs/Lizard',
'reptile': '#/$defs/Lizard',
},
'propertyName': 'pet_type',
},
'oneOf': [{'$ref': '#/$defs/Cat'}, {'$ref': '#/$defs/Dog'}, {'$ref': '#/$defs/Lizard'}],
'title': 'Pet',
}
},
'required': ['pet'],
'title': 'Model',
'type': 'object',
}
def test_nested_discriminated_union():
class BlackCatWithHeight(BaseModel):
color: Literal['black']
info: Literal['height']
height: float
class BlackCatWithWeight(BaseModel):
color: Literal['black']
info: Literal['weight']
weight: float
BlackCat = Annotated[Union[BlackCatWithHeight, BlackCatWithWeight], Field(discriminator='info')]
class WhiteCat(BaseModel):
color: Literal['white']
white_cat_info: str
class Cat(BaseModel):
pet: Annotated[Union[BlackCat, WhiteCat], Field(discriminator='color')]
# insert_assert(Cat.model_json_schema())
assert Cat.model_json_schema() == {
'$defs': {
'BlackCatWithHeight': {
'properties': {
'color': {'const': 'black', 'title': 'Color', 'type': 'string'},
'height': {'title': 'Height', 'type': 'number'},
'info': {'const': 'height', 'title': 'Info', 'type': 'string'},
},
'required': ['color', 'info', 'height'],
'title': 'BlackCatWithHeight',
'type': 'object',
},
'BlackCatWithWeight': {
'properties': {
'color': {'const': 'black', 'title': 'Color', 'type': 'string'},
'info': {'const': 'weight', 'title': 'Info', 'type': 'string'},
'weight': {'title': 'Weight', 'type': 'number'},
},
'required': ['color', 'info', 'weight'],
'title': 'BlackCatWithWeight',
'type': 'object',
},
'WhiteCat': {
'properties': {
'color': {'const': 'white', 'title': 'Color', 'type': 'string'},
'white_cat_info': {'title': 'White Cat Info', 'type': 'string'},
},
'required': ['color', 'white_cat_info'],
'title': 'WhiteCat',
'type': 'object',
},
},
'properties': {
'pet': {
'discriminator': {
'mapping': {
'black': {
'discriminator': {
'mapping': {
'height': '#/$defs/BlackCatWithHeight',
'weight': '#/$defs/BlackCatWithWeight',
},
'propertyName': 'info',
},
'oneOf': [{'$ref': '#/$defs/BlackCatWithHeight'}, {'$ref': '#/$defs/BlackCatWithWeight'}],
},
'white': '#/$defs/WhiteCat',
},
'propertyName': 'color',
},
'oneOf': [
{
'discriminator': {
'mapping': {'height': '#/$defs/BlackCatWithHeight', 'weight': '#/$defs/BlackCatWithWeight'},
'propertyName': 'info',
},
'oneOf': [{'$ref': '#/$defs/BlackCatWithHeight'}, {'$ref': '#/$defs/BlackCatWithWeight'}],
},
{'$ref': '#/$defs/WhiteCat'},
],
'title': 'Pet',
}
},
'required': ['pet'],
'title': 'Cat',
'type': 'object',
}
def test_deeper_nested_discriminated_annotated_union():
class BlackCatWithHeight(BaseModel):
pet_type: Literal['cat']
color: Literal['black']
info: Literal['height']
black_infos: str
class BlackCatWithWeight(BaseModel):
pet_type: Literal['cat']
color: Literal['black']
info: Literal['weight']
black_infos: str
BlackCat = Annotated[Union[BlackCatWithHeight, BlackCatWithWeight], Field(discriminator='info')]
class WhiteCat(BaseModel):
pet_type: Literal['cat']
color: Literal['white']
white_infos: str
Cat = Annotated[Union[BlackCat, WhiteCat], Field(discriminator='color')]
class Dog(BaseModel):
pet_type: Literal['dog']
dog_name: str
Pet = Annotated[Union[Cat, Dog], Field(discriminator='pet_type')]
class Model(BaseModel):
pet: Pet
number: int
# insert_assert(Model.model_json_schema())
assert Model.model_json_schema() == {
'$defs': {
'BlackCatWithHeight': {
'properties': {
'black_infos': {'title': 'Black Infos', 'type': 'string'},
'color': {'const': 'black', 'title': 'Color', 'type': 'string'},
'info': {'const': 'height', 'title': 'Info', 'type': 'string'},
'pet_type': {'const': 'cat', 'title': 'Pet Type', 'type': 'string'},
},
'required': ['pet_type', 'color', 'info', 'black_infos'],
'title': 'BlackCatWithHeight',
'type': 'object',
},
'BlackCatWithWeight': {
'properties': {
'black_infos': {'title': 'Black Infos', 'type': 'string'},
'color': {'const': 'black', 'title': 'Color', 'type': 'string'},
'info': {'const': 'weight', 'title': 'Info', 'type': 'string'},
'pet_type': {'const': 'cat', 'title': 'Pet Type', 'type': 'string'},
},
'required': ['pet_type', 'color', 'info', 'black_infos'],
'title': 'BlackCatWithWeight',
'type': 'object',
},
'Dog': {
'properties': {
'dog_name': {'title': 'Dog Name', 'type': 'string'},
'pet_type': {'const': 'dog', 'title': 'Pet Type', 'type': 'string'},
},
'required': ['pet_type', 'dog_name'],
'title': 'Dog',
'type': 'object',
},
'WhiteCat': {
'properties': {
'color': {'const': 'white', 'title': 'Color', 'type': 'string'},
'pet_type': {'const': 'cat', 'title': 'Pet Type', 'type': 'string'},
'white_infos': {'title': 'White Infos', 'type': 'string'},
},
'required': ['pet_type', 'color', 'white_infos'],
'title': 'WhiteCat',
'type': 'object',
},
},
'properties': {
'number': {'title': 'Number', 'type': 'integer'},
'pet': {
'discriminator': {
'mapping': {
'cat': {
'discriminator': {
'mapping': {
'black': {
'discriminator': {
'mapping': {
'height': '#/$defs/BlackCatWithHeight',
'weight': '#/$defs/BlackCatWithWeight',
},
'propertyName': 'info',
},
'oneOf': [
{'$ref': '#/$defs/BlackCatWithHeight'},
{'$ref': '#/$defs/BlackCatWithWeight'},
],
},
'white': '#/$defs/WhiteCat',
},
'propertyName': 'color',
},
'oneOf': [
{
'discriminator': {
'mapping': {
'height': '#/$defs/BlackCatWithHeight',
'weight': '#/$defs/BlackCatWithWeight',
},
'propertyName': 'info',
},
'oneOf': [
{'$ref': '#/$defs/BlackCatWithHeight'},
{'$ref': '#/$defs/BlackCatWithWeight'},
],
},
{'$ref': '#/$defs/WhiteCat'},
],
},
'dog': '#/$defs/Dog',
},
'propertyName': 'pet_type',
},
'oneOf': [
{
'discriminator': {
'mapping': {
'black': {
'discriminator': {
'mapping': {
'height': '#/$defs/BlackCatWithHeight',
'weight': '#/$defs/BlackCatWithWeight',
},
'propertyName': 'info',
},
'oneOf': [
{'$ref': '#/$defs/BlackCatWithHeight'},
{'$ref': '#/$defs/BlackCatWithWeight'},
],
},
'white': '#/$defs/WhiteCat',
},
'propertyName': 'color',
},
'oneOf': [
{
'discriminator': {
'mapping': {
'height': '#/$defs/BlackCatWithHeight',
'weight': '#/$defs/BlackCatWithWeight',
},
'propertyName': 'info',
},
'oneOf': [
{'$ref': '#/$defs/BlackCatWithHeight'},
{'$ref': '#/$defs/BlackCatWithWeight'},
],
},
{'$ref': '#/$defs/WhiteCat'},
],
},
{'$ref': '#/$defs/Dog'},
],
'title': 'Pet',
},
},
'required': ['pet', 'number'],
'title': 'Model',
'type': 'object',
}
def test_discriminated_annotated_union_literal_enum():
class PetType(Enum):
cat = 'cat'
dog = 'dog'
class PetColor(str, Enum):
black = 'black'
white = 'white'
class PetInfo(Enum):
height = 0
weight = 1
class BlackCatWithHeight(BaseModel):
pet_type: Literal[PetType.cat]
color: Literal[PetColor.black]
info: Literal[PetInfo.height]
black_infos: str
class BlackCatWithWeight(BaseModel):
pet_type: Literal[PetType.cat]
color: Literal[PetColor.black]
info: Literal[PetInfo.weight]
black_infos: str
BlackCat = Annotated[Union[BlackCatWithHeight, BlackCatWithWeight], Field(discriminator='info')]
class WhiteCat(BaseModel):
pet_type: Literal[PetType.cat]
color: Literal[PetColor.white]
white_infos: str
Cat = Annotated[Union[BlackCat, WhiteCat], Field(discriminator='color')]
class Dog(BaseModel):
pet_type: Literal[PetType.dog]
dog_name: str
Pet = Annotated[Union[Cat, Dog], Field(discriminator='pet_type')]
class Model(BaseModel):
pet: Pet
number: int
# insert_assert(Model.model_json_schema())
assert Model.model_json_schema() == {
'$defs': {
'BlackCatWithHeight': {
'properties': {
'black_infos': {'title': 'Black Infos', 'type': 'string'},
'color': {'const': 'black', 'title': 'Color', 'type': 'string'},
'info': {'const': 0, 'title': 'Info', 'type': 'integer'},
'pet_type': {'const': 'cat', 'title': 'Pet Type', 'type': 'string'},
},
'required': ['pet_type', 'color', 'info', 'black_infos'],
'title': 'BlackCatWithHeight',
'type': 'object',
},
'BlackCatWithWeight': {
'properties': {
'black_infos': {'title': 'Black Infos', 'type': 'string'},
'color': {'const': 'black', 'title': 'Color', 'type': 'string'},
'info': {'const': 1, 'title': 'Info', 'type': 'integer'},
'pet_type': {'const': 'cat', 'title': 'Pet Type', 'type': 'string'},
},
'required': ['pet_type', 'color', 'info', 'black_infos'],
'title': 'BlackCatWithWeight',
'type': 'object',
},
'Dog': {
'properties': {
'dog_name': {'title': 'Dog Name', 'type': 'string'},
'pet_type': {'const': 'dog', 'title': 'Pet Type', 'type': 'string'},
},
'required': ['pet_type', 'dog_name'],
'title': 'Dog',
'type': 'object',
},
'WhiteCat': {
'properties': {
'color': {'const': 'white', 'title': 'Color', 'type': 'string'},
'pet_type': {'const': 'cat', 'title': 'Pet Type', 'type': 'string'},
'white_infos': {'title': 'White Infos', 'type': 'string'},
},
'required': ['pet_type', 'color', 'white_infos'],
'title': 'WhiteCat',
'type': 'object',
},
},
'properties': {
'number': {'title': 'Number', 'type': 'integer'},
'pet': {
'discriminator': {
'mapping': {
'cat': {
'discriminator': {
'mapping': {
'black': {
'discriminator': {
'mapping': {
'0': '#/$defs/BlackCatWithHeight',
'1': '#/$defs/BlackCatWithWeight',
},
'propertyName': 'info',
},
'oneOf': [
{'$ref': '#/$defs/BlackCatWithHeight'},
{'$ref': '#/$defs/BlackCatWithWeight'},
],
},
'white': '#/$defs/WhiteCat',
},
'propertyName': 'color',
},
'oneOf': [
{
'discriminator': {
'mapping': {
'0': '#/$defs/BlackCatWithHeight',
'1': '#/$defs/BlackCatWithWeight',
},
'propertyName': 'info',
},
'oneOf': [
{'$ref': '#/$defs/BlackCatWithHeight'},
{'$ref': '#/$defs/BlackCatWithWeight'},
],
},
{'$ref': '#/$defs/WhiteCat'},
],
},
'dog': '#/$defs/Dog',
},
'propertyName': 'pet_type',
},
'oneOf': [
{
'discriminator': {
'mapping': {
'black': {
'discriminator': {
'mapping': {
'0': '#/$defs/BlackCatWithHeight',
'1': '#/$defs/BlackCatWithWeight',
},
'propertyName': 'info',
},
'oneOf': [
{'$ref': '#/$defs/BlackCatWithHeight'},
{'$ref': '#/$defs/BlackCatWithWeight'},
],
},
'white': '#/$defs/WhiteCat',
},
'propertyName': 'color',
},
'oneOf': [
{
'discriminator': {
'mapping': {'0': '#/$defs/BlackCatWithHeight', '1': '#/$defs/BlackCatWithWeight'},
'propertyName': 'info',
},
'oneOf': [
{'$ref': '#/$defs/BlackCatWithHeight'},
{'$ref': '#/$defs/BlackCatWithWeight'},
],
},
{'$ref': '#/$defs/WhiteCat'},
],
},
{'$ref': '#/$defs/Dog'},
],
'title': 'Pet',
},
},
'required': ['pet', 'number'],
'title': 'Model',
'type': 'object',
}
def test_alias_same():
    """When both union members alias the discriminator field to the same name, the alias becomes ``propertyName``."""
    class Cat(BaseModel):
        pet_type: Literal['cat'] = Field(alias='typeOfPet')
        c: str
    class Dog(BaseModel):
        pet_type: Literal['dog'] = Field(alias='typeOfPet')
        d: str
    class Model(BaseModel):
        pet: Union[Cat, Dog] = Field(discriminator='pet_type')
        number: int
    # insert_assert(Model.model_json_schema())
    assert Model.model_json_schema() == {
        '$defs': {
            'Cat': {
                'properties': {
                    'c': {'title': 'C', 'type': 'string'},
                    'typeOfPet': {'const': 'cat', 'title': 'Typeofpet', 'type': 'string'},
                },
                'required': ['typeOfPet', 'c'],
                'title': 'Cat',
                'type': 'object',
            },
            'Dog': {
                'properties': {
                    'd': {'title': 'D', 'type': 'string'},
                    'typeOfPet': {'const': 'dog', 'title': 'Typeofpet', 'type': 'string'},
                },
                'required': ['typeOfPet', 'd'],
                'title': 'Dog',
                'type': 'object',
            },
        },
        'properties': {
            'number': {'title': 'Number', 'type': 'integer'},
            'pet': {
                'oneOf': [{'$ref': '#/$defs/Cat'}, {'$ref': '#/$defs/Dog'}],
                'title': 'Pet',
                'discriminator': {'mapping': {'cat': '#/$defs/Cat', 'dog': '#/$defs/Dog'}, 'propertyName': 'typeOfPet'},
            },
        },
        'required': ['pet', 'number'],
        'title': 'Model',
        'type': 'object',
    }
def test_nested_python_dataclasses():
    """
    Test schema generation for nested python dataclasses
    """
    from dataclasses import dataclass as python_dataclass
    @python_dataclass
    class ChildModel:
        name: str
    @python_dataclass
    class NestedModel:
        """
        Custom description
        """
        # Note: the Custom description will not be preserved as this is a vanilla dataclass
        # This is the same behavior as in v1
        child: list[ChildModel]
    # insert_assert(model_json_schema(dataclass(NestedModel)))
    assert model_json_schema(dataclass(NestedModel)) == {
        '$defs': {
            'ChildModel': {
                'properties': {'name': {'title': 'Name', 'type': 'string'}},
                'required': ['name'],
                'title': 'ChildModel',
                'type': 'object',
            }
        },
        'properties': {'child': {'items': {'$ref': '#/$defs/ChildModel'}, 'title': 'Child', 'type': 'array'}},
        'required': ['child'],
        'title': 'NestedModel',
        'type': 'object',
    }
def test_discriminated_union_in_list():
    """Nested discriminated unions produce nested ``discriminator`` mappings (inner union inlined in the mapping)."""
    class BlackCat(BaseModel):
        pet_type: Literal['cat']
        color: Literal['black']
        black_name: str
    class WhiteCat(BaseModel):
        pet_type: Literal['cat']
        color: Literal['white']
        white_name: str
    Cat = Annotated[Union[BlackCat, WhiteCat], Field(discriminator='color')]
    class Dog(BaseModel):
        pet_type: Literal['dog']
        name: str
    Pet = Annotated[Union[Cat, Dog], Field(discriminator='pet_type')]
    class Model(BaseModel):
        pets: Pet
        n: int
    # insert_assert(Model.model_json_schema())
    assert Model.model_json_schema() == {
        '$defs': {
            'BlackCat': {
                'properties': {
                    'black_name': {'title': 'Black Name', 'type': 'string'},
                    'color': {'const': 'black', 'title': 'Color', 'type': 'string'},
                    'pet_type': {'const': 'cat', 'title': 'Pet Type', 'type': 'string'},
                },
                'required': ['pet_type', 'color', 'black_name'],
                'title': 'BlackCat',
                'type': 'object',
            },
            'Dog': {
                'properties': {
                    'name': {'title': 'Name', 'type': 'string'},
                    'pet_type': {'const': 'dog', 'title': 'Pet Type', 'type': 'string'},
                },
                'required': ['pet_type', 'name'],
                'title': 'Dog',
                'type': 'object',
            },
            'WhiteCat': {
                'properties': {
                    'color': {'const': 'white', 'title': 'Color', 'type': 'string'},
                    'pet_type': {'const': 'cat', 'title': 'Pet Type', 'type': 'string'},
                    'white_name': {'title': 'White Name', 'type': 'string'},
                },
                'required': ['pet_type', 'color', 'white_name'],
                'title': 'WhiteCat',
                'type': 'object',
            },
        },
        'properties': {
            'n': {'title': 'N', 'type': 'integer'},
            'pets': {
                'discriminator': {
                    'mapping': {
                        'cat': {
                            'discriminator': {
                                'mapping': {'black': '#/$defs/BlackCat', 'white': '#/$defs/WhiteCat'},
                                'propertyName': 'color',
                            },
                            'oneOf': [{'$ref': '#/$defs/BlackCat'}, {'$ref': '#/$defs/WhiteCat'}],
                        },
                        'dog': '#/$defs/Dog',
                    },
                    'propertyName': 'pet_type',
                },
                'oneOf': [
                    {
                        'discriminator': {
                            'mapping': {'black': '#/$defs/BlackCat', 'white': '#/$defs/WhiteCat'},
                            'propertyName': 'color',
                        },
                        'oneOf': [{'$ref': '#/$defs/BlackCat'}, {'$ref': '#/$defs/WhiteCat'}],
                    },
                    {'$ref': '#/$defs/Dog'},
                ],
                'title': 'Pets',
            },
        },
        'required': ['pets', 'n'],
        'title': 'Model',
        'type': 'object',
    }
def test_model_with_type_attributes():
    """``type[...]`` fields get no JSON-schema ``type`` — only a title — since classes aren't JSON-representable."""
    class Foo:
        a: float
    class Bar(BaseModel):
        b: int
    class Baz(BaseModel):
        a: type[Foo]
        b: type[Bar]
    assert Baz.model_json_schema() == {
        'title': 'Baz',
        'type': 'object',
        'properties': {'a': {'title': 'A'}, 'b': {'title': 'B'}},
        'required': ['a', 'b'],
    }
@pytest.mark.parametrize('secret_cls', [SecretStr, SecretBytes])
@pytest.mark.parametrize(
    'field_kw,schema_kw',
    [
        [{'min_length': 6}, {'minLength': 6}],
        [{'max_length': 10}, {'maxLength': 10}],
        [{'min_length': 6, 'max_length': 10}, {'minLength': 6, 'maxLength': 10}],
    ],
    ids=['min-constraint', 'max-constraint', 'min-max-constraints'],
)
def test_secrets_schema(secret_cls, field_kw, schema_kw):
    """Secret types render as write-only password strings, with length constraints mapped to min/maxLength.

    Note: removed a stale commented-out ``[{}, {}]`` parametrize case that had no
    matching entry in ``ids`` (the ``ids`` list covers exactly the three active cases).
    """
    class Foobar(BaseModel):
        password: secret_cls = Field(**field_kw)
    assert Foobar.model_json_schema() == {
        'title': 'Foobar',
        'type': 'object',
        'properties': {
            'password': {'title': 'Password', 'type': 'string', 'writeOnly': True, 'format': 'password', **schema_kw}
        },
        'required': ['password'],
    }
def test_override_generate_json_schema():
    """A custom ``GenerateJsonSchema`` subclass can be plugged in via an overridden ``model_json_schema``."""
    class MyGenerateJsonSchema(GenerateJsonSchema):
        def generate(self, schema, mode='validation'):
            # Tag the generated schema with the dialect URI it was produced under.
            json_schema = super().generate(schema, mode=mode)
            json_schema['$schema'] = self.schema_dialect
            return json_schema
    class MyBaseModel(BaseModel):
        @classmethod
        def model_json_schema(
            cls,
            by_alias: bool = True,
            ref_template: str = DEFAULT_REF_TEMPLATE,
            schema_generator: type[GenerateJsonSchema] = MyGenerateJsonSchema,
            mode='validation',
            *,
            union_format: Literal['any_of', 'primitive_type_array'] = 'any_of',
        ) -> dict[str, Any]:
            return super().model_json_schema(by_alias, ref_template, schema_generator, mode, union_format=union_format)
    class MyModel(MyBaseModel):
        x: int
    assert MyModel.model_json_schema() == {
        '$schema': 'https://json-schema.org/draft/2020-12/schema',
        'properties': {'x': {'title': 'X', 'type': 'integer'}},
        'required': ['x'],
        'title': 'MyModel',
        'type': 'object',
    }
def test_generate_json_schema_generate_twice():
    """A ``GenerateJsonSchema`` instance is single-use: reusing it (via either entry point) raises ``PydanticUserError``."""
    generator = GenerateJsonSchema()
    class Model(BaseModel):
        title: str
    generator.generate(Model.__pydantic_core_schema__)
    with pytest.raises(
        PydanticUserError,
        match=re.escape(
            'This JSON schema generator has already been used to generate a JSON schema. '
            'You must create a new instance of GenerateJsonSchema to generate a new JSON schema.'
        ),
    ):
        generator.generate(Model.__pydantic_core_schema__)
    # Same restriction applies to the generate_definitions entry point.
    generator = GenerateJsonSchema()
    generator.generate_definitions([(Model, 'validation', Model.__pydantic_core_schema__)])
    with pytest.raises(
        PydanticUserError,
        match=re.escape(
            'This JSON schema generator has already been used to generate a JSON schema. '
            'You must create a new instance of GenerateJsonSchema to generate a new JSON schema.'
        ),
    ):
        generator.generate_definitions([(Model, 'validation', Model.__pydantic_core_schema__)])
def test_nested_default_json_schema():
    """A model-valued default is serialized (by alias) into the ``default`` key next to the ``$ref``."""
    class InnerModel(BaseModel):
        foo: str = 'bar'
        baz: str = Field(default='foobar', alias='my_alias')
    class OuterModel(BaseModel):
        nested_field: InnerModel = InnerModel()
    assert OuterModel.model_json_schema() == {
        '$defs': {
            'InnerModel': {
                'properties': {
                    'foo': {'default': 'bar', 'title': 'Foo', 'type': 'string'},
                    'my_alias': {'default': 'foobar', 'title': 'My Alias', 'type': 'string'},
                },
                'title': 'InnerModel',
                'type': 'object',
            }
        },
        'properties': {'nested_field': {'$ref': '#/$defs/InnerModel', 'default': {'my_alias': 'foobar', 'foo': 'bar'}}},
        'title': 'OuterModel',
        'type': 'object',
    }
@pytest.mark.xfail(
    reason=(
        'We are calling __get_pydantic_json_schema__ too many times.'
        ' The second time we analyze a model we get the CoreSchema from __pydantic_core_schema__.'
        ' But then we proceed to append to the metadata json schema functions.'
    )
)
def test_get_pydantic_core_schema_calls() -> None:
    """Verify when/how many times `__get_pydantic_core_schema__` gets called"""
    calls: list[str] = []
    class Model(BaseModel):
        @classmethod
        def __get_pydantic_json_schema__(cls, schema: CoreSchema, handler: GetJsonSchemaHandler) -> JsonSchemaValue:
            calls.append('Model::before')
            json_schema = handler(schema)
            calls.append('Model::after')
            return json_schema
    schema = Model.model_json_schema()
    expected: JsonSchemaValue = {'type': 'object', 'properties': {}, 'title': 'Model'}
    assert schema == expected
    assert calls == ['Model::before', 'Model::after']
    calls.clear()
    class CustomAnnotation(NamedTuple):
        name: str
        def __get_pydantic_json_schema__(self, schema: CoreSchema, handler: GetJsonSchemaHandler) -> JsonSchemaValue:
            calls.append(f'CustomAnnotation({self.name})::before')
            json_schema = handler(schema)
            calls.append(f'CustomAnnotation({self.name})::after')
            return json_schema
    # Annotations are applied outermost-last, so 'bar' wraps 'foo'.
    AnnotatedType = Annotated[str, CustomAnnotation('foo'), CustomAnnotation('bar')]
    schema = TypeAdapter(AnnotatedType).json_schema()
    expected: JsonSchemaValue = {'type': 'string'}
    assert schema == expected
    assert calls == [
        'CustomAnnotation(bar)::before',
        'CustomAnnotation(foo)::before',
        'CustomAnnotation(foo)::after',
        'CustomAnnotation(bar)::after',
    ]
    calls.clear()
    class OuterModel(BaseModel):
        x: Model
        @classmethod
        def __get_pydantic_json_schema__(cls, schema: CoreSchema, handler: GetJsonSchemaHandler) -> JsonSchemaValue:
            calls.append('OuterModel::before')
            json_schema = handler(schema)
            calls.append('OuterModel::after')
            return json_schema
    schema = OuterModel.model_json_schema()
    expected: JsonSchemaValue = {
        'type': 'object',
        'properties': {'x': {'$ref': '#/$defs/Model'}},
        'required': ['x'],
        'title': 'OuterModel',
        '$defs': {'Model': {'type': 'object', 'properties': {}, 'title': 'Model'}},
    }
    assert schema == expected
    assert calls == [
        'OuterModel::before',
        'Model::before',
        'Model::after',
        'OuterModel::after',
    ]
    calls.clear()
    AnnotatedModel = Annotated[Model, CustomAnnotation('foo')]
    schema = TypeAdapter(AnnotatedModel).json_schema()
    expected: JsonSchemaValue = {}
    assert schema == expected
    assert calls == [
        'CustomAnnotation(foo)::before',
        'Model::before',
        'Model::after',
        'CustomAnnotation(foo)::after',
    ]
    calls.clear()
    class OuterModelWithAnnotatedField(BaseModel):
        x: AnnotatedModel
    schema = OuterModelWithAnnotatedField.model_json_schema()
    expected: JsonSchemaValue = {
        'type': 'object',
        'properties': {'x': {'$ref': '#/$defs/Model'}},
        'required': ['x'],
        'title': 'OuterModel',
        '$defs': {'Model': {'type': 'object', 'properties': {}, 'title': 'Model'}},
    }
    assert schema == expected
    assert calls == [
        'OuterModel::before',
        'CustomAnnotation(foo)::before',
        'Model::before',
        'Model::after',
        'CustomAnnotation(foo)::after',
        'OuterModel::after',
    ]
    calls.clear()
def test_annotated_get_json_schema() -> None:
    """``__get_pydantic_json_schema__`` on an annotated custom type is invoked exactly once per schema build."""
    calls: list[int] = []
    class CustomType(str):
        @classmethod
        def __get_pydantic_core_schema__(
            cls, source_type: Any, handler: GetCoreSchemaHandler
        ) -> core_schema.CoreSchema:
            return handler(str)
        @classmethod
        def __get_pydantic_json_schema__(cls, schema: CoreSchema, handler: GetJsonSchemaHandler) -> JsonSchemaValue:
            calls.append(1)
            json_schema = handler(schema)
            return json_schema
    TypeAdapter(Annotated[CustomType, 123]).json_schema()
    assert sum(calls) == 1
def test_model_with_strict_mode():
    """``strict=True`` config does not change the generated JSON schema."""
    class Model(BaseModel):
        model_config = ConfigDict(strict=True)
        a: str
    assert Model.model_json_schema() == {
        'properties': {'a': {'title': 'A', 'type': 'string'}},
        'required': ['a'],
        'title': 'Model',
        'type': 'object',
    }
def test_model_with_schema_extra():
    """A dict ``json_schema_extra`` in model config is merged into the top-level schema."""
    class Model(BaseModel):
        a: str
        model_config = dict(json_schema_extra={'examples': [{'a': 'Foo'}]})
    assert Model.model_json_schema() == {
        'title': 'Model',
        'type': 'object',
        'properties': {'a': {'title': 'A', 'type': 'string'}},
        'required': ['a'],
        'examples': [{'a': 'Foo'}],
    }
@pytest.mark.skip_json_schema_validation(reason='Custom type used.')
def test_model_with_schema_extra_callable():
    """A two-arg callable ``json_schema_extra`` mutates the schema in place and receives the model class."""
    class Model(BaseModel):
        name: str = None
        @staticmethod
        def json_schema_extra(schema, model_class):
            schema.pop('properties')
            schema['type'] = 'override'
            assert model_class is Model
        model_config = dict(json_schema_extra=json_schema_extra)
    assert Model.model_json_schema() == {'title': 'Model', 'type': 'override'}
@pytest.mark.skip_json_schema_validation(reason='Custom type used.')
def test_model_with_schema_extra_callable_no_model_class():
    """A one-arg callable ``json_schema_extra`` is also supported (no model class passed)."""
    class Model(BaseModel):
        name: str = None
        @classmethod
        def json_schema_extra(cls, schema):
            schema.pop('properties')
            schema['type'] = 'override'
        model_config = dict(json_schema_extra=json_schema_extra)
    assert Model.model_json_schema() == {'title': 'Model', 'type': 'override'}
@pytest.mark.skip_json_schema_validation(reason='Custom type used.')
def test_model_with_schema_extra_callable_config_class():
    """Deprecated ``class Config`` style still honors a two-arg ``json_schema_extra`` staticmethod."""
    with pytest.warns(PydanticDeprecatedSince20, match='use ConfigDict instead'):
        class Model(BaseModel):
            name: str = None
            class Config:
                @staticmethod
                def json_schema_extra(schema, model_class):
                    schema.pop('properties')
                    schema['type'] = 'override'
                    assert model_class is Model
        assert Model.model_json_schema() == {'title': 'Model', 'type': 'override'}
@pytest.mark.skip_json_schema_validation(reason='Custom type used.')
def test_model_with_schema_extra_callable_no_model_class_config_class():
    """Deprecated ``class Config`` style also honors a one-arg ``json_schema_extra`` staticmethod."""
    with pytest.warns(PydanticDeprecatedSince20):
        class Model(BaseModel):
            name: str = None
            class Config:
                @staticmethod
                def json_schema_extra(schema):
                    schema.pop('properties')
                    schema['type'] = 'override'
        assert Model.model_json_schema() == {'title': 'Model', 'type': 'override'}
@pytest.mark.skip_json_schema_validation(reason='Custom type used.')
def test_model_with_schema_extra_callable_classmethod():
    """``json_schema_extra`` as a classmethod on ``class Config`` can read Config class attributes."""
    with pytest.warns(PydanticDeprecatedSince20):
        class Model(BaseModel):
            name: str = None
            class Config:
                type = 'foo'
                @classmethod
                def json_schema_extra(cls, schema, model_class):
                    schema.pop('properties')
                    schema['type'] = cls.type
                    assert model_class is Model
        assert Model.model_json_schema() == {'title': 'Model', 'type': 'foo'}
@pytest.mark.skip_json_schema_validation(reason='Custom type used.')
def test_model_with_schema_extra_callable_instance_method():
    """A plain (unbound) function on ``class Config`` works as ``json_schema_extra`` too."""
    with pytest.warns(PydanticDeprecatedSince20):
        class Model(BaseModel):
            name: str = None
            class Config:
                def json_schema_extra(schema, model_class):
                    schema.pop('properties')
                    schema['type'] = 'override'
                    assert model_class is Model
        assert Model.model_json_schema() == {'title': 'Model', 'type': 'override'}
def test_serialization_validation_interaction():
    """``Json[int]`` differs between modes; mixing modes splits defs into ``-Input``/``-Output`` variants."""
    class Inner(BaseModel):
        x: Json[int]
    class Outer(BaseModel):
        inner: Inner
    # Validation mode: Json[int] accepts a JSON-encoded string.
    _, v_schema = models_json_schema([(Outer, 'validation')])
    assert v_schema == {
        '$defs': {
            'Inner': {
                'properties': {
                    'x': {
                        'contentMediaType': 'application/json',
                        'contentSchema': {'type': 'integer'},
                        'title': 'X',
                        'type': 'string',
                    }
                },
                'required': ['x'],
                'title': 'Inner',
                'type': 'object',
            },
            'Outer': {
                'properties': {'inner': {'$ref': '#/$defs/Inner'}},
                'required': ['inner'],
                'title': 'Outer',
                'type': 'object',
            },
        }
    }
    # Serialization mode: the field is emitted as a plain integer.
    _, s_schema = models_json_schema([(Outer, 'serialization')])
    assert s_schema == {
        '$defs': {
            'Inner': {
                'properties': {'x': {'title': 'X', 'type': 'integer'}},
                'required': ['x'],
                'title': 'Inner',
                'type': 'object',
            },
            'Outer': {
                'properties': {'inner': {'$ref': '#/$defs/Inner'}},
                'required': ['inner'],
                'title': 'Outer',
                'type': 'object',
            },
        }
    }
    # Both modes together: names are disambiguated with -Input / -Output suffixes.
    _, vs_schema = models_json_schema([(Outer, 'validation'), (Outer, 'serialization')])
    assert vs_schema == {
        '$defs': {
            'Inner-Input': {
                'properties': {
                    'x': {
                        'contentMediaType': 'application/json',
                        'contentSchema': {'type': 'integer'},
                        'title': 'X',
                        'type': 'string',
                    }
                },
                'required': ['x'],
                'title': 'Inner',
                'type': 'object',
            },
            'Inner-Output': {
                'properties': {'x': {'title': 'X', 'type': 'integer'}},
                'required': ['x'],
                'title': 'Inner',
                'type': 'object',
            },
            'Outer-Input': {
                'properties': {'inner': {'$ref': '#/$defs/Inner-Input'}},
                'required': ['inner'],
                'title': 'Outer',
                'type': 'object',
            },
            'Outer-Output': {
                'properties': {'inner': {'$ref': '#/$defs/Inner-Output'}},
                'required': ['inner'],
                'title': 'Outer',
                'type': 'object',
            },
        }
    }
def test_extras_and_examples_are_json_encoded():
    """Model instances inside ``examples`` and ``json_schema_extra`` are serialized to plain JSON values."""
    class Toy(BaseModel):
        name: Annotated[str, Field(examples=['mouse', 'ball'])]
    class Cat(BaseModel):
        toys: Annotated[
            list[Toy],
            Field(examples=[[Toy(name='mouse'), Toy(name='ball')]], json_schema_extra={'special': Toy(name='bird')}),
        ]
    assert Cat.model_json_schema()['properties']['toys']['examples'] == [[{'name': 'mouse'}, {'name': 'ball'}]]
    assert Cat.model_json_schema()['properties']['toys']['special'] == {'name': 'bird'}
def test_computed_field():
    """Computed fields are absent in validation mode and appear (readOnly, required) in serialization mode."""
    class Model(BaseModel):
        x: int
        @computed_field
        @property
        def double_x(self) -> int:
            return 2 * self.x
    assert Model.model_json_schema(mode='validation') == {
        'properties': {'x': {'title': 'X', 'type': 'integer'}},
        'required': ['x'],
        'title': 'Model',
        'type': 'object',
    }
    assert Model.model_json_schema(mode='serialization') == {
        'properties': {
            'double_x': {'readOnly': True, 'title': 'Double X', 'type': 'integer'},
            'x': {'title': 'X', 'type': 'integer'},
        },
        'required': ['x', 'double_x'],
        'title': 'Model',
        'type': 'object',
    }
def test_serialization_schema_with_exclude_exclude_if():
    """``exclude``/``exclude_if`` fields and ``json_schema_serialization_defaults_required`` interact with
    ``field_is_present`` when building the serialization schema."""
    class JsonSchemaFieldAlwaysPresent(GenerateJsonSchema):
        def field_is_present(self, field) -> bool:
            # Always include fields in the JSON schema, even if excluded from serialization
            return True
    class ModelSerDefaultsNotRequired(BaseModel, json_schema_serialization_defaults_required=False):  # The default
        a: int
        b: int = 1
        c: int = Field(exclude=True)
        d: int = Field(default=1, exclude=True)
        e: int = Field(exclude_if=lambda v: v)
        f: int = Field(default=1, exclude_if=lambda v: v)
    # Default generator drops exclude=True fields; exclude_if fields remain (exclusion is value-dependent).
    assert ModelSerDefaultsNotRequired.model_json_schema(mode='serialization') == {
        'properties': {
            'a': {'title': 'A', 'type': 'integer'},
            'b': {'default': 1, 'title': 'B', 'type': 'integer'},
            'e': {'title': 'E', 'type': 'integer'},
            'f': {'default': 1, 'title': 'F', 'type': 'integer'},
        },
        'required': ['a'],
        'title': 'ModelSerDefaultsNotRequired',
        'type': 'object',
    }
    assert ModelSerDefaultsNotRequired.model_json_schema(
        mode='serialization', schema_generator=JsonSchemaFieldAlwaysPresent
    ) == {
        'properties': {
            'a': {'title': 'A', 'type': 'integer'},
            'b': {'default': 1, 'title': 'B', 'type': 'integer'},
            'c': {'title': 'C', 'type': 'integer'},
            'd': {'default': 1, 'title': 'D', 'type': 'integer'},
            'e': {'title': 'E', 'type': 'integer'},
            'f': {'default': 1, 'title': 'F', 'type': 'integer'},
        },
        'required': ['a', 'c'],
        'title': 'ModelSerDefaultsNotRequired',
        'type': 'object',
    }
    class ModelSerDefaultsRequired(BaseModel, json_schema_serialization_defaults_required=True):
        a: int
        b: int = 1
        c: int = Field(exclude=True)
        d: int = Field(default=1, exclude=True)
        e: int = Field(exclude_if=lambda v: v)
        f: int = Field(default=1, exclude_if=lambda v: v)
    # With serialization defaults required, defaulted fields become required too — unless exclude_if applies.
    assert ModelSerDefaultsRequired.model_json_schema(mode='serialization') == {
        'properties': {
            'a': {'title': 'A', 'type': 'integer'},
            'b': {'default': 1, 'title': 'B', 'type': 'integer'},
            'e': {'title': 'E', 'type': 'integer'},
            'f': {'default': 1, 'title': 'F', 'type': 'integer'},
        },
        'required': ['a', 'b'],
        'title': 'ModelSerDefaultsRequired',
        'type': 'object',
    }
    assert ModelSerDefaultsRequired.model_json_schema(
        mode='serialization', schema_generator=JsonSchemaFieldAlwaysPresent
    ) == {
        'properties': {
            'a': {'title': 'A', 'type': 'integer'},
            'b': {'default': 1, 'title': 'B', 'type': 'integer'},
            'c': {'title': 'C', 'type': 'integer'},
            'd': {'default': 1, 'title': 'D', 'type': 'integer'},
            'e': {'title': 'E', 'type': 'integer'},
            'f': {'default': 1, 'title': 'F', 'type': 'integer'},
        },
        'required': ['a', 'b', 'c', 'd'],
        'title': 'ModelSerDefaultsRequired',
        'type': 'object',
    }
@pytest.mark.parametrize('mapping_type', [dict, typing.Mapping])
def test_mappings_str_int_json_schema(mapping_type: Any):
    """Mapping types with str keys map to an object with ``additionalProperties`` for the value type."""
    class Model(BaseModel):
        str_int_map: mapping_type[str, int]
    assert Model.model_json_schema() == {
        'title': 'Model',
        'type': 'object',
        'properties': {
            'str_int_map': {
                'title': 'Str Int Map',
                'type': 'object',
                'additionalProperties': {'type': 'integer'},
            }
        },
        'required': ['str_int_map'],
    }
@pytest.mark.parametrize(('sequence_type'), [pytest.param(list), pytest.param(Sequence)])
def test_sequence_schema(sequence_type):
    """``list`` and ``Sequence`` both map to a JSON array with typed items."""
    class Model(BaseModel):
        field: sequence_type[int]
    assert Model.model_json_schema() == {
        'properties': {
            'field': {'items': {'type': 'integer'}, 'title': 'Field', 'type': 'array'},
        },
        'required': ['field'],
        'title': 'Model',
        'type': 'object',
    }
@pytest.mark.parametrize(('sequence_type',), [pytest.param(list), pytest.param(Sequence)])
def test_sequence_schema_with_max_length(sequence_type):
    """``max_length`` on a sequence field maps to JSON Schema ``maxItems``."""
    class Model(BaseModel):
        field: sequence_type[int] = Field(max_length=5)
    assert Model.model_json_schema() == {
        'properties': {
            'field': {'items': {'type': 'integer'}, 'maxItems': 5, 'title': 'Field', 'type': 'array'},
        },
        'required': ['field'],
        'title': 'Model',
        'type': 'object',
    }
@pytest.mark.parametrize(('sequence_type',), [pytest.param(list), pytest.param(Sequence)])
def test_sequence_schema_with_min_length(sequence_type):
    """``min_length`` on a sequence field maps to JSON Schema ``minItems``."""
    class Model(BaseModel):
        field: sequence_type[int] = Field(min_length=1)
    assert Model.model_json_schema() == {
        'properties': {
            'field': {'items': {'type': 'integer'}, 'minItems': 1, 'title': 'Field', 'type': 'array'},
        },
        'required': ['field'],
        'title': 'Model',
        'type': 'object',
    }
@pytest.mark.parametrize(('sequence_type',), [pytest.param(list), pytest.param(Sequence)])
def test_sequences_int_json_schema(sequence_type):
    """Integer sequences produce an array-of-integer schema and still validate from JSON input."""
    class Model(BaseModel):
        int_seq: sequence_type[int]
    assert Model.model_json_schema() == {
        'title': 'Model',
        'type': 'object',
        'properties': {
            'int_seq': {
                'title': 'Int Seq',
                'type': 'array',
                'items': {'type': 'integer'},
            },
        },
        'required': ['int_seq'],
    }
    assert Model.model_validate_json('{"int_seq": [1, 2, 3]}')
@pytest.mark.parametrize(
    'field_schema,model_schema',
    [
        (None, {'properties': {}, 'title': 'Model', 'type': 'object'}),
        (
            {'a': 'b'},
            {'properties': {'x': {'a': 'b', 'title': 'X'}}, 'required': ['x'], 'title': 'Model', 'type': 'object'},
        ),
    ],
)
@pytest.mark.parametrize('instance_of', [True, False])
def test_arbitrary_type_json_schema(field_schema, model_schema, instance_of):
    """``WithJsonSchema`` overrides (or, with None, skips) the schema for otherwise unschematizable arbitrary types."""
    class ArbitraryClass:
        pass
    if instance_of:
        class Model(BaseModel):
            x: Annotated[InstanceOf[ArbitraryClass], WithJsonSchema(field_schema)]
    else:
        class Model(BaseModel):
            model_config = dict(arbitrary_types_allowed=True)
            x: Annotated[ArbitraryClass, WithJsonSchema(field_schema)]
    assert Model.model_json_schema() == model_schema
@pytest.mark.parametrize(
    'metadata,json_schema',
    [
        (
            WithJsonSchema({'type': 'number'}),
            {
                'properties': {'x': {'anyOf': [{'type': 'number'}, {'type': 'null'}], 'title': 'X'}},
                'required': ['x'],
                'title': 'Model',
                'type': 'object',
            },
        ),
        (
            Examples([1, 2, 3]),
            {
                'properties': {
                    'x': {
                        'anyOf': [{'examples': [1, 2, 3], 'type': 'integer'}, {'type': 'null'}],
                        'title': 'X',
                    }
                },
                'required': ['x'],
                'title': 'Model',
                'type': 'object',
            },
        ),
    ],
)
def test_hashable_types(metadata, json_schema):
    """Hashable schema annotations apply to the annotated member of a union, not the whole union."""
    class Model(BaseModel):
        x: Union[Annotated[int, metadata], None]
    assert Model.model_json_schema() == json_schema
def test_root_model():
    """RootModel schemas inline the root type; docstrings become ``description`` and nesting uses ``$ref``."""
    class A(RootModel[int]):
        """A Model docstring"""
    assert A.model_json_schema() == {'title': 'A', 'description': 'A Model docstring', 'type': 'integer'}
    class B(RootModel[A]):
        pass
    # B has no docstring of its own, so only the referenced A carries a description.
    assert B.model_json_schema() == {
        '$defs': {'A': {'description': 'A Model docstring', 'title': 'A', 'type': 'integer'}},
        '$ref': '#/$defs/A',
        'title': 'B',
    }
    class C(RootModel[A]):
        """C Model docstring"""
    assert C.model_json_schema() == {
        '$defs': {'A': {'description': 'A Model docstring', 'title': 'A', 'type': 'integer'}},
        '$ref': '#/$defs/A',
        'title': 'C',
        'description': 'C Model docstring',
    }
def test_type_adapter_json_schemas_title_description():
    """``TypeAdapter.json_schemas`` only emits top-level title/description when explicitly provided."""
    class Model(BaseModel):
        a: str
    _, json_schema = TypeAdapter.json_schemas([(Model, 'validation', TypeAdapter(Model))])
    assert 'title' not in json_schema
    assert 'description' not in json_schema
    _, json_schema = TypeAdapter.json_schemas(
        [(Model, 'validation', TypeAdapter(Model))],
        title='test title',
        description='test description',
    )
    assert json_schema['title'] == 'test title'
    assert json_schema['description'] == 'test description'
def test_type_adapter_json_schemas_without_definitions():
    """No ``definitions`` key is emitted when the schema needs no shared definitions."""
    _, json_schema = TypeAdapter.json_schemas(
        [(int, 'validation', TypeAdapter(int))],
        ref_template='#/components/schemas/{model}',
    )
    assert 'definitions' not in json_schema
def test_custom_chain_schema():
    """A ``chain_schema`` core schema is represented by its (first) step — here, a plain list schema."""
    class MySequence:
        @classmethod
        def __get_pydantic_core_schema__(cls, source_type: Any, handler: GetCoreSchemaHandler) -> CoreSchema:
            list_schema = core_schema.list_schema()
            return core_schema.chain_schema([list_schema])
    class Model(BaseModel):
        model_config = ConfigDict(arbitrary_types_allowed=True)
        a: MySequence
    assert Model.model_json_schema() == {
        'properties': {'a': {'items': {}, 'title': 'A', 'type': 'array'}},
        'required': ['a'],
        'title': 'Model',
        'type': 'object',
    }
def test_json_or_python_schema():
    """A ``json_or_python_schema`` renders via its JSON branch."""
    class MyJsonOrPython:
        @classmethod
        def __get_pydantic_core_schema__(cls, source_type: Any, handler: GetCoreSchemaHandler) -> CoreSchema:
            int_schema = core_schema.int_schema()
            return core_schema.json_or_python_schema(json_schema=int_schema, python_schema=int_schema)
    class Model(BaseModel):
        model_config = ConfigDict(arbitrary_types_allowed=True)
        a: MyJsonOrPython
    assert Model.model_json_schema() == {
        'properties': {'a': {'title': 'A', 'type': 'integer'}},
        'required': ['a'],
        'title': 'Model',
        'type': 'object',
    }
def test_lax_or_strict_schema():
    """A ``lax_or_strict_schema`` renders as the underlying type's schema."""
    class MyLaxOrStrict:
        @classmethod
        def __get_pydantic_core_schema__(cls, source_type: Any, handler: GetCoreSchemaHandler) -> CoreSchema:
            int_schema = core_schema.int_schema()
            return core_schema.lax_or_strict_schema(lax_schema=int_schema, strict_schema=int_schema, strict=True)
    class Model(BaseModel):
        model_config = ConfigDict(arbitrary_types_allowed=True)
        a: MyLaxOrStrict
    assert Model.model_json_schema() == {
        'properties': {'a': {'title': 'A', 'type': 'integer'}},
        'required': ['a'],
        'title': 'Model',
        'type': 'object',
    }
def test_override_enum_json_schema():
    """An Enum's ``__get_pydantic_json_schema__`` can override the generated enum def (title, type)."""
    class CustomType(Enum):
        A = 'a'
        B = 'b'
        @classmethod
        def __get_pydantic_json_schema__(
            cls, core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler
        ) -> core_schema.CoreSchema:
            json_schema = handler(core_schema)
            json_schema.update(title='CustomType title', type='string')
            return json_schema
    class Model(BaseModel):
        x: CustomType
    # insert_assert(Model.model_json_schema())
    assert Model.model_json_schema() == {
        '$defs': {'CustomType': {'enum': ['a', 'b'], 'title': 'CustomType title', 'type': 'string'}},
        'properties': {'x': {'$ref': '#/$defs/CustomType'}},
        'required': ['x'],
        'title': 'Model',
        'type': 'object',
    }
def test_json_schema_extras_on_ref() -> None:
@dataclass
class JsonSchemaExamples:
examples: list[Any]
def __get_pydantic_json_schema__(
self, core_schema: CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
json_schema = handler(core_schema)
assert json_schema.keys() == {'$ref'}
json_schema['examples'] = to_jsonable_python(self.examples)
return json_schema
@dataclass
class JsonSchemaTitle:
title: str
def __get_pydantic_json_schema__(
self, core_schema: CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
json_schema = handler(core_schema)
assert json_schema.keys() == {'$ref', 'examples'}
json_schema['title'] = self.title
return json_schema
class Model(BaseModel):
name: str
age: int
ta = TypeAdapter(Annotated[Model, JsonSchemaExamples([Model(name='John', age=28)]), JsonSchemaTitle('ModelTitle')])
# insert_assert(ta.json_schema())
assert ta.json_schema() == {
'$defs': {
'Model': {
'properties': {'age': {'title': 'Age', 'type': 'integer'}, 'name': {'title': 'Name', 'type': 'string'}},
'required': ['name', 'age'],
'title': 'Model',
'type': 'object',
}
},
'$ref': '#/$defs/Model',
'examples': [{'name': 'John', 'age': 28}],
'title': 'ModelTitle',
}
def test_inclusion_of_defaults():
    """Factory defaults are omitted by default; a custom ``get_default_value`` can materialize them."""
    class Model(BaseModel):
        x: int = 1
        y: int = Field(default_factory=lambda: 2)
    assert Model.model_json_schema() == {
        'properties': {'x': {'default': 1, 'title': 'X', 'type': 'integer'}, 'y': {'title': 'Y', 'type': 'integer'}},
        'title': 'Model',
        'type': 'object',
    }
    class AllDefaults(GenerateJsonSchema):
        def get_default_value(self, schema: core_schema.WithDefaultSchema) -> Any:
            if 'default' in schema:
                return schema['default']
            elif 'default_factory' in schema:
                # Users should also account for default factories taking validated data
                return schema['default_factory']()
            return NoDefault
    assert Model.model_json_schema(schema_generator=AllDefaults) == {
        'properties': {
            'x': {'default': 1, 'title': 'X', 'type': 'integer'},
            'y': {'default': 2, 'title': 'Y', 'type': 'integer'},
        },
        'title': 'Model',
        'type': 'object',
    }
def test_resolve_def_schema_from_core_schema() -> None:
    """``handler.resolve_ref_schema`` lets an annotation mutate the referenced def, not just the $ref site."""
    class Inner(BaseModel):
        x: int
    class Marker:
        def __get_pydantic_json_schema__(
            self, core_schema: CoreSchema, handler: GetJsonSchemaHandler
        ) -> JsonSchemaValue:
            field_schema = handler(core_schema)
            # 'Foo' lands next to the $ref; 'Bar' lands on the resolved Inner def itself.
            field_schema['title'] = 'Foo'
            original_schema = handler.resolve_ref_schema(field_schema)
            original_schema['title'] = 'Bar'
            return field_schema
    class Outer(BaseModel):
        inner: Annotated[Inner, Marker()]
    # insert_assert(Outer.model_json_schema())
    assert Outer.model_json_schema() == {
        '$defs': {
            'Inner': {
                'properties': {'x': {'title': 'X', 'type': 'integer'}},
                'required': ['x'],
                'title': 'Bar',
                'type': 'object',
            }
        },
        'properties': {'inner': {'$ref': '#/$defs/Inner', 'title': 'Foo'}},
        'required': ['inner'],
        'title': 'Outer',
        'type': 'object',
    }
def test_examples_annotation() -> None:
    """List-style ``Examples`` annotations are emitted and concatenated when stacked."""
    ListWithExamples = Annotated[
        list[float],
        Examples([[1, 1, 2, 3, 5], [1, 2, 3]]),
    ]
    ta = TypeAdapter(ListWithExamples)
    assert ta.json_schema() == {
        'examples': [[1, 1, 2, 3, 5], [1, 2, 3]],
        'items': {'type': 'number'},
        'type': 'array',
    }
    ListWithExtraExample = Annotated[
        ListWithExamples,
        Examples([[3.14, 2.71]]),
    ]
    ta = TypeAdapter(ListWithExtraExample)
    assert ta.json_schema() == {
        'examples': [[1, 1, 2, 3, 5], [1, 2, 3], [3.14, 2.71]],
        'items': {'type': 'number'},
        'type': 'array',
    }
@pytest.mark.skip_json_schema_validation(reason='Uses old examples format, planned for removal in v3.0.')
def test_examples_annotation_dict() -> None:
    """Deprecated dict-style ``Examples`` still works (with a deprecation warning) and merges when stacked."""
    with pytest.warns(PydanticDeprecatedSince29):
        ListWithExamples = Annotated[
            list[float],
            Examples({'Fibonacci': [1, 1, 2, 3, 5]}),
        ]
    ta = TypeAdapter(ListWithExamples)
    # insert_assert(ta.json_schema())
    assert ta.json_schema() == {
        'examples': {'Fibonacci': [1, 1, 2, 3, 5]},
        'items': {'type': 'number'},
        'type': 'array',
    }
    with pytest.warns(PydanticDeprecatedSince29):
        ListWithMoreExamples = Annotated[
            ListWithExamples,
            Examples(
                {
                    'Constants': [
                        3.14,
                        2.71,
                    ]
                }
            ),
        ]
    ta = TypeAdapter(ListWithMoreExamples)
    assert ta.json_schema() == {
        'examples': {'Constants': [3.14, 2.71], 'Fibonacci': [1, 1, 2, 3, 5]},
        'items': {'type': 'number'},
        'type': 'array',
    }
def test_examples_mixed_types() -> None:
    """Mixing list- and dict-style ``Examples`` warns and coerces to the later style, merging values."""
    with pytest.warns(PydanticDeprecatedSince29):
        ListThenDict = Annotated[
            int,
            Examples([1, 2]),
            Examples({'some_example': [3, 4]}),
        ]
        DictThenList = Annotated[
            int,
            Examples({'some_example': [3, 4]}),
            Examples([1, 2]),
        ]
    list_then_dict_ta = TypeAdapter(ListThenDict)
    dict_then_list_ta = TypeAdapter(DictThenList)
    with pytest.warns(
        UserWarning,
        match=re.escape('Updating existing JSON Schema examples of type list with examples of type dict.'),
    ):
        assert list_then_dict_ta.json_schema() == {
            'examples': [1, 2, 3, 4],
            'type': 'integer',
        }
    with pytest.warns(
        UserWarning,
        match=re.escape('Updating existing JSON Schema examples of type dict with examples of type list.'),
    ):
        assert dict_then_list_ta.json_schema() == {
            'examples': [3, 4, 1, 2],
            'type': 'integer',
        }
def test_skip_json_schema_annotation() -> None:
    """``SkipJsonSchema`` members are dropped from the union's JSON schema but still validate at runtime."""
    class Model(BaseModel):
        x: Union[int, SkipJsonSchema[None]] = None
        y: Union[int, SkipJsonSchema[None]] = 1
        z: Union[int, SkipJsonSchema[str]] = 'foo'
    # Validation still accepts the skipped member.
    assert Model(y=None).y is None
    # insert_assert(Model.model_json_schema())
    assert Model.model_json_schema() == {
        'properties': {
            'x': {'default': None, 'title': 'X', 'type': 'integer'},
            'y': {'default': 1, 'title': 'Y', 'type': 'integer'},
            'z': {'default': 'foo', 'title': 'Z', 'type': 'integer'},
        },
        'title': 'Model',
        'type': 'object',
    }
def test_skip_json_schema_exclude_default():
    """A callable ``json_schema_extra`` can strip the ``default`` key added for a SkipJsonSchema default."""
    class Model(BaseModel):
        x: Union[int, SkipJsonSchema[None]] = Field(default=None, json_schema_extra=lambda s: s.pop('default'))
    assert Model().x is None
    # insert_assert(Model.model_json_schema())
    assert Model.model_json_schema() == {
        'properties': {
            'x': {'title': 'X', 'type': 'integer'},
        },
        'title': 'Model',
        'type': 'object',
    }
def test_typeddict_field_required_missing() -> None:
"""https://github.com/pydantic/pydantic/issues/6192"""
class CustomType:
def __init__(self, data: dict[str, int]) -> None:
self.data = data
@classmethod
def __get_pydantic_core_schema__(cls, source_type: Any, handler: GetCoreSchemaHandler) -> CoreSchema:
data_schema = core_schema.typed_dict_schema(
{
'subunits': core_schema.typed_dict_field(
core_schema.int_schema(),
),
}
)
return core_schema.no_info_after_validator_function(cls, data_schema)
class Model(BaseModel):
t: CustomType
m = Model(t={'subunits': 123})
assert type(m.t) is CustomType
assert m.t.data == {'subunits': 123}
with pytest.raises(ValidationError) as exc_info:
Model(t={'subunits': 'abc'})
# insert_assert(exc_info.value.errors(include_url=False))
assert exc_info.value.errors(include_url=False) == [
{
'type': 'int_parsing',
'loc': ('t', 'subunits'),
'msg': 'Input should be a valid integer, unable to parse string as an integer',
'input': 'abc',
}
]
def test_json_schema_keys_sorting() -> None:
"""We sort all keys except those under a 'property' parent key"""
class Model(BaseModel):
b: int
a: str
class OuterModel(BaseModel):
inner: list[Model] = Field(default=[Model(b=1, a='fruit')])
# verify the schema contents
# this is just to get a nicer error message / diff if it fails
expected = {
'$defs': {
'Model': {
'properties': {'b': {'title': 'B', 'type': 'integer'}, 'a': {'title': 'A', 'type': 'string'}},
'required': ['b', 'a'],
'title': 'Model',
'type': 'object',
}
},
'properties': {
'inner': {
'default': [{'b': 1, 'a': 'fruit'}],
'items': {'$ref': '#/$defs/Model'},
'title': 'Inner',
'type': 'array',
}
},
'title': 'OuterModel',
'type': 'object',
}
actual = OuterModel.model_json_schema()
assert actual == expected
# verify order
# dumping to json just happens to be a simple way to verify the order
assert json.dumps(actual, indent=2) == json.dumps(expected, indent=2)
def test_custom_type_gets_unpacked_ref() -> None:
class Annotation:
def __get_pydantic_json_schema__(
self, schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
json_schema = handler(schema)
json_schema['title'] = 'Set from annotation'
return json_schema
class Model(BaseModel):
x: int
@classmethod
def __get_pydantic_json_schema__(
cls, schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
json_schema = handler(schema)
return json_schema
ta = TypeAdapter(Annotated[Model, Annotation()])
# insert_assert(ta.json_schema())
assert ta.json_schema() == {
'$defs': {
'Model': {
'properties': {'x': {'title': 'X', 'type': 'integer'}},
'required': ['x'],
'title': 'Model',
'type': 'object',
}
},
'$ref': '#/$defs/Model',
'title': 'Set from annotation',
}
@pytest.mark.parametrize(
'annotation, expected',
[
(Annotated[int, Field(json_schema_extra={'title': 'abc'})], {'type': 'integer', 'title': 'abc'}),
(
Annotated[int, Field(title='abc'), Field(description='xyz')],
{'type': 'integer', 'title': 'abc', 'description': 'xyz'},
),
(Annotated[int, Field(gt=0)], {'type': 'integer', 'exclusiveMinimum': 0}),
(
Annotated[int, Field(gt=0), Field(lt=100)],
{'type': 'integer', 'exclusiveMinimum': 0, 'exclusiveMaximum': 100},
),
(Annotated[int, Field(examples=[1])], {'type': 'integer', 'examples': [1]}),
],
ids=repr,
)
def test_field_json_schema_metadata(annotation: type[Any], expected: JsonSchemaValue) -> None:
ta = TypeAdapter(annotation)
assert ta.json_schema() == expected
def test_multiple_models_with_same_qualname():
from pydantic import create_model
model_a1 = create_model(
'A',
inner_a1=(str, ...),
)
model_a2 = create_model(
'A',
inner_a2=(str, ...),
)
model_c = create_model(
'B',
outer_a1=(model_a1, ...),
outer_a2=(model_a2, ...),
)
# insert_assert(model_c.model_json_schema())
assert model_c.model_json_schema() == {
'$defs': {
'tests__test_json_schema__A__1': {
'properties': {'inner_a1': {'title': 'Inner A1', 'type': 'string'}},
'required': ['inner_a1'],
'title': 'A',
'type': 'object',
},
'tests__test_json_schema__A__2': {
'properties': {'inner_a2': {'title': 'Inner A2', 'type': 'string'}},
'required': ['inner_a2'],
'title': 'A',
'type': 'object',
},
},
'properties': {
'outer_a1': {'$ref': '#/$defs/tests__test_json_schema__A__1'},
'outer_a2': {'$ref': '#/$defs/tests__test_json_schema__A__2'},
},
'required': ['outer_a1', 'outer_a2'],
'title': 'B',
'type': 'object',
}
def test_generate_definitions_for_no_ref_schemas():
decimal_schema = TypeAdapter(Decimal).core_schema
class Model(BaseModel):
pass
result = GenerateJsonSchema().generate_definitions(
[
('Decimal', 'validation', decimal_schema),
('Decimal', 'serialization', decimal_schema),
('Model', 'validation', Model.__pydantic_core_schema__),
]
)
assert result == (
{
('Decimal', 'serialization'): {
'type': 'string',
'pattern': '^(?!^[-+.]*$)[+-]?0*\\d*\\.?\\d*$',
},
('Decimal', 'validation'): {
'anyOf': [
{'type': 'number'},
{
'type': 'string',
'pattern': '^(?!^[-+.]*$)[+-]?0*\\d*\\.?\\d*$',
},
]
},
('Model', 'validation'): {'$ref': '#/$defs/Model'},
},
{'Model': {'properties': {}, 'title': 'Model', 'type': 'object'}},
)
def test_chain_schema():
# this is a contrived schema which requires a string input that can be coerced to an int:
s = core_schema.chain_schema([core_schema.str_schema(), core_schema.int_schema()])
assert SchemaValidator(s).validate_python('1') == 1 # proof it works this way
assert GenerateJsonSchema().generate(s, mode='validation') == {'type': 'string'}
assert GenerateJsonSchema().generate(s, mode='serialization') == {'type': 'integer'}
def test_deferred_json_schema():
class Foo(BaseModel):
x: 'Bar'
with pytest.raises(PydanticUserError, match='`Foo` is not fully defined'):
Foo.model_json_schema()
class Bar(BaseModel):
pass
Foo.model_rebuild()
assert Foo.model_json_schema() == {
'$defs': {'Bar': {'properties': {}, 'title': 'Bar', 'type': 'object'}},
'properties': {'x': {'$ref': '#/$defs/Bar'}},
'required': ['x'],
'title': 'Foo',
'type': 'object',
}
def test_dollar_ref_alias():
class MyModel(BaseModel):
my_field: str = Field(alias='$ref')
assert MyModel.model_json_schema() == {
'properties': {'$ref': {'title': '$Ref', 'type': 'string'}},
'required': ['$ref'],
'title': 'MyModel',
'type': 'object',
}
def test_multiple_parametrization_of_generic_model() -> None:
"""https://github.com/pydantic/pydantic/issues/6708"""
T = TypeVar('T')
calls = 0
class Inner(BaseModel):
a: int
@classmethod
def __get_pydantic_json_schema__(
cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
nonlocal calls
calls += 1
json_schema = handler(core_schema)
return json_schema
class Outer(BaseModel, Generic[T]):
b: Optional[T]
class ModelTest(BaseModel):
c: Outer[Inner]
for _ in range(sys.getrecursionlimit() + 1):
class ModelTest(BaseModel):
c: Outer[Inner]
ModelTest.model_json_schema()
# this is not necessarily a promise we make
# (in fact, we've had bugs in the past where this was not the case and we'd
# call the __get_pydantic_json_schema__ method multiple times)
# but it's much easier to test for than absence of a recursion limit
assert calls == 1
def test_callable_json_schema_extra():
def pop_default(s):
s.pop('default')
class Model(BaseModel):
a: int = Field(default=1, json_schema_extra=pop_default)
b: Annotated[int, Field(default=2), Field(json_schema_extra=pop_default)]
c: Annotated[int, Field(default=3)] = Field(json_schema_extra=pop_default)
assert Model().model_dump() == {'a': 1, 'b': 2, 'c': 3}
assert Model(a=11, b=12, c=13).model_dump() == {
'a': 11,
'b': 12,
'c': 13,
}
json_schema = Model.model_json_schema()
for key in 'abc':
assert json_schema['properties'][key] == {'title': key.upper(), 'type': 'integer'} # default is not present
def test_callable_json_schema_extra_dataclass():
def pop_default(s):
s.pop('default')
@pydantic.dataclasses.dataclass
class MyDataclass:
# Note that a and b here have to come first since dataclasses requires annotation-only fields to come before
# fields with defaults (for similar reasons to why function arguments with defaults must come later)
# But otherwise, evnerything seems to work properly
a: Annotated[int, Field(json_schema_extra=pop_default), Field(default=1)]
b: Annotated[int, Field(default=2), Field(json_schema_extra=pop_default)]
c: int = Field(default=3, json_schema_extra=pop_default)
d: Annotated[int, Field(json_schema_extra=pop_default)] = 4
e: Annotated[int, Field(json_schema_extra=pop_default)] = Field(default=5)
f: Annotated[int, Field(default=6)] = Field(json_schema_extra=pop_default)
adapter = TypeAdapter(MyDataclass)
assert adapter.dump_python(MyDataclass()) == {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6}
assert adapter.dump_python(MyDataclass(a=11, b=12, c=13, d=14, e=15, f=16)) == {
'a': 11,
'b': 12,
'c': 13,
'd': 14,
'e': 15,
'f': 16,
}
json_schema = adapter.json_schema()
for key in 'abcdef':
assert json_schema['properties'][key] == {'title': key.upper(), 'type': 'integer'} # default is not present
def test_model_rebuild_happens_even_with_parent_classes(create_module):
module = create_module(
# language=Python
"""
from __future__ import annotations
from pydantic import BaseModel
|
MyModel
|
python
|
sympy__sympy
|
sympy/polys/agca/modules.py
|
{
"start": 12822,
"end": 14257
}
|
class ____(FreeModule):
"""
Free module over a generalized polynomial ring.
Do not instantiate this, use the constructor method of the ring instead:
Examples
========
>>> from sympy.abc import x
>>> from sympy import QQ
>>> F = QQ.old_poly_ring(x).free_module(3)
>>> F
QQ[x]**3
>>> F.contains([x, 1, 0])
True
>>> F.contains([1/x, 0, 1])
False
"""
def __init__(self, ring, rank):
from sympy.polys.domains.old_polynomialring import PolynomialRingBase
FreeModule.__init__(self, ring, rank)
if not isinstance(ring, PolynomialRingBase):
raise NotImplementedError('This implementation only works over '
+ 'polynomial rings, got %s' % ring)
if not isinstance(ring.dom, Field):
raise NotImplementedError('Ground domain must be a field, '
+ 'got %s' % ring.dom)
def submodule(self, *gens, **opts):
"""
Generate a submodule.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import QQ
>>> M = QQ.old_poly_ring(x, y).free_module(2).submodule([x, x + y])
>>> M
<[x, x + y]>
>>> M.contains([2*x, 2*x + 2*y])
True
>>> M.contains([x, y])
False
"""
return SubModulePolyRing(gens, self, **opts)
|
FreeModulePolyRing
|
python
|
coleifer__peewee
|
tests/prefetch_tests.py
|
{
"start": 21761,
"end": 25610
}
|
class ____(ModelTestCase):
database = get_in_memory_db()
requires = [X, Z, A, B, C, C1, C2]
def test_prefetch_multirefs(self):
x1, x2, x3 = [X.create(name=n) for n in ('x1', 'x2', 'x3')]
for i, x in enumerate((x1, x2, x3), 1):
for j in range(i):
Z.create(x=x, name='%s-z%s' % (x.name, j))
xs = {x.name: x for x in X.select()}
xs[None] = None
data = [
('a1',
'x1',
['x1-z0'],
[
('a1-b1', 'x1', ['x1-z0'], [
('a1-b1-c1', 'x1', ['x1-z0'], [], []),
]),
]),
('a2',
'x2',
['x2-z0', 'x2-z1'],
[
('a2-b1', 'x1', ['x1-z0'], [
('a2-b1-c1', 'x1', ['x1-z0'], [], []),
]),
('a2-b2', 'x2', ['x2-z0', 'x2-z1'], [
('a2-b2-c1', 'x2', ['x2-z0', 'x2-z1'], [], []),
('a2-b2-c2', 'x1', ['x1-z0'], [], []),
('a2-b2-cx', None, [], [], []),
]),
]),
('a3',
'x3',
['x3-z0', 'x3-z1', 'x3-z2'],
[
('a3-b1', 'x1', ['x1-z0'], [
('a3-b1-c1', 'x1', ['x1-z0'], [], []),
]),
('a3-b2', 'x2', ['x2-z0', 'x2-z1'], [
('a3-b2-c1', 'x2', ['x2-z0', 'x2-z1'], [], []),
('a3-b2-c2', 'x2', ['x2-z0', 'x2-z1'], [], []),
('a3-b2-cx1', None, [], [], []),
('a3-b2-cx2', None, [], [], []),
('a3-b2-cx3', None, [], [], []),
]),
('a3-b3', 'x3', ['x3-z0', 'x3-z1', 'x3-z2'], [
('a3-b3-c1', 'x3', ['x3-z0', 'x3-z1', 'x3-z2'], [], []),
('a3-b3-c2', 'x3', ['x3-z0', 'x3-z1', 'x3-z2'], [], []),
('a3-b3-c3', 'x3', ['x3-z0', 'x3-z1', 'x3-z2'],
['c1-1', 'c1-2', 'c1-3', 'c1-4'],
['c2-1', 'c2-2']),
]),
]),
]
for a, ax, azs, bs in data:
a = A.create(name=a, x=xs[ax])
for b, bx, bzs, cs in bs:
b = B.create(name=b, a=a, x=xs[bx])
for c, cx, czs, c1s, c2s in cs:
c = C.create(name=c, b=b, x=xs[cx])
for c1 in c1s:
C1.create(name=c1, c=c)
for c2 in c2s:
C2.create(name=c2, c=c)
AX = X.alias('ax')
AXZ = Z.alias('axz')
BX = X.alias('bx')
BXZ = Z.alias('bxz')
CX = X.alias('cx')
CXZ = Z.alias('cxz')
with self.assertQueryCount(11):
q = prefetch(A.select().order_by(A.name), *(
(AX, A), (AXZ, AX),
(B, A), (BX, B), (BXZ, BX),
(C, B), (CX, C), (CXZ, CX),
(C1, C), (C2, C)))
with self.assertQueryCount(0):
accum = []
for a in list(q):
azs = [z.name for z in a.x.z_set]
bs = []
for b in a.b_set:
bzs = [z.name for z in b.x.z_set]
cs = []
for c in b.c_set:
czs = [z.name for z in c.x.z_set] if c.x else []
c1s = [c1.name for c1 in c.c1_set]
c2s = [c2.name for c2 in c.c2_set]
cs.append((c.name, c.x.name if c.x else None, czs,
c1s, c2s))
bs.append((b.name, b.x.name, bzs, cs))
accum.append((a.name, a.x.name, azs, bs))
self.assertEqual(data, accum)
|
TestPrefetchMultiRefs
|
python
|
getsentry__sentry
|
src/sentry/insights/migrations/0001_squashed_0001_add_starred_transactions_model.py
|
{
"start": 325,
"end": 3140
}
|
class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = True
replaces = [
("insights", "0001_add_starred_transactions_model"),
]
initial = True
checked = False # This is an initial migration and can take locks
dependencies = [
("sentry", "0001_squashed_0904_onboarding_task_project_id_idx"),
]
operations = [
migrations.CreateModel(
name="InsightsStarredSegment",
fields=[
(
"id",
sentry.db.models.fields.bounded.BoundedBigAutoField(
primary_key=True, serialize=False
),
),
("date_updated", models.DateTimeField(auto_now=True)),
("date_added", models.DateTimeField(auto_now_add=True)),
(
"user_id",
sentry.db.models.fields.hybrid_cloud_foreign_key.HybridCloudForeignKey(
"sentry.User", db_index=True, on_delete="CASCADE"
),
),
("segment_name", models.CharField()),
(
"organization",
sentry.db.models.fields.foreignkey.FlexibleForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="sentry.organization"
),
),
(
"project",
sentry.db.models.fields.foreignkey.FlexibleForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="sentry.project"
),
),
],
options={
"db_table": "insights_starred_segments",
"unique_together": {("project", "user_id", "segment_name")},
},
),
]
|
Migration
|
python
|
marshmallow-code__marshmallow
|
src/marshmallow/fields.py
|
{
"start": 64036,
"end": 64253
}
|
class ____(IPInterface):
"""A IPv4 Network Interface field."""
default_error_messages = {"invalid_ip_interface": "Not a valid IPv4 interface."}
DESERIALIZATION_CLASS = ipaddress.IPv4Interface
|
IPv4Interface
|
python
|
pypa__virtualenv
|
src/virtualenv/seed/seeder.py
|
{
"start": 74,
"end": 1155
}
|
class ____(ABC):
"""A seeder will install some seed packages into a virtual environment."""
def __init__(self, options, enabled) -> None:
"""
Create.
:param options: the parsed options as defined within :meth:`add_parser_arguments`
:param enabled: a flag weather the seeder is enabled or not
"""
self.enabled = enabled
self.env = options.env
@classmethod
def add_parser_arguments(cls, parser, interpreter, app_data):
"""
Add CLI arguments for this seed mechanisms.
:param parser: the CLI parser
:param app_data: the CLI parser
:param interpreter: the interpreter this virtual environment is based of
"""
raise NotImplementedError
@abstractmethod
def run(self, creator):
"""
Perform the seed operation.
:param creator: the creator (based of :class:`virtualenv.create.creator.Creator`) we used to create this \
virtual environment
"""
raise NotImplementedError
__all__ = [
"Seeder",
]
|
Seeder
|
python
|
yaml__pyyaml
|
lib/yaml/events.py
|
{
"start": 479,
"end": 667
}
|
class ____(Event):
def __init__(self, anchor, start_mark=None, end_mark=None):
self.anchor = anchor
self.start_mark = start_mark
self.end_mark = end_mark
|
NodeEvent
|
python
|
apache__airflow
|
task-sdk/src/airflow/sdk/api/datamodels/_generated.py
|
{
"start": 12851,
"end": 13021
}
|
class ____(str, Enum):
SUCCESS = "success"
FAILED = "failed"
SKIPPED = "skipped"
UPSTREAM_FAILED = "upstream_failed"
REMOVED = "removed"
|
TerminalTIState
|
python
|
fluentpython__example-code-2e
|
10-dp-1class-func/untyped/strategy_best.py
|
{
"start": 1587,
"end": 3097
}
|
class ____: # the Context
def __init__(self, customer, cart, promotion=None):
self.customer = customer
self.cart = list(cart)
self.promotion = promotion
def total(self):
if not hasattr(self, '__total'):
self.__total = sum(item.total() for item in self.cart)
return self.__total
def due(self):
if self.promotion is None:
discount = 0
else:
discount = self.promotion(self)
return self.total() - discount
def __repr__(self):
return f'<Order total: {self.total():.2f} due: {self.due():.2f}>'
def fidelity_promo(order):
"""5% discount for customers with 1000 or more fidelity points"""
return order.total() * .05 if order.customer.fidelity >= 1000 else 0
def bulk_item_promo(order):
"""10% discount for each LineItem with 20 or more units"""
discount = 0
for item in order.cart:
if item.quantity >= 20:
discount += item.total() * .1
return discount
def large_order_promo(order):
"""7% discount for orders with 10 or more distinct items"""
distinct_items = {item.product for item in order.cart}
if len(distinct_items) >= 10:
return order.total() * .07
return 0
# tag::STRATEGY_BEST[]
promos = [fidelity_promo, bulk_item_promo, large_order_promo] # <1>
def best_promo(order): # <2>
"""Select best discount available
"""
return max(promo(order) for promo in promos) # <3>
# end::STRATEGY_BEST[]
|
Order
|
python
|
celery__celery
|
t/unit/utils/test_imports.py
|
{
"start": 3901,
"end": 4064
}
|
class ____:
def test_no_module(self):
app = Mock()
app.name == '__main__'
assert gen_task_name(app, 'foo', 'axsadaewe')
|
test_gen_task_name
|
python
|
modin-project__modin
|
asv_bench/benchmarks/benchmarks.py
|
{
"start": 6620,
"end": 7847
}
|
class ____:
param_names = ["shapes", "how", "sort"]
params = [
get_benchmark_shapes("TimeMerge"),
["left", "inner"],
[True, False],
]
def setup(self, shapes, how, sort):
self.df1 = generate_dataframe("int", *shapes[0], RAND_LOW, RAND_HIGH)
self.df2 = generate_dataframe("int", *shapes[1], RAND_LOW, RAND_HIGH)
def time_merge(self, shapes, how, sort):
# merge dataframes by index to get the predictable shape
execute(
self.df1.merge(
self.df2, left_index=True, right_index=True, how=how, sort=sort
)
)
def time_merge_dataframe_empty_right(self, shapes, how, sort):
# Getting an empty dataframe using `iloc` should be very fast,
# so the impact on the time of the merge operation should be negligible.
execute(IMPL.merge(self.df1, self.df2.iloc[:0], how=how, sort=sort))
def time_merge_dataframe_empty_left(self, shapes, how, sort):
# Getting an empty dataframe using `iloc` should be very fast,
# so the impact on the time of the merge operation should be negligible.
execute(IMPL.merge(self.df1.iloc[:0], self.df2, how=how, sort=sort))
|
TimeMerge
|
python
|
walkccc__LeetCode
|
solutions/3275. K-th Nearest Obstacle Queries/3275.py
|
{
"start": 0,
"end": 328
}
|
class ____:
def resultsArray(self, queries: list[list[int]], k: int) -> list[int]:
ans = []
maxHeap = []
for x, y in queries:
heapq.heappush(maxHeap, -(abs(x) + abs(y)))
if len(maxHeap) > k:
heapq.heappop(maxHeap)
ans.append(-maxHeap[0] if len(maxHeap) == k else -1)
return ans
|
Solution
|
python
|
rapidsai__cudf
|
python/cudf_polars/cudf_polars/dsl/expressions/rolling.py
|
{
"start": 1221,
"end": 1279
}
|
class ____(UnaryOp):
pass
@dataclass(frozen=True)
|
RankOp
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/cover/test_runner_strategy.py
|
{
"start": 1107,
"end": 1358
}
|
class ____(TestCase):
@given(st.runner())
def test_runner_is_self(self, runner):
assert runner is self
@given(st.runner(default=3))
def test_runner_is_self_even_with_default(self, runner):
assert runner is self
|
TestStuff
|
python
|
scikit-learn__scikit-learn
|
sklearn/_loss/link.py
|
{
"start": 5593,
"end": 8132
}
|
class ____(BaseLink):
"""The symmetric multinomial logit function.
Convention:
- y_pred.shape = raw_prediction.shape = (n_samples, n_classes)
Notes:
- The inverse link h is the softmax function.
- The sum is over the second axis, i.e. axis=1 (n_classes).
We have to choose additional constraints in order to make
y_pred[k] = exp(raw_pred[k]) / sum(exp(raw_pred[k]), k=0..n_classes-1)
for n_classes classes identifiable and invertible.
We choose the symmetric side constraint where the geometric mean response
is set as reference category, see [2]:
The symmetric multinomial logit link function for a single data point is
then defined as
raw_prediction[k] = g(y_pred[k]) = log(y_pred[k]/gmean(y_pred))
= log(y_pred[k]) - mean(log(y_pred)).
Note that this is equivalent to the definition in [1] and implies mean
centered raw predictions:
sum(raw_prediction[k], k=0..n_classes-1) = 0.
For linear models with raw_prediction = X @ coef, this corresponds to
sum(coef[k], k=0..n_classes-1) = 0, i.e. the sum over classes for every
feature is zero.
Reference
---------
.. [1] Friedman, Jerome; Hastie, Trevor; Tibshirani, Robert. "Additive
logistic regression: a statistical view of boosting" Ann. Statist.
28 (2000), no. 2, 337--407. doi:10.1214/aos/1016218223.
https://projecteuclid.org/euclid.aos/1016218223
.. [2] Zahid, Faisal Maqbool and Gerhard Tutz. "Ridge estimation for
multinomial logit models with symmetric side constraints."
Computational Statistics 28 (2013): 1017-1034.
http://epub.ub.uni-muenchen.de/11001/1/tr067.pdf
"""
is_multiclass = True
interval_y_pred = Interval(0, 1, False, False)
def symmetrize_raw_prediction(self, raw_prediction):
return raw_prediction - np.mean(raw_prediction, axis=1)[:, np.newaxis]
def link(self, y_pred, out=None):
# geometric mean as reference category
gm = gmean(y_pred, axis=1)
return np.log(y_pred / gm[:, np.newaxis], out=out)
def inverse(self, raw_prediction, out=None):
if out is None:
return softmax(raw_prediction, copy=True)
else:
np.copyto(out, raw_prediction)
softmax(out, copy=False)
return out
_LINKS = {
"identity": IdentityLink,
"log": LogLink,
"logit": LogitLink,
"half_logit": HalfLogitLink,
"multinomial_logit": MultinomialLogit,
}
|
MultinomialLogit
|
python
|
numba__numba
|
numba/misc/firstlinefinder.py
|
{
"start": 134,
"end": 3253
}
|
class ____(ast.NodeVisitor):
"""
Attributes
----------
first_stmt_line : int or None
This stores the first statement line number if the definition is found.
Or, ``None`` if the definition is not found.
"""
def __init__(self, name, firstlineno):
"""
Parameters
----------
code :
The function's code object.
"""
self._co_name = name
self._co_firstlineno = firstlineno
self.first_stmt_line = None
def _visit_children(self, node):
for child in ast.iter_child_nodes(node):
super().visit(child)
def visit_FunctionDef(self, node: ast.FunctionDef):
if node.name == self._co_name:
# Name of function matches.
# The `def` line may match co_firstlineno.
possible_start_lines = set([node.lineno])
if node.decorator_list:
# Has decorators.
# The first decorator line may match co_firstlineno.
first_decor = node.decorator_list[0]
possible_start_lines.add(first_decor.lineno)
# Does the first lineno match?
if self._co_firstlineno in possible_start_lines:
# Yes, we found the function.
# So, use the first statement line as the first line.
if node.body:
first_stmt = node.body[0]
if _is_docstring(first_stmt):
# Skip docstring
first_stmt = node.body[1]
self.first_stmt_line = first_stmt.lineno
return
else:
# This is probably unreachable.
# Function body cannot be bare. It must at least have
# A const string for docstring or a `pass`.
pass
self._visit_children(node)
def _is_docstring(node):
if isinstance(node, ast.Expr):
if (isinstance(node.value, ast.Constant)
and isinstance(node.value.value, str)):
return True
return False
def get_func_body_first_lineno(pyfunc):
"""
Look up the first line of function body using the file in
``pyfunc.__code__.co_filename``.
Returns
-------
lineno : int; or None
The first line number of the function body; or ``None`` if the first
line cannot be determined.
"""
co = pyfunc.__code__
try:
with open(co.co_filename) as fin:
source = fin.read()
offset = 0
except (FileNotFoundError, OSError):
try:
lines, offset = inspect.getsourcelines(pyfunc)
source = "".join(lines)
offset = offset - 1
except (OSError, TypeError):
return None
tree = ast.parse(textwrap.dedent(source))
finder = FindDefFirstLine(co.co_name, co.co_firstlineno - offset)
finder.visit(tree)
if finder.first_stmt_line:
return finder.first_stmt_line + offset
else:
# No first line found.
return None
|
FindDefFirstLine
|
python
|
readthedocs__readthedocs.org
|
readthedocs/api/v3/serializers.py
|
{
"start": 13277,
"end": 13711
}
|
class ____(serializers.Serializer):
code = serializers.SerializerMethodField()
name = serializers.SerializerMethodField()
def get_code(self, programming_language):
return programming_language
def get_name(self, programming_language):
for code, name in PROGRAMMING_LANGUAGES:
if code == programming_language:
return name
return "Unknown"
|
ProgrammingLanguageSerializer
|
python
|
scipy__scipy
|
scipy/stats/tests/test_distributions.py
|
{
"start": 162585,
"end": 168870
}
|
class ____:
def setup_method(self):
self.rng = np.random.default_rng(333348228)
def test_rvs(self):
states = [-1, 0, 1, 2, 3, 4]
probability = [0.0, 0.3, 0.4, 0.0, 0.3, 0.0]
samples = 1000
r = stats.rv_discrete(name='sample', values=(states, probability))
x = r.rvs(size=samples, random_state=self.rng)
assert isinstance(x, np.ndarray)
for s, p in zip(states, probability):
assert abs(sum(x == s)/float(samples) - p) < 0.05
x = r.rvs(random_state=self.rng)
assert np.issubdtype(type(x), np.integer)
def test_entropy(self):
# Basic tests of entropy.
pvals = np.array([0.25, 0.45, 0.3])
p = stats.rv_discrete(values=([0, 1, 2], pvals))
expected_h = -sum(xlogy(pvals, pvals))
h = p.entropy()
assert_allclose(h, expected_h)
p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0]))
h = p.entropy()
assert_equal(h, 0.0)
def test_pmf(self):
xk = [1, 2, 4]
pk = [0.5, 0.3, 0.2]
rv = stats.rv_discrete(values=(xk, pk))
x = [[1., 4.],
[3., 2]]
assert_allclose(rv.pmf(x),
[[0.5, 0.2],
[0., 0.3]], atol=1e-14)
def test_cdf(self):
xk = [1, 2, 4]
pk = [0.5, 0.3, 0.2]
rv = stats.rv_discrete(values=(xk, pk))
x_values = [-2, 1., 1.1, 1.5, 2.0, 3.0, 4, 5]
expected = [0, 0.5, 0.5, 0.5, 0.8, 0.8, 1, 1]
assert_allclose(rv.cdf(x_values), expected, atol=1e-14)
# also check scalar arguments
assert_allclose([rv.cdf(xx) for xx in x_values],
expected, atol=1e-14)
def test_ppf(self):
xk = [1, 2, 4]
pk = [0.5, 0.3, 0.2]
rv = stats.rv_discrete(values=(xk, pk))
q_values = [0.1, 0.5, 0.6, 0.8, 0.9, 1.]
expected = [1, 1, 2, 2, 4, 4]
assert_allclose(rv.ppf(q_values), expected, atol=1e-14)
# also check scalar arguments
assert_allclose([rv.ppf(q) for q in q_values],
expected, atol=1e-14)
def test_cdf_ppf_next(self):
# copied and special cased from test_discrete_basic
vals = ([1, 2, 4, 7, 8], [0.1, 0.2, 0.3, 0.3, 0.1])
rv = stats.rv_discrete(values=vals)
assert_array_equal(rv.ppf(rv.cdf(rv.xk[:-1]) + 1e-8),
rv.xk[1:])
def test_multidimension(self):
xk = np.arange(12).reshape((3, 4))
pk = np.array([[0.1, 0.1, 0.15, 0.05],
[0.1, 0.1, 0.05, 0.05],
[0.1, 0.1, 0.05, 0.05]])
rv = stats.rv_discrete(values=(xk, pk))
assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)
def test_bad_input(self):
xk = [1, 2, 3]
pk = [0.5, 0.5]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
pk = [1, 2, 3]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
xk = [1, 2, 3]
pk = [0.5, 1.2, -0.7]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
xk = [1, 2, 3, 4, 5]
pk = [0.3, 0.3, 0.3, 0.3, -0.2]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
xk = [1, 1]
pk = [0.5, 0.5]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
def test_shape_rv_sample(self):
# tests added for gh-9565
# mismatch of 2d inputs
xk, pk = np.arange(4).reshape((2, 2)), np.full((2, 3), 1/6)
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
# same number of elements, but shapes not compatible
xk, pk = np.arange(6).reshape((3, 2)), np.full((2, 3), 1/6)
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
# same shapes => no error
xk, pk = np.arange(6).reshape((3, 2)), np.full((3, 2), 1/6)
assert_equal(stats.rv_discrete(values=(xk, pk)).pmf(0), 1/6)
def test_expect1(self):
xk = [1, 2, 4, 6, 7, 11]
pk = [0.1, 0.2, 0.2, 0.2, 0.2, 0.1]
rv = stats.rv_discrete(values=(xk, pk))
assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)
def test_expect2(self):
# rv_sample should override _expect. Bug report from
# https://stackoverflow.com/questions/63199792
y = [200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0, 1000.0,
1100.0, 1200.0, 1300.0, 1400.0, 1500.0, 1600.0, 1700.0, 1800.0,
1900.0, 2000.0, 2100.0, 2200.0, 2300.0, 2400.0, 2500.0, 2600.0,
2700.0, 2800.0, 2900.0, 3000.0, 3100.0, 3200.0, 3300.0, 3400.0,
3500.0, 3600.0, 3700.0, 3800.0, 3900.0, 4000.0, 4100.0, 4200.0,
4300.0, 4400.0, 4500.0, 4600.0, 4700.0, 4800.0]
py = [0.0004, 0.0, 0.0033, 0.006500000000000001, 0.0, 0.0,
0.004399999999999999, 0.6862, 0.0, 0.0, 0.0,
0.00019999999999997797, 0.0006000000000000449,
0.024499999999999966, 0.006400000000000072,
0.0043999999999999595, 0.019499999999999962,
0.03770000000000007, 0.01759999999999995, 0.015199999999999991,
0.018100000000000005, 0.04500000000000004, 0.0025999999999999357,
0.0, 0.0041000000000001036, 0.005999999999999894,
0.0042000000000000925, 0.0050000000000000044,
0.0041999999999999815, 0.0004999999999999449,
0.009199999999999986, 0.008200000000000096,
0.0, 0.0, 0.0046999999999999265, 0.0019000000000000128,
0.0006000000000000449, 0.02510000000000001, 0.0,
0.007199999999999984, 0.0, 0.012699999999999934, 0.0, 0.0,
0.008199999999999985, 0.005600000000000049, 0.0]
rv = stats.rv_discrete(values=(y, py))
# check the mean
assert_allclose(rv.expect(), rv.mean(), atol=1e-14)
assert_allclose(rv.expect(),
sum(v * w for v, w in zip(y, py)), atol=1e-14)
# also check the second moment
assert_allclose(rv.expect(lambda x: x**2),
sum(v**2 * w for v, w in zip(y, py)), atol=1e-14)
|
TestRvDiscrete
|
python
|
doocs__leetcode
|
solution/0300-0399/0303.Range Sum Query - Immutable/Solution.py
|
{
"start": 0,
"end": 337
}
|
class ____:
def __init__(self, nums: List[int]):
self.s = list(accumulate(nums, initial=0))
def sumRange(self, left: int, right: int) -> int:
return self.s[right + 1] - self.s[left]
# Your NumArray object will be instantiated and called as such:
# obj = NumArray(nums)
# param_1 = obj.sumRange(left,right)
|
NumArray
|
python
|
django__django
|
tests/file_storage/test_generate_filename.py
|
{
"start": 2329,
"end": 9420
}
|
class ____(SimpleTestCase):
def test_storage_dangerous_paths(self):
candidates = [
("/tmp/..", ".."),
("\\tmp\\..", ".."),
("/tmp/.", "."),
("\\tmp\\.", "."),
("..", ".."),
(".", "."),
("", ""),
]
s = FileSystemStorage()
s_overwrite = FileSystemStorage(allow_overwrite=True)
msg = "Could not derive file name from '%s'"
for file_name, base_name in candidates:
with self.subTest(file_name=file_name):
with self.assertRaisesMessage(SuspiciousFileOperation, msg % base_name):
s.get_available_name(file_name)
with self.assertRaisesMessage(SuspiciousFileOperation, msg % base_name):
s_overwrite.get_available_name(file_name)
with self.assertRaisesMessage(SuspiciousFileOperation, msg % base_name):
s.generate_filename(file_name)
def test_storage_dangerous_paths_dir_name(self):
candidates = [
("../path", ".."),
("..\\path", ".."),
("tmp/../path", "tmp/.."),
("tmp\\..\\path", "tmp/.."),
("/tmp/../path", "/tmp/.."),
("\\tmp\\..\\path", "/tmp/.."),
]
s = FileSystemStorage()
s_overwrite = FileSystemStorage(allow_overwrite=True)
for file_name, path in candidates:
msg = "Detected path traversal attempt in '%s'" % path
with self.subTest(file_name=file_name):
with self.assertRaisesMessage(SuspiciousFileOperation, msg):
s.get_available_name(file_name)
with self.assertRaisesMessage(SuspiciousFileOperation, msg):
s_overwrite.get_available_name(file_name)
with self.assertRaisesMessage(SuspiciousFileOperation, msg):
s.generate_filename(file_name)
def test_filefield_dangerous_filename(self):
candidates = [
("..", "some/folder/.."),
(".", "some/folder/."),
("", "some/folder/"),
("???", "???"),
("$.$.$", "$.$.$"),
]
f = FileField(upload_to="some/folder/")
for file_name, msg_file_name in candidates:
msg = f"Could not derive file name from '{msg_file_name}'"
with self.subTest(file_name=file_name):
with self.assertRaisesMessage(SuspiciousFileOperation, msg):
f.generate_filename(None, file_name)
def test_filefield_dangerous_filename_dot_segments(self):
f = FileField(upload_to="some/folder/")
msg = "Detected path traversal attempt in 'some/folder/../path'"
with self.assertRaisesMessage(SuspiciousFileOperation, msg):
f.generate_filename(None, "../path")
def test_filefield_generate_filename_absolute_path(self):
f = FileField(upload_to="some/folder/")
candidates = [
"/tmp/path",
"/tmp/../path",
]
for file_name in candidates:
msg = f"Detected path traversal attempt in '{file_name}'"
with self.subTest(file_name=file_name):
with self.assertRaisesMessage(SuspiciousFileOperation, msg):
f.generate_filename(None, file_name)
def test_filefield_generate_filename(self):
f = FileField(upload_to="some/folder/")
self.assertEqual(
f.generate_filename(None, "test with space.txt"),
os.path.normpath("some/folder/test_with_space.txt"),
)
def test_filefield_generate_filename_with_upload_to(self):
def upload_to(instance, filename):
return "some/folder/" + filename
f = FileField(upload_to=upload_to)
self.assertEqual(
f.generate_filename(None, "test with space.txt"),
os.path.normpath("some/folder/test_with_space.txt"),
)
def test_filefield_generate_filename_upload_to_overrides_dangerous_filename(self):
def upload_to(instance, filename):
return "test.txt"
f = FileField(upload_to=upload_to)
candidates = [
"/tmp/.",
"/tmp/..",
"/tmp/../path",
"/tmp/path",
"some/folder/",
"some/folder/.",
"some/folder/..",
"some/folder/???",
"some/folder/$.$.$",
"some/../test.txt",
"",
]
for file_name in candidates:
with self.subTest(file_name=file_name):
self.assertEqual(f.generate_filename(None, file_name), "test.txt")
def test_filefield_generate_filename_upload_to_absolute_path(self):
def upload_to(instance, filename):
return "/tmp/" + filename
f = FileField(upload_to=upload_to)
candidates = [
"path",
"../path",
"???",
"$.$.$",
]
for file_name in candidates:
msg = f"Detected path traversal attempt in '/tmp/{file_name}'"
with self.subTest(file_name=file_name):
with self.assertRaisesMessage(SuspiciousFileOperation, msg):
f.generate_filename(None, file_name)
def test_filefield_generate_filename_upload_to_dangerous_filename(self):
def upload_to(instance, filename):
return "/tmp/" + filename
f = FileField(upload_to=upload_to)
candidates = ["..", ".", ""]
for file_name in candidates:
msg = f"Could not derive file name from '/tmp/{file_name}'"
with self.subTest(file_name=file_name):
with self.assertRaisesMessage(SuspiciousFileOperation, msg):
f.generate_filename(None, file_name)
def test_filefield_awss3_storage(self):
"""
Simulate a FileField with an S3 storage which uses keys rather than
folders and names. FileField and Storage shouldn't have any os.path()
calls that break the key.
"""
storage = AWSS3Storage()
folder = "not/a/folder/"
f = FileField(upload_to=folder, storage=storage)
key = "my-file-key\\with odd characters"
data = ContentFile("test")
expected_key = AWSS3Storage.prefix + folder + key
# Simulate call to f.save()
result_key = f.generate_filename(None, key)
self.assertEqual(result_key, expected_key)
result_key = storage.save(result_key, data)
self.assertEqual(result_key, expected_key)
# Repeat test with a callable.
def upload_to(instance, filename):
# Return a non-normalized path on purpose.
return folder + filename
f = FileField(upload_to=upload_to, storage=storage)
# Simulate call to f.save()
result_key = f.generate_filename(None, key)
self.assertEqual(result_key, expected_key)
result_key = storage.save(result_key, data)
self.assertEqual(result_key, expected_key)
|
GenerateFilenameStorageTests
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/src/hypothesis/errors.py
|
{
"start": 6861,
"end": 7322
}
|
class ____(HypothesisWarning, FutureWarning):
"""A deprecation warning issued by Hypothesis.
Actually inherits from FutureWarning, because DeprecationWarning is
hidden by the default warnings filter.
You can configure the :mod:`python:warnings` module to handle these
warnings differently to others, either turning them into errors or
suppressing them entirely. Obviously we would prefer the former!
"""
|
HypothesisDeprecationWarning
|
python
|
tensorflow__tensorflow
|
tensorflow/python/tpu/tests/tpu_embedding_v2_correctness_hd_sparse_forward_test.py
|
{
"start": 954,
"end": 1440
}
|
class ____(
tpu_embedding_v2_correctness_base_test.TPUEmbeddingCorrectnessBaseTest):
@parameterized.parameters(
['sgd', 'adagrad', 'adam', 'ftrl', 'adagrad_momentum'])
def test_embedding(self, optimizer_name):
if optimizer_name != 'sgd':
self.skip_if_oss()
self._test_embedding(
optimizer_name, training=False, sparse=True, is_high_dimensional=True)
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
|
TPUEmbeddingCorrectnessTest
|
python
|
dagster-io__dagster
|
examples/docs_snippets/docs_snippets/tutorial/connecting/resources/__init__.py
|
{
"start": 43,
"end": 175
}
|
class ____(ConfigurableResource):
num_days: int = 4
def get_signups(self):
return [1, 2, 3, 4, 5]
|
DataGeneratorResource
|
python
|
kamyu104__LeetCode-Solutions
|
Python/super-pow.py
|
{
"start": 50,
"end": 587
}
|
class ____(object):
def superPow(self, a, b):
"""
:type a: int
:type b: List[int]
:rtype: int
"""
def myPow(a, n, b):
result = 1
x = a % b
while n:
if n & 1:
result = result * x % b
n >>= 1
x = x * x % b
return result % b
result = 1
for digit in b:
result = myPow(result, 10, 1337) * myPow(a, digit, 1337) % 1337
return result
|
Solution
|
python
|
openai__openai-python
|
src/openai/types/responses/response_code_interpreter_call_in_progress_event.py
|
{
"start": 219,
"end": 767
}
|
class ____(BaseModel):
item_id: str
"""The unique identifier of the code interpreter tool call item."""
output_index: int
"""
The index of the output item in the response for which the code interpreter call
is in progress.
"""
sequence_number: int
"""The sequence number of this event, used to order streaming events."""
type: Literal["response.code_interpreter_call.in_progress"]
"""The type of the event. Always `response.code_interpreter_call.in_progress`."""
|
ResponseCodeInterpreterCallInProgressEvent
|
python
|
pytorch__pytorch
|
test/distributed/_tools/test_runtime_estimator.py
|
{
"start": 655,
"end": 2030
}
|
class ____(nn.Module):
def __init__(self, conv_args: ConvArgs):
super().__init__()
image_size = conv_args.image_size
num_classes = conv_args.num_classes
self.image_size = image_size
self.conv1 = nn.Conv2d(3, 32, kernel_size=5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(32, 64, kernel_size=5)
self.conv3 = nn.Conv2d(64, 128, kernel_size=3)
self.conv4 = nn.Conv2d(128, 256, kernel_size=3)
self.fc1_size = self._calculate_fc1_size()
self.fc1 = nn.Linear(self.fc1_size, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, num_classes)
def _calculate_fc1_size(self):
size = self.image_size
size = (size - 5 + 1) // 2 # conv1 and pool
size = (size - 5 + 1) // 2 # conv2 and pool
size = size - 3 + 1 # conv3
size = (size - 3 + 1) // 2 # conv4 and pool
return 512 * size * size
def forward(self, x):
x = self.pool(nn.functional.relu(self.conv1(x)))
x = self.pool(nn.functional.relu(self.conv2(x)))
x = nn.functional.relu(self.conv3(x))
x = self.pool(nn.functional.relu(self.conv4(x)))
x = x.view(-1, self.fc1_size)
x = nn.functional.relu(self.fc1(x))
x = nn.functional.relu(self.fc2(x))
x = self.fc3(x)
return x
|
SimpleCNN
|
python
|
pandas-dev__pandas
|
pandas/tests/frame/methods/test_set_axis.py
|
{
"start": 3269,
"end": 3945
}
|
class ____(SharedSetAxisTests):
@pytest.fixture
def obj(self):
df = DataFrame(
{"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2], "C": [4.4, 5.5, 6.6]},
index=[2010, 2011, 2012],
)
return df
def test_set_axis_with_allows_duplicate_labels_false(self):
# GH#44958
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"]).set_flags(
allows_duplicate_labels=False
)
result = df.set_axis(labels=["x", "y"], axis=0)
expected = DataFrame([[1, 2], [3, 4]], index=["x", "y"], columns=["a", "b"])
tm.assert_frame_equal(result, expected, check_flags=False)
|
TestDataFrameSetAxis
|
python
|
PrefectHQ__prefect
|
src/prefect/server/events/actions.py
|
{
"start": 49650,
"end": 51303
}
|
class ____(Action):
"""Base class for Actions that operate on Work Pools and need to infer them from
events"""
source: Literal["selected", "inferred"] = Field(
"selected",
description=(
"Whether this Action applies to a specific selected "
"work pool (given by `work_pool_id`), or to a work pool that is "
"inferred from the triggering event. If the source is 'inferred', "
"the `work_pool_id` may not be set. If the source is 'selected', the "
"`work_pool_id` must be set."
),
)
work_pool_id: Optional[UUID] = Field(
None,
description="The identifier of the work pool to pause",
)
@model_validator(mode="after")
def selected_work_pool_requires_id(self) -> Self:
wants_selected_work_pool = self.source == "selected"
has_work_pool_id = bool(self.work_pool_id)
if wants_selected_work_pool != has_work_pool_id:
raise ValueError(
"work_pool_id is " + ("not allowed" if has_work_pool_id else "required")
)
return self
async def work_pool_id_to_use(self, triggered_action: "TriggeredAction") -> UUID:
if self.source == "selected":
assert self.work_pool_id
return self.work_pool_id
event = triggered_action.triggering_event
if not event:
raise ActionFailed("No event to infer the work pool")
assert event
if id := _id_of_first_resource_of_kind(event, "prefect.work-pool"):
return id
raise ActionFailed("No work pool could be inferred")
|
WorkPoolAction
|
python
|
astropy__astropy
|
astropy/units/tests/test_quantity_array_methods.py
|
{
"start": 391,
"end": 2762
}
|
class ____:
"""
Test whether arrays are properly copied/used in place
"""
def test_copy_on_creation(self):
v = np.arange(1000.0)
q_nocopy = u.Quantity(v, "km/s", copy=False)
q_copy = u.Quantity(v, "km/s", copy=True)
v[0] = -1.0
assert q_nocopy[0].value == v[0]
assert q_copy[0].value != v[0]
def test_to_copies(self):
q = u.Quantity(np.arange(1.0, 100.0), "km/s")
q2 = q.to(u.m / u.s)
assert np.all(q.value != q2.value)
q3 = q.to(u.km / u.s)
assert np.all(q.value == q3.value)
q[0] = -1.0 * u.km / u.s
assert q[0].value != q3[0].value
def test_si_copies(self):
q = u.Quantity(np.arange(100.0), "m/s")
q2 = q.si
assert np.all(q.value == q2.value)
q[0] = -1.0 * u.m / u.s
assert q[0].value != q2[0].value
def test_getitem_is_view(self):
"""Check that [keys] work, and that, like ndarray, it returns
a view, so that changing one changes the other.
Also test that one can add axes (closes #1422)
"""
q = u.Quantity(np.arange(100.0), "m/s")
q_sel = q[10:20]
q_sel[0] = -1.0 * u.m / u.s
assert q_sel[0] == q[10]
# also check that getitem can do new axes
q2 = q[:, np.newaxis]
q2[10, 0] = -9 * u.m / u.s
assert np.all(q2.flatten() == q)
def test_flat(self):
q = u.Quantity(np.arange(9.0).reshape(3, 3), "m/s")
q_flat = q.flat
# check that a single item is a quantity (with the right value)
assert q_flat[8] == 8.0 * u.m / u.s
# and that getting a range works as well
assert np.all(q_flat[0:2] == np.arange(2.0) * u.m / u.s)
# as well as getting items via iteration
q_flat_list = list(q.flat)
assert np.all(u.Quantity(q_flat_list) == u.Quantity(list(q.value.flat), q.unit))
# check that flat works like a view of the real array
q_flat[8] = -1.0 * u.km / u.s
assert q_flat[8] == -1.0 * u.km / u.s
assert q[2, 2] == -1.0 * u.km / u.s
# while if one goes by an iterated item, a copy is made
q_flat_list[8] = -2 * u.km / u.s
assert q_flat_list[8] == -2.0 * u.km / u.s
assert q_flat[8] == -1.0 * u.km / u.s
assert q[2, 2] == -1.0 * u.km / u.s
|
TestQuantityArrayCopy
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-salesforce/source_salesforce/streams.py
|
{
"start": 17508,
"end": 19254
}
|
class ____(StreamSlicer):
def __init__(self, cursor: Optional[ConcurrentCursor]) -> None:
self._cursor = cursor
def get_request_params(
self,
*,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> Mapping[str, Any]:
return {}
def get_request_headers(
self,
*,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> Mapping[str, Any]:
return {}
def get_request_body_data(
self,
*,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> Union[Mapping[str, Any], str]:
return {}
def get_request_body_json(
self,
*,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> Mapping[str, Any]:
return {}
def stream_slices(self) -> Iterable[StreamSlice]:
if not self._cursor:
yield from [StreamSlice(partition={}, cursor_slice={})]
return
for stream_slice in self._cursor.stream_slices():
yield StreamSlice(
partition={},
cursor_slice={
"start_date": stream_slice["start_date"].replace("Z", "+00:00"),
"end_date": stream_slice["end_date"].replace("Z", "+00:00"),
},
)
|
BulkDatetimeStreamSlicer
|
python
|
django__django
|
tests/gis_tests/gdal_tests/test_raster.py
|
{
"start": 27123,
"end": 35282
}
|
class ____(SimpleTestCase):
rs_path = os.path.join(os.path.dirname(__file__), "../data/rasters/raster.tif")
def test_band_data(self):
rs = GDALRaster(self.rs_path)
band = rs.bands[0]
self.assertEqual(band.width, 163)
self.assertEqual(band.height, 174)
self.assertEqual(band.description, "")
self.assertEqual(band.datatype(), 1)
self.assertEqual(band.datatype(as_string=True), "GDT_Byte")
self.assertEqual(band.color_interp(), 1)
self.assertEqual(band.color_interp(as_string=True), "GCI_GrayIndex")
self.assertEqual(band.nodata_value, 15)
if numpy:
data = band.data()
assert_array = numpy.loadtxt(
os.path.join(
os.path.dirname(__file__), "../data/rasters/raster.numpy.txt"
)
)
numpy.testing.assert_equal(data, assert_array)
self.assertEqual(data.shape, (band.height, band.width))
def test_band_statistics(self):
with tempfile.TemporaryDirectory() as tmp_dir:
rs_path = os.path.join(tmp_dir, "raster.tif")
shutil.copyfile(self.rs_path, rs_path)
rs = GDALRaster(rs_path)
band = rs.bands[0]
pam_file = rs_path + ".aux.xml"
smin, smax, smean, sstd = band.statistics(approximate=True)
self.assertEqual(smin, 0)
self.assertEqual(smax, 9)
self.assertAlmostEqual(smean, 2.842331288343558)
self.assertAlmostEqual(sstd, 2.3965567248965356)
smin, smax, smean, sstd = band.statistics(approximate=False, refresh=True)
self.assertEqual(smin, 0)
self.assertEqual(smax, 9)
self.assertAlmostEqual(smean, 2.828326634228898)
self.assertAlmostEqual(sstd, 2.4260526986669095)
self.assertEqual(band.min, 0)
self.assertEqual(band.max, 9)
self.assertAlmostEqual(band.mean, 2.828326634228898)
self.assertAlmostEqual(band.std, 2.4260526986669095)
# Statistics are persisted into PAM file on band close
rs = band = None
self.assertTrue(os.path.isfile(pam_file))
def _remove_aux_file(self):
pam_file = self.rs_path + ".aux.xml"
if os.path.isfile(pam_file):
os.remove(pam_file)
def test_read_mode_error(self):
# Open raster in read mode
rs = GDALRaster(self.rs_path, write=False)
band = rs.bands[0]
self.addCleanup(self._remove_aux_file)
# Setting attributes in write mode raises exception in the _flush
# method
with self.assertRaises(GDALException):
setattr(band, "nodata_value", 10)
def test_band_data_setters(self):
# Create in-memory raster and get band
rsmem = GDALRaster(
{
"datatype": 1,
"driver": "MEM",
"name": "mem_rst",
"width": 10,
"height": 10,
"nr_of_bands": 1,
"srid": 4326,
}
)
bandmem = rsmem.bands[0]
# Set nodata value
bandmem.nodata_value = 99
self.assertEqual(bandmem.nodata_value, 99)
# Set data for entire dataset
bandmem.data(range(100))
if numpy:
numpy.testing.assert_equal(
bandmem.data(), numpy.arange(100).reshape(10, 10)
)
else:
self.assertEqual(bandmem.data(), list(range(100)))
# Prepare data for setting values in subsequent tests
block = list(range(100, 104))
packed_block = struct.pack("<" + "B B B B", *block)
# Set data from list
bandmem.data(block, (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from packed block
bandmem.data(packed_block, (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from bytes
bandmem.data(bytes(packed_block), (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from bytearray
bandmem.data(bytearray(packed_block), (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from memoryview
bandmem.data(memoryview(packed_block), (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from numpy array
if numpy:
bandmem.data(numpy.array(block, dtype="int8").reshape(2, 2), (1, 1), (2, 2))
numpy.testing.assert_equal(
bandmem.data(offset=(1, 1), size=(2, 2)),
numpy.array(block).reshape(2, 2),
)
# Test json input data
rsmemjson = GDALRaster(JSON_RASTER)
bandmemjson = rsmemjson.bands[0]
if numpy:
numpy.testing.assert_equal(
bandmemjson.data(), numpy.array(range(25)).reshape(5, 5)
)
else:
self.assertEqual(bandmemjson.data(), list(range(25)))
def test_band_statistics_automatic_refresh(self):
rsmem = GDALRaster(
{
"srid": 4326,
"width": 2,
"height": 2,
"bands": [{"data": [0] * 4, "nodata_value": 99}],
}
)
band = rsmem.bands[0]
# Populate statistics cache
self.assertEqual(band.statistics(), (0, 0, 0, 0))
# Change data
band.data([1, 1, 0, 0])
# Statistics are properly updated
self.assertEqual(band.statistics(), (0.0, 1.0, 0.5, 0.5))
# Change nodata_value
band.nodata_value = 0
# Statistics are properly updated
self.assertEqual(band.statistics(), (1.0, 1.0, 1.0, 0.0))
def test_band_statistics_empty_band(self):
rsmem = GDALRaster(
{
"srid": 4326,
"width": 1,
"height": 1,
"bands": [{"data": [0], "nodata_value": 0}],
}
)
self.assertEqual(rsmem.bands[0].statistics(), (None, None, None, None))
def test_band_delete_nodata(self):
rsmem = GDALRaster(
{
"srid": 4326,
"width": 1,
"height": 1,
"bands": [{"data": [0], "nodata_value": 1}],
}
)
rsmem.bands[0].nodata_value = None
self.assertIsNone(rsmem.bands[0].nodata_value)
def test_band_data_replication(self):
band = GDALRaster(
{
"srid": 4326,
"width": 3,
"height": 3,
"bands": [{"data": range(10, 19), "nodata_value": 0}],
}
).bands[0]
# Variations for input (data, shape, expected result).
combos = (
([1], (1, 1), [1] * 9),
(range(3), (1, 3), [0, 0, 0, 1, 1, 1, 2, 2, 2]),
(range(3), (3, 1), [0, 1, 2, 0, 1, 2, 0, 1, 2]),
)
for combo in combos:
band.data(combo[0], shape=combo[1])
if numpy:
numpy.testing.assert_equal(
band.data(), numpy.array(combo[2]).reshape(3, 3)
)
else:
self.assertEqual(band.data(), list(combo[2]))
|
GDALBandTests
|
python
|
docker__docker-py
|
docker/errors.py
|
{
"start": 2697,
"end": 2747
}
|
class ____(DockerException):
pass
|
InvalidVersion
|
python
|
pypa__warehouse
|
warehouse/utils/wsgi.py
|
{
"start": 835,
"end": 4484
}
|
class ____:
def __init__(self, app, token, ip_salt: str, num_proxies=1):
self.app = app
self.token = token
self.ip_salt = ip_salt
self.num_proxies = num_proxies
def __call__(self, environ, start_response):
# Determine if the request comes from a trusted proxy or not by looking
# for a token in the request.
request_token = environ.get("HTTP_WAREHOUSE_TOKEN")
if request_token is not None and hmac.compare_digest(self.token, request_token):
# Compute our values from the environment.
proto = environ.get("HTTP_WAREHOUSE_PROTO", "")
remote_addr = environ.get("HTTP_WAREHOUSE_IP", "")
remote_addr_hashed = environ.get("HTTP_WAREHOUSE_HASHED_IP", "")
geoip_info = {
k: environ.get(f"HTTP_WAREHOUSE_{v}")
for k, v in GEOIP_FIELDS.items()
if environ.get(f"HTTP_WAREHOUSE_{v}") is not None
}
host = environ.get("HTTP_WAREHOUSE_HOST", "")
# If we're not getting headers from a trusted third party via the
# specialized Warehouse-* headers, then we'll fall back to looking at
# X-Forwarded-* headers, assuming that whatever we have in front of us
# will strip invalid ones.
else:
# If there IS a token, but it doesn't match, then tell us about it.
if request_token is not None and not hmac.compare_digest(
self.token, request_token
):
sentry_sdk.set_context(
self.__class__.__name__, {"token": request_token}
)
sentry_sdk.capture_message(
"Invalid Proxy Token",
level="warning",
)
proto = environ.get("HTTP_X_FORWARDED_PROTO", "")
# Special case: if we don't see a X-Forwarded-For, this may be a local
# development instance of Warehouse and the original REMOTE_ADDR is accurate
remote_addr = _forwarded_value(
environ.get("HTTP_X_FORWARDED_FOR", ""), self.num_proxies
) or environ.get("REMOTE_ADDR")
remote_addr_hashed = (
hashlib.sha256((remote_addr + self.ip_salt).encode("utf8")).hexdigest()
if remote_addr
else ""
)
host = environ.get("HTTP_X_FORWARDED_HOST", "")
geoip_info = {}
# Put the new header values into our environment.
if remote_addr:
environ["REMOTE_ADDR"] = remote_addr
if remote_addr_hashed:
environ["REMOTE_ADDR_HASHED"] = remote_addr_hashed
for k, v in GEOIP_FIELDS.items():
if k in geoip_info:
environ[f"GEOIP_{v}"] = geoip_info[k]
if host:
environ["HTTP_HOST"] = host
if proto:
environ["wsgi.url_scheme"] = proto
# Remove any of the forwarded or warehouse headers from the environment
for header in {
"HTTP_X_FORWARDED_PROTO",
"HTTP_X_FORWARDED_FOR",
"HTTP_X_FORWARDED_HOST",
"HTTP_X_FORWARDED_PORT",
"HTTP_WAREHOUSE_TOKEN",
"HTTP_WAREHOUSE_PROTO",
"HTTP_WAREHOUSE_IP",
"HTTP_WAREHOUSE_HASHED_IP",
"HTTP_WAREHOUSE_HOST",
*[f"HTTP_WAREHOUSE_{v}" for v in GEOIP_FIELDS.values()],
}:
if header in environ:
del environ[header]
# Dispatch to the real underlying application.
return self.app(environ, start_response)
|
ProxyFixer
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/tags.py
|
{
"start": 505,
"end": 769
}
|
class ____(graphene.ObjectType):
key = graphene.NonNull(graphene.String)
value = graphene.NonNull(graphene.String)
class Meta:
name = "EventTag"
def __init__(self, key, value):
super().__init__(key=key, value=value)
|
GrapheneEventTag
|
python
|
sqlalchemy__sqlalchemy
|
test/typing/plain_files/orm/mapped_column.py
|
{
"start": 588,
"end": 5329
}
|
class ____(Base):
__tablename__ = "x"
# these are fine - pk, column is not null, have the attribute be
# non-optional, fine
id: Mapped[int] = mapped_column(primary_key=True)
int_id: Mapped[int] = mapped_column(Integer, primary_key=True)
# but this is also "fine" because the developer may wish to have the object
# in a pending state with None for the id for some period of time.
# "primary_key=True" will still be interpreted correctly in DDL
err_int_id: Mapped[Optional[int]] = mapped_column(
Integer, primary_key=True
)
# also fine, X(err_int_id_name) is None when you first make the
# object
err_int_id_name: Mapped[Optional[int]] = mapped_column(
"err_int_id_name", Integer, primary_key=True
)
id_name: Mapped[int] = mapped_column("id_name", primary_key=True)
int_id_name: Mapped[int] = mapped_column(
"int_id_name", Integer, primary_key=True
)
a: Mapped[str] = mapped_column()
b: Mapped[Optional[str]] = mapped_column()
# this can't be detected because we don't know the type
c: Mapped[str] = mapped_column(nullable=True)
d: Mapped[str] = mapped_column(nullable=False)
e: Mapped[Optional[str]] = mapped_column(ForeignKey(c), nullable=True)
f1 = mapped_column(Integer)
f: Mapped[Optional[str]] = mapped_column(ForeignKey(f1), nullable=False)
g: Mapped[str] = mapped_column(String)
h: Mapped[Optional[str]] = mapped_column(String)
# this probably is wrong. however at the moment it seems better to
# decouple the right hand arguments from declaring things about the
# left side since it mostly doesn't work in any case.
i: Mapped[str] = mapped_column(String, nullable=True)
j: Mapped[str] = mapped_column(String, nullable=False)
k: Mapped[Optional[str]] = mapped_column(String, nullable=True)
l: Mapped[Optional[str]] = mapped_column(String, nullable=False)
a_name: Mapped[str] = mapped_column("a_name")
b_name: Mapped[Optional[str]] = mapped_column("b_name")
c_name: Mapped[str] = mapped_column("c_name", nullable=True)
d_name: Mapped[str] = mapped_column("d_name", nullable=False)
e_name: Mapped[Optional[str]] = mapped_column("e_name", nullable=True)
f_name: Mapped[Optional[str]] = mapped_column("f_name", nullable=False)
g_name: Mapped[str] = mapped_column("g_name", String)
h_name: Mapped[Optional[str]] = mapped_column("h_name", String)
i_name: Mapped[str] = mapped_column("i_name", String, nullable=True)
j_name: Mapped[str] = mapped_column("j_name", String, nullable=False)
k_name: Mapped[Optional[str]] = mapped_column(
"k_name", String, nullable=True
)
l_name: Mapped[Optional[str]] = mapped_column(
"l_name",
String,
nullable=False,
)
__table_args__ = (UniqueConstraint(a, b, name="uq1"), Index("ix1", c, d))
mapped_column()
mapped_column(
init=True,
repr=True,
default=42,
compare=True,
kw_only=True,
primary_key=True,
deferred=True,
deferred_group="str",
deferred_raiseload=True,
use_existing_column=True,
name="str",
type_=Integer(),
doc="str",
key="str",
index=True,
unique=True,
info={"str": 42},
active_history=True,
quote=True,
system=True,
comment="str",
sort_order=-1,
any_kwarg="str",
another_kwarg=42,
)
mapped_column(default_factory=lambda: 1)
mapped_column(default_factory=lambda: "str")
mapped_column(nullable=True)
mapped_column(nullable=SchemaConst.NULL_UNSPECIFIED)
mapped_column(autoincrement=True)
mapped_column(autoincrement="auto")
mapped_column(autoincrement="ignore_fk")
mapped_column(onupdate=1)
mapped_column(onupdate="str")
mapped_column(insert_default=1)
mapped_column(insert_default="str")
mapped_column(server_default=FetchedValue())
mapped_column(server_default=true())
mapped_column(server_default=func.now())
mapped_column(server_default="NOW()")
mapped_column(server_default=text("NOW()"))
mapped_column(server_default=literal_column("false", Boolean))
mapped_column(server_onupdate=FetchedValue())
mapped_column(server_onupdate=true())
mapped_column(server_onupdate=func.now())
mapped_column(server_onupdate="NOW()")
mapped_column(server_onupdate=text("NOW()"))
mapped_column(server_onupdate=literal_column("false", Boolean))
mapped_column(
default=None,
nullable=None,
primary_key=None,
deferred_group=None,
deferred_raiseload=None,
name=None,
type_=None,
doc=None,
key=None,
index=None,
unique=None,
info=None,
onupdate=None,
insert_default=None,
server_default=None,
server_onupdate=None,
quote=None,
comment=None,
any_kwarg=None,
)
|
X
|
python
|
aio-libs__aiohttp
|
aiohttp/web_exceptions.py
|
{
"start": 7352,
"end": 8144
}
|
class ____(HTTPClientError):
status_code = 405
def __init__(
self,
method: str,
allowed_methods: Iterable[str],
*,
headers: LooseHeaders | None = None,
reason: str | None = None,
text: str | None = None,
content_type: str | None = None,
) -> None:
allow = ",".join(sorted(allowed_methods))
super().__init__(
headers=headers, reason=reason, text=text, content_type=content_type
)
self.headers["Allow"] = allow
self._allowed: set[str] = set(allowed_methods)
self._method = method
@property
def allowed_methods(self) -> set[str]:
return self._allowed
@property
def method(self) -> str:
return self._method
|
HTTPMethodNotAllowed
|
python
|
PrefectHQ__prefect
|
tests/utilities/schema_tools/test_hydration.py
|
{
"start": 11819,
"end": 15131
}
|
class ____:
@pytest.mark.parametrize(
"input_object, expected_output, ctx",
[
(
# The workspace variable is resolved first.
# It returns a jinja template that renders
# and outputs a JSON string of '"4"'.
# the JSON string is then decoded to give
# an actual integer value.
{
"param": {
"__prefect_kind": "json",
"value": {
"__prefect_kind": "jinja",
"template": {
"__prefect_kind": "workspace_variable",
"variable_name": "2_plus_2",
},
},
}
},
{"param": 4},
HydrationContext(
render_jinja=True,
render_workspace_variables=True,
workspace_variables={"2_plus_2": "{{ (2 + 2) | tojson }}"},
),
),
],
)
def test_nested_hydration(self, input_object, expected_output, ctx):
assert hydrate(input_object, ctx) == expected_output
@pytest.mark.parametrize(
"input_object, expected_output, ctx",
[
(
{
"my_object": {
"__prefect_kind": "json",
"value": {
"__prefect_kind": "jinja",
"template": "{{ event.payload.body | tojson }}",
},
}
},
{"my_object": {"json_key": "json_value"}},
HydrationContext(
jinja_context={
"event": {"payload": {"body": {"json_key": "json_value"}}}
},
render_jinja=True,
),
),
],
)
def test_extract_an_object(self, input_object, expected_output, ctx):
assert hydrate(input_object, ctx) == expected_output
@pytest.mark.parametrize(
"input_object, expected_output, ctx",
[
(
{
"my_object": {
"__prefect_kind": "json",
"value": {
"__prefect_kind": "jinja",
"template": "{{ event.payload.body | tojson }}",
},
}
},
{"my_object": ValidJinja("{{ event.payload.body | tojson }}")},
HydrationContext(
jinja_context={
"event": {"payload": {"body": {"json_key": "json_value"}}}
},
render_jinja=False,
),
),
],
)
def test_placeholders_bubble_up(self, input_object, expected_output, ctx):
# render_jinja=False, so the jinja template is not rendered.
# If the parent __prefect_kind sees a Placeholder, it should just continue to bubble
# the Placeholder up the chain.
assert hydrate(input_object, ctx) == expected_output
|
TestNestedHydration
|
python
|
doocs__leetcode
|
solution/1400-1499/1413.Minimum Value to Get Positive Step by Step Sum/Solution2.py
|
{
"start": 0,
"end": 157
}
|
class ____:
def minStartValue(self, nums: List[int]) -> int:
s = list(accumulate(nums))
return 1 if min(s) >= 0 else abs(min(s)) + 1
|
Solution
|
python
|
openai__openai-python
|
src/openai/types/beta/realtime/conversation_item_retrieve_event.py
|
{
"start": 235,
"end": 559
}
|
class ____(BaseModel):
item_id: str
"""The ID of the item to retrieve."""
type: Literal["conversation.item.retrieve"]
"""The event type, must be `conversation.item.retrieve`."""
event_id: Optional[str] = None
"""Optional client-generated ID used to identify this event."""
|
ConversationItemRetrieveEvent
|
python
|
dagster-io__dagster
|
python_modules/automation/automation_tests/dagster_docs_tests/test_python_ast_rule.py
|
{
"start": 447,
"end": 6676
}
|
class ____:
    """Test the _extract_python_code_blocks function."""

    # NOTE(review): the leading whitespace and blank lines inside the sample
    # docstrings below appear to have been stripped by an extraction step.
    # Several assertions (line numbers, indentation prefixes such as
    # `" for item"`) only make sense against the originally formatted
    # strings — verify against the upstream file before relying on them.

    def test_extract_single_code_block(self):
        """Test extracting a single Python code block."""
        docstring = """
This is a docstring.
.. code-block:: python
def hello():
return "world"
"""
        blocks = _extract_python_code_blocks(docstring)
        assert len(blocks) == 1
        code, line_num = blocks[0]
        assert "def hello():" in code
        assert 'return "world"' in code
        assert line_num == 6  # Line where code starts

    def test_extract_multiple_code_blocks(self):
        """Test extracting multiple Python code blocks."""
        docstring = """
This function has examples.
.. code-block:: python
x = 1
print(x)
Some text in between.
.. code-block:: python
def func():
pass
"""
        blocks = _extract_python_code_blocks(docstring)
        assert len(blocks) == 2
        code1, line1 = blocks[0]
        assert "x = 1" in code1
        assert "print(x)" in code1
        code2, line2 = blocks[1]
        assert "def func():" in code2
        assert "pass" in code2
        assert line1 < line2  # Second block starts after first

    def test_extract_no_code_blocks(self):
        """Test docstring with no Python code blocks."""
        docstring = """
This is just a regular docstring.
Args:
param: A parameter
Returns:
Something
"""
        blocks = _extract_python_code_blocks(docstring)
        assert len(blocks) == 0

    def test_extract_ignores_non_python_blocks(self):
        """Test that non-Python code blocks are ignored."""
        # Only the `python` directive should be extracted; yaml/bash blocks
        # (deliberately containing invalid syntax) must be skipped.
        docstring = """
This has various code blocks.
.. code-block:: yaml
key: value
invalid: yaml: syntax
.. code-block:: python
print("hello")
.. code-block:: bash
echo "world"
invalid-command --bad-syntax
"""
        blocks = _extract_python_code_blocks(docstring)
        assert len(blocks) == 1
        code, _ = blocks[0]
        assert 'print("hello")' in code

    def test_extract_with_indented_code(self):
        """Test extracting code with proper indentation handling."""
        docstring = """
Example with indented code.
.. code-block:: python
if condition:
for item in items:
process(item)
if item.special:
handle_special(item)
"""
        blocks = _extract_python_code_blocks(docstring)
        assert len(blocks) == 1
        code, _ = blocks[0]
        # Check that indentation is preserved relative to the first line
        lines = code.split("\n")
        assert lines[0] == "if condition:"
        assert lines[1].startswith(" for item")  # 4 spaces
        assert lines[2].startswith(" process(item)")  # 8 spaces
        assert lines[3].startswith(" if item.special:")  # 8 spaces
        assert lines[4].startswith(" handle_special(item)")  # 12 spaces

    def test_extract_with_empty_lines_in_code(self):
        """Test code blocks with empty lines within the code."""
        docstring = """
Example with empty lines.
.. code-block:: python
def function():
x = 1
# Comment after empty line
y = 2
return x + y
"""
        blocks = _extract_python_code_blocks(docstring)
        assert len(blocks) == 1
        code, _ = blocks[0]
        lines = code.split("\n")
        assert "def function():" in lines[0]
        assert lines[2] == ""  # Empty line preserved
        assert "# Comment after empty line" in lines[3]
        assert lines[5] == ""  # Another empty line preserved
        assert "return x + y" in lines[6]

    def test_extract_trailing_empty_lines_removed(self):
        """Test that trailing empty lines are removed from code blocks."""
        docstring = """
Example with trailing empty lines.
.. code-block:: python
print("hello")
Some text after.
"""
        blocks = _extract_python_code_blocks(docstring)
        assert len(blocks) == 1
        code, _ = blocks[0]
        # Should not end with empty lines
        assert not code.endswith("\n\n")
        assert code.strip() == 'print("hello")'

    def test_extract_with_various_directive_formats(self):
        """Test various formats of the code-block directive."""
        docstring = """
Various directive formats.
.. code-block:: python
# Standard format
x = 1
..code-block::python
# No space after ..
y = 2
.. code-block::python
# No space before language
z = 3
"""
        blocks = _extract_python_code_blocks(docstring)
        # Only the first one should match (strict regex)
        assert len(blocks) == 1
        code, _ = blocks[0]
        assert "x = 1" in code

    def test_extract_empty_code_block(self):
        """Test handling of empty code blocks that are truly empty."""
        docstring = """Empty code block.
.. code-block:: python
"""
        blocks = _extract_python_code_blocks(docstring)
        assert len(blocks) == 0  # Empty blocks are not included

    def test_extract_code_block_no_blank_line(self):
        """Test code block without blank line after directive."""
        docstring = """
No blank line after directive.
.. code-block:: python
print("hello")
"""
        blocks = _extract_python_code_blocks(docstring)
        assert len(blocks) == 1
        code, _ = blocks[0]
        assert 'print("hello")' in code
|
TestExtractPythonCodeBlocks
|
python
|
astropy__astropy
|
astropy/io/ascii/fastbasic.py
|
{
"start": 9243,
"end": 9690
}
|
class ____(FastBasic):
    """Tab-delimited table reader backed by the optimized C parsing engine.

    Behaves like the ordinary :class:`Tab` reader, only faster.
    """

    _format_name = "fast_tab"
    _description = "Tab-separated values table using the fast C engine"
    _fast = True

    def __init__(self, **kwargs):
        # Hard-wire the tab delimiter before handing off to FastBasic.
        super().__init__({"delimiter": "\t"}, **kwargs)
        # Whitespace is significant in tab-separated data, so disable both
        # field- and line-level stripping.
        self.strip_whitespace_fields = False
        self.strip_whitespace_lines = False
|
FastTab
|
python
|
PyCQA__pylint
|
pylint/reporters/ureports/base_writer.py
|
{
"start": 725,
"end": 3440
}
|
class ____:
    """Base class for ureport writers."""

    def format(
        self,
        layout: BaseLayout,
        stream: TextIO = sys.stdout,
        encoding: str | None = None,
    ) -> None:
        """Format and write the given layout into the stream object.

        unicode policy: unicode strings may be found in the layout;
        try to call 'stream.write' with it, but give it back encoded using
        the given encoding if it fails
        """
        if not encoding:
            encoding = getattr(stream, "encoding", "UTF-8")
        # getattr can legitimately return None (e.g. for StringIO), hence
        # the second fallback to UTF-8.
        self.encoding = encoding or "UTF-8"
        self.out = stream
        self.begin_format()
        layout.accept(self)
        self.end_format()

    def format_children(self, layout: EvaluationSection | Paragraph | Section) -> None:
        """Recurse on the layout children and call their accept method
        (see the Visitor pattern).
        """
        for child in getattr(layout, "children", ()):
            child.accept(self)

    def writeln(self, string: str = "") -> None:
        """Write a line in the output buffer."""
        self.write(string + "\n")

    def write(self, string: str) -> None:
        """Write a string in the output buffer."""
        self.out.write(string)

    def begin_format(self) -> None:
        """Begin to format a layout."""
        self.section = 0

    def end_format(self) -> None:
        """Finished formatting a layout."""

    def get_table_content(self, table: Table) -> list[list[str]]:
        """Trick to get table content without actually writing it.

        return an aligned list of lists containing table cells values as string
        """
        result: list[list[str]] = [[]]
        cols = table.cols
        for cell in self.compute_content(table):
            if cols == 0:
                # Current row is full: start a new one.
                result.append([])
                cols = table.cols
            cols -= 1
            result[-1].append(cell)
        # Fill missing cells. After the loop `cols` is exactly the number of
        # cells missing from the last row (bug fix: the previous expression
        # `cols - len(result[-1])` went negative whenever the last row was
        # partially filled, so a short final row was never padded and the
        # result was not aligned as documented).
        result[-1] += [""] * cols
        return result

    def compute_content(self, layout: BaseLayout) -> Iterator[str]:
        """Trick to compute the formatting of children layout before actually
        writing it.

        return an iterator on strings (one for each child element)
        """
        # Patch the underlying output stream with a fresh-generated stream,
        # which is used to store a temporary representation of a child
        # node.
        out = self.out
        try:
            for child in layout.children:
                stream = StringIO()
                self.out = stream
                child.accept(self)
                yield stream.getvalue()
        finally:
            # Always restore the real stream, even if a child visitor raises
            # or the generator is closed early.
            self.out = out
|
BaseWriter
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.