language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ray-project__ray | python/ray/air/util/tensor_extensions/arrow.py | {
"start": 25712,
"end": 36832
} | class ____(pa.ExtensionArray):
"""
An array of fixed-shape, homogeneous-typed tensors.
This is the Arrow side of TensorArray.
See Arrow docs for customizing extension arrays:
https://arrow.apache.org/docs/python/extending_types.html#custom-extension-array-class
"""
@classmethod
def from_numpy(
cls,
arr: Union[np.ndarray, Iterable[np.ndarray]],
column_name: Optional[str] = None,
) -> Union["ArrowTensorArray", "ArrowVariableShapedTensorArray"]:
"""
Convert an ndarray or an iterable of ndarrays to an array of homogeneous-typed
tensors. If given fixed-shape tensor elements, this will return an
``ArrowTensorArray``; if given variable-shape tensor elements, this will return
an ``ArrowVariableShapedTensorArray``.
Args:
arr: An ndarray or an iterable of ndarrays.
column_name: Optional. Used only in logging outputs to provide
additional details.
Returns:
- If fixed-shape tensor elements, an ``ArrowTensorArray`` containing
``len(arr)`` tensors of fixed shape.
- If variable-shaped tensor elements, an ``ArrowVariableShapedTensorArray``
containing ``len(arr)`` tensors of variable shape.
- If scalar elements, a ``pyarrow.Array``.
"""
if not isinstance(arr, np.ndarray) and isinstance(arr, Iterable):
arr = list(arr)
if isinstance(arr, (list, tuple)) and arr and isinstance(arr[0], np.ndarray):
# Stack ndarrays and pass through to ndarray handling logic below.
try:
arr = np.stack(arr, axis=0)
except ValueError as ve:
logger.warning(
f"Failed to stack lists due to: {ve}; "
f"falling back to using np.array(..., dtype=object)",
exc_info=ve,
)
# ndarray stacking may fail if the arrays are heterogeneously-shaped.
arr = np.array(arr, dtype=object)
if not isinstance(arr, np.ndarray):
raise ValueError(
f"Must give ndarray or iterable of ndarrays, got {type(arr)} {arr}"
)
try:
timestamp_dtype = _try_infer_pa_timestamp_type(arr)
if timestamp_dtype:
# NOTE: Quirky Arrow behavior will coerce unsupported Numpy `datetime64`
# precisions that are nested inside a list type, but won't do it,
# if these are top-level ndarray. To work this around we have to cast
# ndarray values manually
arr = _coerce_np_datetime_to_pa_timestamp_precision(
arr, timestamp_dtype, column_name
)
return cls._from_numpy(arr)
except Exception as e:
data_str = ""
if column_name:
data_str += f"column: '{column_name}', "
data_str += f"shape: {arr.shape}, dtype: {arr.dtype}, data: {arr}"
raise ArrowConversionError(data_str) from e
@classmethod
def _from_numpy(
cls,
arr: np.ndarray,
) -> Union["ArrowTensorArray", "ArrowVariableShapedTensorArray"]:
if len(arr) > 0 and np.isscalar(arr[0]):
# Elements are scalar so a plain Arrow Array will suffice.
return pa.array(arr)
if _is_ndarray_variable_shaped_tensor(arr):
# Tensor elements have variable shape, so we delegate to
# ArrowVariableShapedTensorArray.
return ArrowVariableShapedTensorArray.from_numpy(arr)
if not arr.flags.c_contiguous:
# We only natively support C-contiguous ndarrays.
arr = np.ascontiguousarray(arr)
scalar_dtype = pa.from_numpy_dtype(arr.dtype)
if pa.types.is_string(scalar_dtype):
if arr.dtype.byteorder == ">" or (
arr.dtype.byteorder == "=" and sys.byteorder == "big"
):
raise ValueError(
"Only little-endian string tensors are supported, "
f"but got: {arr.dtype}",
)
scalar_dtype = pa.binary(arr.dtype.itemsize)
outer_len = arr.shape[0]
element_shape = arr.shape[1:]
total_num_items = arr.size
num_items_per_element = np.prod(element_shape) if element_shape else 1
# Shape up data buffer
if pa.types.is_boolean(scalar_dtype):
# NumPy doesn't represent boolean arrays as bit-packed, so we manually
# bit-pack the booleans before handing the buffer off to Arrow.
# NOTE: Arrow expects LSB bit-packed ordering.
# NOTE: This creates a copy.
arr = np.packbits(arr, bitorder="little")
data_buffer = pa.py_buffer(arr)
data_array = pa.Array.from_buffers(
scalar_dtype, total_num_items, [None, data_buffer]
)
from ray.data import DataContext
if DataContext.get_current().use_arrow_tensor_v2:
pa_type_ = ArrowTensorTypeV2(element_shape, scalar_dtype)
else:
pa_type_ = ArrowTensorType(element_shape, scalar_dtype)
offset_dtype = pa_type_.OFFSET_DTYPE.to_pandas_dtype()
# Create offsets buffer
if num_items_per_element == 0:
offsets = np.zeros(outer_len + 1, dtype=offset_dtype)
else:
offsets = np.arange(
0,
(outer_len + 1) * num_items_per_element,
num_items_per_element,
dtype=offset_dtype,
)
offset_buffer = pa.py_buffer(offsets)
storage = pa.Array.from_buffers(
pa_type_.storage_type,
outer_len,
[None, offset_buffer],
children=[data_array],
)
return pa_type_.wrap_array(storage)
def to_numpy(self, zero_copy_only: bool = True):
"""
Convert the entire array of tensors into a single ndarray.
Args:
zero_copy_only: If True, an exception will be raised if the
conversion to a NumPy array would require copying the
underlying data (e.g. in presence of nulls, or for
non-primitive types). This argument is currently ignored, so
zero-copy isn't enforced even if this argument is true.
Returns:
A single ndarray representing the entire array of tensors.
"""
# Buffers layout: [None, offset_buffer, None, data_buffer]
buffers = self.buffers()
data_buffer = buffers[3]
storage_list_type = self.storage.type
value_type = storage_list_type.value_type
shape = self.type.shape
# Batch type checks
is_boolean = pa.types.is_boolean(value_type)
# Calculate buffer item width once
if is_boolean:
# Arrow boolean array buffers are bit-packed, with 8 entries per byte,
# and are accessed via bit offsets.
buffer_item_width = value_type.bit_width
else:
# We assume all other array types are accessed via byte array
# offsets.
buffer_item_width = value_type.bit_width // 8
# Number of items per inner ndarray.
num_items_per_element = np.prod(shape) if shape else 1
# Base offset into data buffer, e.g. due to zero-copy slice.
buffer_offset = self.offset * num_items_per_element
# Offset of array data in buffer.
offset = buffer_item_width * buffer_offset
# Update the shape for ndarray
shape = (len(self),) + shape
if is_boolean:
# Special handling for boolean arrays, since Arrow bit-packs boolean arrays
# while NumPy does not.
# Cast as uint8 array and let NumPy unpack into a boolean view.
# Offset into uint8 array, where each element is a bucket for 8 booleans.
byte_bucket_offset = offset // 8
# Offset for a specific boolean, within a uint8 array element.
bool_offset = offset % 8
# The number of uint8 array elements (buckets) that our slice spans.
# Note that, due to the offset for a specific boolean, the slice can span
# byte boundaries even if it contains less than 8 booleans.
num_boolean_byte_buckets = 1 + ((bool_offset + np.prod(shape) - 1) // 8)
# Construct the uint8 array view on the buffer.
arr = np.ndarray(
(num_boolean_byte_buckets,),
dtype=np.uint8,
buffer=data_buffer,
offset=byte_bucket_offset,
)
# Unpack into a byte per boolean, using LSB bit-packed ordering.
arr = np.unpackbits(arr, bitorder="little")
# Interpret buffer as boolean array.
return np.ndarray(shape, dtype=np.bool_, buffer=arr, offset=bool_offset)
# Special handling of binary/string types. Assumes unicode string tensor columns
if pa.types.is_fixed_size_binary(value_type):
ext_dtype = np.dtype(
f"<U{value_type.byte_width // NUM_BYTES_PER_UNICODE_CHAR}"
)
else:
ext_dtype = value_type.to_pandas_dtype()
return np.ndarray(shape, dtype=ext_dtype, buffer=data_buffer, offset=offset)
def to_var_shaped_tensor_array(
self,
ndim: int,
) -> "ArrowVariableShapedTensorArray":
"""
Convert this tensor array to a variable-shaped tensor array.
"""
shape = self.type.shape
if ndim < len(shape):
raise ValueError(
f"Can't convert {self.type} to var-shaped tensor type with {ndim=}"
)
# NOTE: For ``ArrowTensorTypeV2`` we can construct variable-shaped
# tensor directly w/o modifying its internal storage.
#
# For (deprecated) ``ArrowTensorType`` we fallback to converting to Numpy,
# and reconstructing.
if not isinstance(self.type, ArrowTensorTypeV2):
return ArrowVariableShapedTensorArray.from_numpy(self.to_numpy())
# Pad target shape with singleton axis to match target number of
# dimensions
# TODO avoid padding
target_shape = _pad_shape_with_singleton_axes(shape, ndim)
# Construct shapes array
shape_array = pa.nulls(
len(self.storage),
type=ArrowVariableShapedTensorArray.SHAPES_ARRAY_TYPE,
).fill_null(target_shape)
storage = pa.StructArray.from_arrays(
[self.storage, shape_array],
["data", "shape"],
)
target_type = ArrowVariableShapedTensorType(
self.type.scalar_type,
ndim=ndim,
)
return target_type.wrap_array(storage)
# ArrowExtensionSerializeDeserializeCache needs to be first in the MRO to ensure the cache is used
@PublicAPI(stability="alpha")
| ArrowTensorArray |
python | google__pytype | pytype/pyi/parser_test.py | {
"start": 95229,
"end": 96413
} | class ____(parser_test_base.ParserTestBase):
def test_starargs(self):
self.check(
"""
from typing_extensions import TypeVarTuple, Unpack
_Ts = TypeVarTuple('_Ts')
def f(*args: Unpack[_Ts]): ...
""",
"""
from typing import Any
from typing_extensions import TypeVarTuple, TypeVarTuple as _Ts, Unpack
def f(*args) -> Any: ...
""",
)
def test_callable(self):
self.check(
"""
from typing import Any, Callable
from typing_extensions import TypeVarTuple, Unpack
_Ts = TypeVarTuple('_Ts')
f: Callable[[Unpack[_Ts]], Any]
""",
"""
from typing import Any, Callable
from typing_extensions import TypeVarTuple, TypeVarTuple as _Ts, Unpack
f: Callable[..., Any]
""",
)
def test_tuple(self):
self.check(
"""
from typing_extensions import TypeVarTuple, Unpack
_Ts = TypeVarTuple('_Ts')
def f(x: tuple[Unpack[_Ts]]): ...
""",
"""
from typing import Any
from typing_extensions import TypeVarTuple, TypeVarTuple as _Ts, Unpack
def f(x: tuple[Any, ...]) -> Any: ...
""",
)
| UnpackTest |
python | apache__airflow | providers/apache/livy/tests/unit/apache/livy/hooks/test_livy.py | {
"start": 2119,
"end": 20359
} | class ____:
@classmethod
def setup_class(cls):
clear_test_connections(add_default_connections_back=False)
@classmethod
def teardown_class(cls):
clear_test_connections(add_default_connections_back=True)
# TODO: Potential performance issue, converted setup_class to a setup_connections function level fixture
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(
conn_id=DEFAULT_CONN_ID,
conn_type="http",
host=DEFAULT_HOST,
schema=DEFAULT_SCHEMA,
port=DEFAULT_PORT,
)
)
create_connection_without_db(Connection(conn_id="default_port", conn_type="http", host="http://host"))
create_connection_without_db(Connection(conn_id="default_protocol", conn_type="http", host="host"))
create_connection_without_db(Connection(conn_id="port_set", host="host", conn_type="http", port=1234))
create_connection_without_db(
Connection(conn_id="schema_set", host="host", conn_type="http", schema="https")
)
create_connection_without_db(
Connection(conn_id="dont_override_schema", conn_type="http", host="http://host", schema="https")
)
create_connection_without_db(Connection(conn_id="missing_host", conn_type="http", port=1234))
create_connection_without_db(Connection(conn_id="invalid_uri", uri="http://invalid_uri:4321"))
create_connection_without_db(
Connection(
conn_id="with_credentials", login="login", password="secret", conn_type="http", host="host"
)
)
@pytest.mark.parametrize(
("conn_id", "expected"),
[
pytest.param("default_port", "http://host", id="default-port"),
pytest.param("default_protocol", "http://host", id="default-protocol"),
pytest.param("port_set", "http://host:1234", id="with-defined-port"),
pytest.param("schema_set", "https://host", id="with-defined-schema"),
pytest.param("dont_override_schema", "http://host", id="ignore-defined-schema"),
],
)
def test_build_get_hook(self, conn_id, expected):
hook = LivyHook(livy_conn_id=conn_id)
hook.get_conn()
assert hook.base_url == expected
@pytest.mark.skip("Inherited HttpHook does not handle missing hostname")
def test_missing_host(self):
with pytest.raises(AirflowException):
LivyHook(livy_conn_id="missing_host").get_conn()
def test_build_body_minimal_request(self):
assert LivyHook.build_post_batch_body(file="appname") == {"file": "appname"}
def test_build_body_complex_request(self):
body = LivyHook.build_post_batch_body(
file="appname",
class_name="org.example.livy",
proxy_user="proxyUser",
args=["a", "1"],
jars=["jar1", "jar2"],
files=["file1", "file2"],
py_files=["py1", "py2"],
archives=["arch1", "arch2"],
queue="queue",
name="name",
conf={"a": "b"},
driver_cores=2,
driver_memory="1M",
executor_memory="1m",
executor_cores="1",
num_executors="10",
)
assert body == {
"file": "appname",
"className": "org.example.livy",
"proxyUser": "proxyUser",
"args": ["a", "1"],
"jars": ["jar1", "jar2"],
"files": ["file1", "file2"],
"pyFiles": ["py1", "py2"],
"archives": ["arch1", "arch2"],
"queue": "queue",
"name": "name",
"conf": {"a": "b"},
"driverCores": 2,
"driverMemory": "1M",
"executorMemory": "1m",
"executorCores": "1",
"numExecutors": "10",
}
def test_parameters_validation(self):
with pytest.raises(ValueError, match=INVALID_JAVA_SIZE_STRINGS):
LivyHook.build_post_batch_body(file="appname", executor_memory="xxx")
assert LivyHook.build_post_batch_body(file="appname", args=["a", 1, 0.1])["args"] == ["a", "1", "0.1"]
@pytest.mark.parametrize(
"size",
[
pytest.param("1m", id="lowercase-short"),
pytest.param("1mb", id="lowercase-long"),
pytest.param("1mb", id="uppercase-short"),
pytest.param("1GB", id="uppercase-long"),
pytest.param("1Gb", id="mix-case"),
pytest.param(None, id="none"),
],
)
def test_validate_size_format(self, size):
assert LivyHook._validate_size_format(size)
@pytest.mark.parametrize(
"size",
[
pytest.param("1Gb foo", id="fullmatch"),
pytest.param("10", id="missing size"),
pytest.param(1, id="integer"),
],
)
def test_validate_size_format_failed(self, size):
with pytest.raises(ValueError, match=rf"Invalid java size format for string'{size}'"):
assert LivyHook._validate_size_format(size)
@pytest.mark.parametrize(
"value",
[
pytest.param([1, "string"], id="list"),
pytest.param((1, "string"), id="tuple"),
pytest.param([], id="empty list"),
],
)
def test_validate_list_of_stringables(self, value):
assert LivyHook._validate_list_of_stringables(value)
@pytest.mark.parametrize(
"value",
[
pytest.param({"a": "a"}, id="dict"),
pytest.param([1, {}], id="invalid element"),
pytest.param(None, id="none"),
pytest.param(42, id="integer"),
pytest.param("foo-bar", id="string"),
],
)
def test_validate_list_of_stringables_failed(self, value):
with pytest.raises(ValueError, match="List of strings expected"):
assert LivyHook._validate_list_of_stringables(value)
@pytest.mark.parametrize(
"config",
[
pytest.param({"k1": "v1", "k2": 0}, id="valid dictionary config"),
pytest.param({}, id="empty dictionary"),
pytest.param(None, id="none"),
],
)
def test_validate_extra_conf(self, config):
LivyHook._validate_extra_conf(config)
@pytest.mark.parametrize(
"config",
[
pytest.param("k1=v1", id="string"),
pytest.param([("k1", "v1"), ("k2", 0)], id="list of tuples"),
],
)
def test_validate_extra_conf_failed_non_dict(self, config):
with pytest.raises(ValueError, match=CONF_MUST_BE_DICT):
LivyHook._validate_extra_conf(config)
@pytest.mark.parametrize(
"config",
[
pytest.param({"outer": {"inner": "val"}}, id="nested dictionary"),
pytest.param({"has_val": "val", "no_val": None}, id="none values in dictionary"),
pytest.param({"has_val": "val", "no_val": ""}, id="empty values in dictionary"),
],
)
def test_validate_extra_conf_failed_dict(self, config):
with pytest.raises(ValueError, match=CONF_VALUES_MUST_BE_STR_OR_INT):
LivyHook._validate_extra_conf(config)
@patch("airflow.providers.apache.livy.hooks.livy.LivyHook.run_method")
def test_post_batch_arguments(self, mock_request):
mock_request.return_value.status_code = 201
mock_request.return_value.json.return_value = {
"id": BATCH_ID,
"state": BatchState.STARTING.value,
"log": [],
}
resp = LivyHook().post_batch(file="sparkapp")
mock_request.assert_called_once_with(
method="POST", endpoint="/batches", data=json.dumps({"file": "sparkapp"}), headers={}
)
request_args = mock_request.call_args.kwargs
assert "data" in request_args
assert isinstance(request_args["data"], str)
assert isinstance(resp, int)
assert resp == BATCH_ID
def test_post_batch_success(self, requests_mock):
requests_mock.register_uri(
"POST",
"//livy:8998/batches",
json={"id": BATCH_ID, "state": BatchState.STARTING.value, "log": []},
status_code=201,
)
resp = LivyHook().post_batch(file="sparkapp")
assert isinstance(resp, int)
assert resp == BATCH_ID
def test_post_batch_fail(self, requests_mock):
requests_mock.register_uri("POST", f"{MATCH_URL}/batches", json={}, status_code=400, reason="ERROR")
with pytest.raises(AirflowException):
LivyHook().post_batch(file="sparkapp")
def test_get_batch_success(self, requests_mock):
requests_mock.register_uri(
"GET", f"{MATCH_URL}/batches/{BATCH_ID}", json={"id": BATCH_ID}, status_code=200
)
resp = LivyHook().get_batch(BATCH_ID)
assert isinstance(resp, dict)
assert "id" in resp
def test_get_batch_fail(self, requests_mock):
requests_mock.register_uri(
"GET",
f"{MATCH_URL}/batches/{BATCH_ID}",
json={"msg": "Unable to find batch"},
status_code=404,
reason="ERROR",
)
with pytest.raises(AirflowException):
LivyHook().get_batch(BATCH_ID)
@patch("airflow.providers.apache.livy.hooks.livy.LivyHook.run_method")
@patch("airflow.providers.apache.livy.hooks.livy.LivyHook.get_conn")
def test_post_batch_calls_get_conn_if_no_batch_id(self, mock_get_conn, mock_run_method):
# mock run_method to get rid of call get_conn in it
mock_response = MagicMock(resp=requests.Response)
mock_response.json.return_value = {"id": BATCH_ID, "state": BatchState.STARTING.value, "log": []}
mock_run_method.return_value = mock_response
hook = LivyHook()
# base_url is not set
hook.post_batch(file="sparkapp")
mock_get_conn.assert_called_once()
# base_url is set
mock_get_conn.reset_mock()
hook.base_url = "//livy:8998"
hook.post_batch(file="sparkapp")
mock_get_conn.assert_not_called()
def test_invalid_uri(self):
with pytest.raises(RequestException):
LivyHook(livy_conn_id="invalid_uri").post_batch(file="sparkapp")
def test_get_batch_state_success(self, requests_mock):
running = BatchState.RUNNING
requests_mock.register_uri(
"GET",
f"{MATCH_URL}/batches/{BATCH_ID}/state",
json={"id": BATCH_ID, "state": running.value},
status_code=200,
)
state = LivyHook().get_batch_state(BATCH_ID)
assert isinstance(state, BatchState)
assert state == running
def test_get_batch_state_fail(self, requests_mock):
requests_mock.register_uri(
"GET", f"{MATCH_URL}/batches/{BATCH_ID}/state", json={}, status_code=400, reason="ERROR"
)
with pytest.raises(AirflowException):
LivyHook().get_batch_state(BATCH_ID)
def test_get_batch_state_missing(self, requests_mock):
requests_mock.register_uri("GET", f"{MATCH_URL}/batches/{BATCH_ID}/state", json={}, status_code=200)
with pytest.raises(AirflowException):
LivyHook().get_batch_state(BATCH_ID)
def test_parse_post_response(self):
res_id = LivyHook._parse_post_response({"id": BATCH_ID, "log": []})
assert res_id == BATCH_ID
def test_delete_batch_success(self, requests_mock):
requests_mock.register_uri(
"DELETE", f"{MATCH_URL}/batches/{BATCH_ID}", json={"msg": "deleted"}, status_code=200
)
assert LivyHook().delete_batch(BATCH_ID) == {"msg": "deleted"}
def test_delete_batch_fail(self, requests_mock):
requests_mock.register_uri(
"DELETE", f"{MATCH_URL}/batches/{BATCH_ID}", json={}, status_code=400, reason="ERROR"
)
with pytest.raises(AirflowException):
LivyHook().delete_batch(BATCH_ID)
def test_missing_batch_id(self, requests_mock):
requests_mock.register_uri("POST", f"{MATCH_URL}/batches", json={}, status_code=201)
with pytest.raises(AirflowException):
LivyHook().post_batch(file="sparkapp")
@pytest.mark.parametrize("session_id", VALID_SESSION_ID_TEST_CASES)
def test_get_batch_validation(self, session_id, requests_mock):
requests_mock.register_uri(
"GET", f"{MATCH_URL}/batches/{session_id}", json=SAMPLE_GET_RESPONSE, status_code=200
)
assert LivyHook().get_batch(session_id) == SAMPLE_GET_RESPONSE
@pytest.mark.parametrize("session_id", INVALID_SESSION_ID_TEST_CASES)
def test_get_batch_validation_failed(self, session_id):
with pytest.raises(TypeError, match=r"\'session_id\' must be an integer"):
LivyHook().get_batch(session_id)
@pytest.mark.parametrize("session_id", VALID_SESSION_ID_TEST_CASES)
def test_get_batch_state_validation(self, session_id, requests_mock):
requests_mock.register_uri(
"GET", f"{MATCH_URL}/batches/{session_id}/state", json=SAMPLE_GET_RESPONSE, status_code=200
)
assert LivyHook().get_batch_state(session_id) == BatchState.SUCCESS
@pytest.mark.parametrize("session_id", INVALID_SESSION_ID_TEST_CASES)
def test_get_batch_state_validation_failed(self, session_id):
with pytest.raises(TypeError, match=r"\'session_id\' must be an integer"):
LivyHook().get_batch_state(session_id)
def test_delete_batch_validation(self, requests_mock):
requests_mock.register_uri(
"DELETE", f"{MATCH_URL}/batches/{BATCH_ID}", json={"id": BATCH_ID}, status_code=200
)
assert LivyHook().delete_batch(BATCH_ID) == {"id": BATCH_ID}
@pytest.mark.parametrize("session_id", INVALID_SESSION_ID_TEST_CASES)
def test_delete_batch_validation_failed(self, session_id):
with pytest.raises(TypeError, match=r"\'session_id\' must be an integer"):
LivyHook().delete_batch(session_id)
@pytest.mark.parametrize("session_id", VALID_SESSION_ID_TEST_CASES)
def test_check_session_id(self, session_id):
LivyHook._validate_session_id(session_id) # Should not raise any error
@pytest.mark.parametrize("session_id", INVALID_SESSION_ID_TEST_CASES)
def test_check_session_id_failed(self, session_id):
with pytest.raises(TypeError, match=r"\'session_id\' must be an integer"):
LivyHook._validate_session_id("asd")
def test_extra_headers(self, requests_mock):
requests_mock.register_uri(
"POST",
"//livy:8998/batches",
json={"id": BATCH_ID, "state": BatchState.STARTING.value, "log": []},
status_code=201,
request_headers={"X-Requested-By": "user"},
)
hook = LivyHook(extra_headers={"X-Requested-By": "user"})
hook.post_batch(file="sparkapp")
def test_alternate_auth_type(self):
auth_type = MagicMock()
hook = LivyHook(livy_conn_id="with_credentials", auth_type=auth_type)
auth_type.assert_not_called()
hook.get_conn()
auth_type.assert_called_once_with("login", "secret")
@patch("airflow.providers.apache.livy.hooks.livy.LivyHook.run_method")
def test_post_batch_with_endpoint_prefix(self, mock_request):
mock_request.return_value.status_code = 201
mock_request.return_value.json.return_value = {
"id": BATCH_ID,
"state": BatchState.STARTING.value,
"log": [],
}
resp = LivyHook(endpoint_prefix="/livy").post_batch(file="sparkapp")
mock_request.assert_called_once_with(
method="POST", endpoint="/livy/batches", data=json.dumps({"file": "sparkapp"}), headers={}
)
request_args = mock_request.call_args.kwargs
assert "data" in request_args
assert isinstance(request_args["data"], str)
assert isinstance(resp, int)
assert resp == BATCH_ID
def test_get_batch_with_endpoint_prefix(self, requests_mock):
requests_mock.register_uri(
"GET", f"{MATCH_URL}/livy/batches/{BATCH_ID}", json={"id": BATCH_ID}, status_code=200
)
resp = LivyHook(endpoint_prefix="/livy").get_batch(BATCH_ID)
assert isinstance(resp, dict)
assert "id" in resp
def test_get_batch_state_with_endpoint_prefix(self, requests_mock):
running = BatchState.RUNNING
requests_mock.register_uri(
"GET",
f"{MATCH_URL}/livy/batches/{BATCH_ID}/state",
json={"id": BATCH_ID, "state": running.value},
status_code=200,
)
state = LivyHook(endpoint_prefix="/livy").get_batch_state(BATCH_ID)
assert isinstance(state, BatchState)
assert state == running
def test_delete_batch_with_endpoint_prefix(self, requests_mock):
requests_mock.register_uri(
"DELETE", f"{MATCH_URL}/livy/batches/{BATCH_ID}", json={"msg": "deleted"}, status_code=200
)
assert LivyHook(endpoint_prefix="/livy").delete_batch(BATCH_ID) == {"msg": "deleted"}
@pytest.mark.parametrize(
"prefix",
["/livy/", "livy", "/livy", "livy/"],
ids=["leading_and_trailing_slashes", "no_slashes", "leading_slash", "trailing_slash"],
)
def test_endpoint_prefix_is_sanitized_simple(self, requests_mock, prefix):
requests_mock.register_uri(
"GET", f"{MATCH_URL}/livy/batches/{BATCH_ID}", json={"id": BATCH_ID}, status_code=200
)
resp = LivyHook(endpoint_prefix=prefix).get_batch(BATCH_ID)
assert isinstance(resp, dict)
assert "id" in resp
def test_endpoint_prefix_is_sanitized_multiple_path_elements(self, requests_mock):
requests_mock.register_uri(
"GET", f"{MATCH_URL}/livy/foo/bar/batches/{BATCH_ID}", json={"id": BATCH_ID}, status_code=200
)
resp = LivyHook(endpoint_prefix="/livy/foo/bar/").get_batch(BATCH_ID)
assert isinstance(resp, dict)
assert "id" in resp
| TestLivyDbHook |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_superfences.py | {
"start": 5507,
"end": 6342
} | class ____(util.MdCase):
"""Test highlight line wraps."""
extension = ['pymdownx.highlight', 'pymdownx.superfences']
extension_configs = {
'pymdownx.highlight': {
'line_spans': '__my_span',
'linenums_style': 'inline'
}
}
def test_linespans(self):
"""Test wrapping a line in line spans."""
self.check_markdown(
r'''
```python linenums="2"
import test
```
''',
r'''
<div class="highlight"><pre><span></span><code><span id="__my_span-0-2"><span class="linenos">2</span><span class="kn">import</span><span class="w"> </span><span class="nn">test</span>
</span></code></pre></div>
''', # noqa: E501
True
)
| TestHighlightLineWrapsInline |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 374707,
"end": 375071
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "column_edge")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
column_edge = sgqlc.types.Field("ProjectColumnEdge", graphql_name="columnEdge")
| MoveProjectColumnPayload |
python | walkccc__LeetCode | solutions/2297. Jump Game IX/2297.py | {
"start": 0,
"end": 531
} | class ____:
def minCost(self, nums: list[int], costs: list[int]) -> int:
# dp[i] := the minimum cost to jump to i
dp = [math.inf] * len(nums)
maxStack = []
minStack = []
dp[0] = 0
for i, num in enumerate(nums):
while maxStack and num >= nums[maxStack[-1]]:
dp[i] = min(dp[i], dp[maxStack.pop()] + costs[i])
while minStack and num < nums[minStack[-1]]:
dp[i] = min(dp[i], dp[minStack.pop()] + costs[i])
maxStack.append(i)
minStack.append(i)
return dp[-1]
| Solution |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_basic.py | {
"start": 40330,
"end": 42571
} | class ____(fixtures.MappedTest):
"""tests eager load/lazy load of child items off inheritance mappers, tests
that LazyLoader constructs the right query condition."""
@classmethod
def define_tables(cls, metadata):
Table(
"foo",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
)
Table(
"bar",
metadata,
Column("id", Integer, ForeignKey("foo.id"), primary_key=True),
Column("bar_data", String(30)),
)
Table(
"bar_foo",
metadata,
Column("bar_id", Integer, ForeignKey("bar.id")),
Column("foo_id", Integer, ForeignKey("foo.id")),
)
@classmethod
def setup_mappers(cls):
foo, bar, bar_foo = cls.tables("foo", "bar", "bar_foo")
class Foo(cls.Comparable):
pass
class Bar(Foo):
pass
foos = cls.mapper_registry.map_imperatively(Foo, foo)
bars = cls.mapper_registry.map_imperatively(Bar, bar, inherits=foos)
bars.add_property("lazy", relationship(foos, bar_foo, lazy="select"))
bars.add_property(
"eager", relationship(foos, bar_foo, lazy="joined", viewonly=True)
)
@classmethod
def insert_data(cls, connection):
foo, bar, bar_foo = cls.tables("foo", "bar", "bar_foo")
connection.execute(foo.insert(), dict(data="foo1"))
connection.execute(bar.insert(), dict(id=1, data="bar1"))
connection.execute(foo.insert(), dict(data="foo2"))
connection.execute(bar.insert(), dict(id=2, data="bar2"))
connection.execute(foo.insert(), dict(data="foo3")) # 3
connection.execute(foo.insert(), dict(data="foo4")) # 4
connection.execute(bar_foo.insert(), dict(bar_id=1, foo_id=3))
connection.execute(bar_foo.insert(), dict(bar_id=2, foo_id=4))
def test_basic(self):
Bar = self.classes.Bar
sess = fixture_session()
q = sess.query(Bar)
self.assert_(len(q.first().lazy) == 1)
self.assert_(len(q.first().eager) == 1)
| EagerLazyTest |
python | pypa__pip | src/pip/_vendor/distlib/util.py | {
"start": 14846,
"end": 16281
} | class ____(object):
def __init__(self, func):
self.func = func
# for attr in ('__name__', '__module__', '__doc__'):
# setattr(self, attr, getattr(func, attr, None))
def __get__(self, obj, cls=None):
if obj is None:
return self
value = self.func(obj)
object.__setattr__(obj, self.func.__name__, value)
# obj.__dict__[self.func.__name__] = value = self.func(obj)
return value
def convert_path(pathname):
"""Return 'pathname' as a name that will work on the native filesystem.
The path is split on '/' and put back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError("path '%s' cannot be absolute" % pathname)
if pathname[-1] == '/':
raise ValueError("path '%s' cannot end with '/'" % pathname)
paths = pathname.split('/')
while os.curdir in paths:
paths.remove(os.curdir)
if not paths:
return os.curdir
return os.path.join(*paths)
| cached_property |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/integration/filters.py | {
"start": 9777,
"end": 12039
} | class ____(PosixTargetFilter[OriginConfig]):
"""Target filter for localhost."""
def filter_targets(self, targets: list[IntegrationTarget], exclude: set[str]) -> None:
"""Filter the list of targets, adding any which this host profile cannot support to the provided exclude list."""
super().filter_targets(targets, exclude)
arch = detect_architecture(self.config.python.path)
if arch:
self.skip(f'skip/{arch}', f'which are not supported by {arch}', targets, exclude)
@cache
def get_host_target_type_map() -> dict[t.Type[HostConfig], t.Type[TargetFilter]]:
"""Create and return a mapping of HostConfig types to TargetFilter types."""
return get_type_map(TargetFilter, HostConfig)
def get_target_filter(args: IntegrationConfig, configs: list[HostConfig], controller: bool) -> TargetFilter:
"""Return an integration test target filter instance for the provided host configurations."""
target_type = type(configs[0])
if issubclass(target_type, ControllerConfig):
target_type = type(args.controller)
configs = [args.controller]
filter_type = get_host_target_type_map()[target_type]
filter_instance = filter_type(args, configs, controller)
return filter_instance
def get_remote_skip_aliases(config: RemoteConfig) -> dict[str, str]:
"""Return a dictionary of skip aliases and the reason why they apply."""
return get_platform_skip_aliases(config.platform, config.version, config.arch)
def get_platform_skip_aliases(platform: str, version: str, arch: t.Optional[str]) -> dict[str, str]:
    """Return a dictionary of skip aliases and the reason why they apply."""
    descriptions = {
        f'skip/{platform}': platform,
        f'skip/{platform}/{version}': f'{platform} {version}',
        f'skip/{platform}{version}': f'{platform} {version}',  # legacy syntax, use above format
    }

    if arch:
        # Architecture-specific skips only apply when the architecture is known.
        descriptions[f'skip/{arch}'] = arch
        descriptions[f'skip/{arch}/{platform}'] = f'{platform} on {arch}'
        descriptions[f'skip/{arch}/{platform}/{version}'] = f'{platform} {version} on {arch}'

    return {alias: f'which are not supported by {description}' for alias, description in descriptions.items()}
| OriginTargetFilter |
python | huggingface__transformers | src/transformers/models/chameleon/processing_chameleon.py | {
"start": 1114,
"end": 1476
class ____(ProcessingKwargs, total=False):
    # Keyword-argument schema for the processor; ``total=False`` makes every
    # key optional for callers.
    text_kwargs: ChameleonTextKwargs
    # Defaults applied when the caller does not supply these kwargs
    # (presumably merged by the ProcessingKwargs machinery -- confirm there).
    _defaults = {
        "text_kwargs": {
            "padding": False,
            "return_for_text_completion": False,
            "return_mm_token_type_ids": False,
        },
        "common_kwargs": {
            "return_tensors": "pt",
        },
    }
| ChameleonProcessorKwargs |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/dml.py | {
"start": 55205,
"end": 57923
class ____:
    """Mixin providing WHERE-clause construction for UPDATE/DELETE statements."""

    table: _DMLTableElement

    _where_criteria: Tuple[ColumnElement[Any], ...] = ()

    _post_criteria_clause: Optional[ClauseElement] = None
    """used by extensions to Update/Delete etc. to add additional syntactical
    constructs, e.g. LIMIT etc.

    .. versionadded:: 2.1

    """

    # can't put position_map here either without HasSyntaxExtensions
    # _position_map = util.immutabledict(
    #     {"post_criteria": "_post_criteria_clause"}
    # )

    @_generative
    def where(self, *whereclause: _ColumnExpressionArgument[bool]) -> Self:
        """Return a new construct with the given expression(s) added to
        its WHERE clause, joined to the existing clause via AND, if any.

        Both :meth:`_dml.Update.where` and :meth:`_dml.Delete.where`
        support multiple-table forms, including database-specific
        ``UPDATE...FROM`` as well as ``DELETE..USING``.  For backends that
        don't have multiple-table support, a backend agnostic approach
        to using multiple tables is to make use of correlated subqueries.
        See the linked tutorial sections below for examples.

        .. seealso::

            :ref:`tutorial_correlated_updates`

            :ref:`tutorial_update_from`

            :ref:`tutorial_multi_table_deletes`

        """
        coerced = tuple(
            coercions.expect(
                roles.WhereHavingRole, criterion, apply_propagate_attrs=self
            )
            for criterion in whereclause
        )
        self._where_criteria += coerced
        return self

    def filter(self, *criteria: roles.ExpressionElementRole[Any]) -> Self:
        """A synonym for the :meth:`_dml.DMLWhereBase.where` method.

        .. versionadded:: 1.4

        """
        return self.where(*criteria)

    def _filter_by_zero(self) -> _DMLTableElement:
        # Entity against which filter_by() resolves attribute names.
        return self.table

    def filter_by(self, **kwargs: Any) -> Self:
        r"""apply the given filtering criterion as a WHERE clause
        to this select.

        """
        from_entity = self._filter_by_zero()

        return self.filter(
            *(
                _entity_namespace_key(from_entity, key) == value
                for key, value in kwargs.items()
            )
        )

    @property
    def whereclause(self) -> Optional[ColumnElement[Any]]:
        """Return the completed WHERE clause for this :class:`.DMLWhereBase`
        statement.

        This assembles the current collection of WHERE criteria
        into a single :class:`_expression.BooleanClauseList` construct.

        .. versionadded:: 1.4

        """
        return BooleanClauseList._construct_for_whereclause(
            self._where_criteria
        )
| DMLWhereBase |
python | falconry__falcon | tests/test_middleware.py | {
"start": 4692,
"end": 4842
class ____:
    def setup_method(self, method):
        """Reset the shared module-level context before each test method runs."""
        global context
        context = dict(executed_methods=[])
| TestMiddleware |
python | networkx__networkx | networkx/utils/configs.py | {
"start": 168,
"end": 7370
class ____:
    """The base class for NetworkX configuration.

    There are two ways to use this to create configurations. The recommended way
    is to subclass ``Config`` with docs and annotations.

    >>> class MyConfig(Config):
    ...     '''Breakfast!'''
    ...
    ...     eggs: int
    ...     spam: int
    ...
    ...     def _on_setattr(self, key, value):
    ...         assert isinstance(value, int) and value >= 0
    ...         return value
    >>> cfg = MyConfig(eggs=1, spam=5)

    Another way is to simply pass the initial configuration as keyword arguments to
    the ``Config`` instance:

    >>> cfg1 = Config(eggs=1, spam=5)
    >>> cfg1
    Config(eggs=1, spam=5)

    Once defined, config items may be modified, but can't be added or deleted by default.
    ``Config`` is a ``Mapping``, and can get and set configs via attributes or brackets:

    >>> cfg.eggs = 2
    >>> cfg.eggs
    2
    >>> cfg["spam"] = 42
    >>> cfg["spam"]
    42

    For convenience, it can also set configs within a context with the "with" statement:

    >>> with cfg(spam=3):
    ...     print("spam (in context):", cfg.spam)
    spam (in context): 3
    >>> print("spam (after context):", cfg.spam)
    spam (after context): 42

    Subclasses may also define ``_on_setattr`` (as done in the example above)
    to ensure the value being assigned is valid:

    >>> cfg.spam = -1
    Traceback (most recent call last):
        ...
    AssertionError

    If a more flexible configuration object is needed that allows adding and deleting
    configurations, then pass ``strict=False`` when defining the subclass:

    >>> class FlexibleConfig(Config, strict=False):
    ...     default_greeting: str = "Hello"
    >>> flexcfg = FlexibleConfig()
    >>> flexcfg.name = "Mr. Anderson"
    >>> flexcfg
    FlexibleConfig(default_greeting='Hello', name='Mr. Anderson')
    """

    def __init_subclass__(cls, strict=True):
        # Record strictness on the subclass; consulted by __new__ when the
        # class is wrapped in a dataclass (controls repr/slots there too).
        cls._strict = strict

    def __new__(cls, **kwargs):
        # The class is (re)wrapped in a dataclass on instantiation before the
        # instance is created, so annotations become dataclass fields.
        orig_class = cls
        if cls is Config:
            # Enable the "simple" case of accepting config definition as keywords
            cls = type(
                cls.__name__,
                (cls,),
                {"__annotations__": {key: typing.Any for key in kwargs}},
            )
        cls = dataclass(
            eq=False,
            repr=cls._strict,
            slots=cls._strict,
            kw_only=True,
            match_args=False,
        )(cls)
        if not cls._strict:
            cls.__repr__ = _flexible_repr
        cls._orig_class = orig_class  # Save original class so we can pickle
        cls._prev = None  # Stage previous configs to enable use as context manager
        cls._context_stack = []  # Stack of previous configs when used as context
        instance = object.__new__(cls)
        instance.__init__(**kwargs)
        return instance

    def _on_setattr(self, key, value):
        """Process config value and check whether it is valid. Useful for subclasses."""
        return value

    def _on_delattr(self, key):
        """Callback for when a config item is being deleted. Useful for subclasses."""

    # Control behavior of attributes
    def __dir__(self):
        return self.__dataclass_fields__.keys()

    def __setattr__(self, key, value):
        if self._strict and key not in self.__dataclass_fields__:
            raise AttributeError(f"Invalid config name: {key!r}")
        value = self._on_setattr(key, value)
        object.__setattr__(self, key, value)
        # Any direct mutation invalidates staged context-manager state.
        self.__class__._prev = None

    def __delattr__(self, key):
        if self._strict:
            raise TypeError(
                f"Configuration items can't be deleted (can't delete {key!r})."
            )
        self._on_delattr(key)
        object.__delattr__(self, key)
        self.__class__._prev = None

    # Be a `collection.abc.Collection`
    def __contains__(self, key):
        return (
            key in self.__dataclass_fields__ if self._strict else key in self.__dict__
        )

    def __iter__(self):
        return iter(self.__dataclass_fields__ if self._strict else self.__dict__)

    def __len__(self):
        return len(self.__dataclass_fields__ if self._strict else self.__dict__)

    def __reversed__(self):
        return reversed(self.__dataclass_fields__ if self._strict else self.__dict__)

    # Add dunder methods for `collections.abc.Mapping`
    def __getitem__(self, key):
        try:
            return getattr(self, key)
        except AttributeError as err:
            raise KeyError(*err.args) from None

    def __setitem__(self, key, value):
        try:
            self.__setattr__(key, value)
        except AttributeError as err:
            raise KeyError(*err.args) from None

    def __delitem__(self, key):
        try:
            self.__delattr__(key)
        except AttributeError as err:
            raise KeyError(*err.args) from None

    _ipython_key_completions_ = __dir__  # config["<TAB>

    # Go ahead and make it a `collections.abc.Mapping`
    def get(self, key, default=None):
        return getattr(self, key, default)

    def items(self):
        return collections.abc.ItemsView(self)

    def keys(self):
        return collections.abc.KeysView(self)

    def values(self):
        return collections.abc.ValuesView(self)

    # dataclass can define __eq__ for us, but do it here so it works after pickling
    def __eq__(self, other):
        if not isinstance(other, Config):
            return NotImplemented
        return self._orig_class == other._orig_class and self.items() == other.items()

    # Make pickle work
    def __reduce__(self):
        return self._deserialize, (self._orig_class, dict(self))

    @staticmethod
    def _deserialize(cls, kwargs):
        return cls(**kwargs)

    # Allow to be used as context manager
    def __call__(self, **kwargs):
        # Validate all new values up front so a bad value cannot leave the
        # config partially updated.
        kwargs = {key: self._on_setattr(key, val) for key, val in kwargs.items()}
        prev = dict(self)
        for key, val in kwargs.items():
            setattr(self, key, val)
        self.__class__._prev = prev
        return self

    def __enter__(self):
        if self.__class__._prev is None:
            raise RuntimeError(
                "Config being used as a context manager without config items being set. "
                "Set config items via keyword arguments when calling the config object. "
                "For example, using config as a context manager should be like:\n\n"
                '    >>> with cfg(breakfast="spam"):\n'
                "    ...     ...  # Do stuff\n"
            )
        self.__class__._context_stack.append(self.__class__._prev)
        self.__class__._prev = None
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Restore the configuration captured when the context was entered.
        prev = self.__class__._context_stack.pop()
        for key, val in prev.items():
            setattr(self, key, val)
def _flexible_repr(self):
    # repr used for non-strict (dict-backed) Config subclasses.
    body = ", ".join(f"{key}={val!r}" for key, val in self.__dict__.items())
    return f"{self.__class__.__qualname__}({body})"
# Register, b/c `Mapping.__subclasshook__` returns `NotImplemented`
# (virtual registration makes isinstance(cfg, collections.abc.Mapping) succeed).
collections.abc.Mapping.register(Config)
| Config |
python | allegroai__clearml | clearml/binding/environ_bind.py | {
"start": 3389,
"end": 11890
class ____(object):
    """Patches ``os.fork`` (or installs ``os.register_at_fork`` callbacks) and
    ``multiprocessing`` process startup so forked child processes keep
    reporting to the current ClearML Task and flush it on exit."""

    _original_fork = None  # saved os.fork when patched directly (no register_at_fork)
    _registered_fork_callbacks = False  # True once register_at_fork hooks installed
    _current_task = None  # Task tracked across forks
    _original_process_run = None  # saved BaseProcess.run

    @classmethod
    def patch_fork(cls, task: "Task") -> None:
        """Install all fork/multiprocessing patches for the given Task (idempotent)."""
        cls._current_task = task
        if not task:
            return

        # first we need to patch regular fork
        # because forked processes do not support atexit, they call os._exit directly)
        # noinspection PyBroadException
        try:
            # only once
            if cls._registered_fork_callbacks or cls._original_fork:
                return
            try:
                os.register_at_fork(
                    before=PatchOsFork._fork_callback_before,
                    after_in_child=PatchOsFork._fork_callback_after_child,
                )
                cls._registered_fork_callbacks = True
            except Exception:
                # python <3.6
                if six.PY2:
                    cls._original_fork = staticmethod(os.fork)
                else:
                    cls._original_fork = os.fork
                os.fork = cls._patched_fork
        except Exception:
            pass

        # now we need to patch Process.run because the bootstrap code
        # shuts everything down before calling os._exit that we patched above
        try:
            from multiprocessing.process import BaseProcess

            PatchOsFork._original_process_run = BaseProcess.run
            BaseProcess.run = PatchOsFork._patched_process_run
        except:  # noqa
            pass

    @staticmethod
    def _patched_pool_worker(original_worker: Callable, *args: Any, **kwargs: Any) -> Any:
        """Wrap multiprocessing.pool's worker so its result queue is wrapped to
        flush the Task before results are pushed (Pool kills the process right after)."""
        if not PatchOsFork._current_task:
            return original_worker(*args, **kwargs)

        try:
            # The out-queue is the 2nd positional arg or the 'outqueue' kwarg.
            if len(args) >= 2 and hasattr(args[1], "put"):
                args = list(args)
                args[1] = SimpleQueueWrapper(PatchOsFork._current_task, args[1])
                args = tuple(args)
            elif "outqueue" in kwargs and hasattr(kwargs["outqueue"], "put"):
                kwargs["outqueue"] = SimpleQueueWrapper(PatchOsFork._current_task, kwargs["outqueue"])
        except:  # noqa
            pass

        return original_worker(*args, **kwargs)

    @staticmethod
    def _patched_process_run(self, *args: Any, **kwargs: Any) -> None:
        """Replacement for BaseProcess.run that ensures the Task is closed/flushed
        when the subprocess finishes."""
        if not PatchOsFork._current_task:
            return PatchOsFork._original_process_run(self, *args, **kwargs)

        try:
            from ..task import Task

            task = Task.current_task()
        except:  # noqa
            task = None

        # check if this is Process Pool function
        patched_worker = False
        if hasattr(self, "_target"):
            # Now we have to patch Pool, because pool terminates subprocess directly after
            # the return value of the pool worker function is pushed into the queue,
            # which means it will terminate the process before we finish running our "atexit" call
            try:
                if self._target == pool.worker:  # noqa
                    self._target = partial(PatchOsFork._patched_pool_worker, pool.worker)  # noqa
                    patched_worker = True
            except:  # noqa
                pass

        try:
            return PatchOsFork._original_process_run(self, *args, **kwargs)
        finally:
            if task and patched_worker:
                try:
                    # noinspection PyProtectedMember
                    if task._report_subprocess_enabled:
                        # just in case, remove at exit hooks, we will deadlock when the
                        # main Pool manager will terminate this process, and it will...
                        # noinspection PyProtectedMember
                        task._at_exit_called = True
                    else:
                        # terminate the current Task
                        # noinspection PyProtectedMember
                        task._at_exit()
                except:  # noqa
                    pass

    @staticmethod
    def _fork_callback_before() -> None:
        """Pre-fork hook: make sure any deferred Task initialization has completed."""
        if not PatchOsFork._current_task:
            return
        from ..task import Task

        # ensure deferred is done, but never try to generate a Task object
        # noinspection PyProtectedMember
        task = Task._Task__main_task
        # this will force the deferred init call to finish
        # noinspection PyProtectedMember
        Task._wait_for_deferred(task)

    @staticmethod
    def _fork_callback_after_child() -> None:
        """Post-fork hook (child side): re-wire signal/exit hooks and reporter
        threads so the child keeps reporting to the Task."""
        if not PatchOsFork._current_task:
            return

        from ..task import Task

        # force creating a Task
        task = Task.current_task()
        if not task:
            return

        if not Task._report_subprocess_enabled:
            # https://stackoverflow.com/a/34507557
            # NOTICE: subprocesses do not exit through exit we have to register signals
            if task._Task__exit_hook:
                task._Task__exit_hook.register_signal_and_exception_hooks()
        else:
            # noinspection PyProtectedMember
            task._remove_signal_hooks()
            # noinspection PyProtectedMember
            if Task._report_subprocess_enabled:
                # noinspection PyProtectedMember
                task._remove_exception_hooks()

        PatchOsFork._current_task = task
        # # Hack: now make sure we setup the reporter threads (Log+Reporter)
        # noinspection PyProtectedMember
        if not bool(task._report_subprocess_enabled):
            BackgroundMonitor.start_all(task=task)

        # if we are reporting into a subprocess, no need to further patch the exit functions
        if Task._report_subprocess_enabled:
            return

        # The signal handler method is Not enough, for the time being, we have both
        # even though it makes little sense
        # # if we got here patch the os._exit of our instance to call us
        def _at_exit_callback(*a_args: Any, **a_kwargs: Any) -> None:
            # just make sure we flush the internal state (the at exist caught by the external signal does the rest
            # in theory we should not have to do any of that, but for some reason if we do not
            # the signal is never caught by the signal call backs, not sure why....
            sleep(0.1)

            # Since at_exist handlers do not work on forked processes, we have to manually call them here
            if task:
                try:
                    # not to worry there is a double _at_exit protection implemented inside task._at_exit()
                    # noinspection PyProtectedMember
                    task._at_exit()
                except:  # noqa
                    pass

            # noinspection PyProtectedMember, PyUnresolvedReferences
            return os._org_exit(*a_args, **a_kwargs)

        if not hasattr(os, "_org_exit"):
            # noinspection PyProtectedMember, PyUnresolvedReferences
            os._org_exit = os._exit

        # noinspection PyProtectedMember
        # https://stackoverflow.com/a/34507557
        # NOTICE: subprocesses do not exit through exit, and in most cases not with _exit,
        # this means at_exit calls are Not registered respected
        os._exit = _at_exit_callback

    @staticmethod
    def _patched_fork(*args: Any, **kwargs: Any) -> int:
        """Direct os.fork replacement used on interpreters without register_at_fork."""
        if not PatchOsFork._current_task:
            return PatchOsFork._original_fork(*args, **kwargs)

        PatchOsFork._fork_callback_before()

        ret = PatchOsFork._original_fork(*args, **kwargs)
        if not PatchOsFork._current_task:
            return ret
        # Make sure the new process stdout is logged
        if not ret:
            # ret == 0 means we are in the child process.
            PatchOsFork._fork_callback_after_child()

        return ret

    @staticmethod
    def unpatch_fork() -> None:
        """Restore the saved fork hook if it was patched."""
        # NOTE(review): this compares/restores os._exit against the saved
        # os.fork (set in patch_fork) -- looks like it should operate on
        # os.fork instead; confirm intent before relying on unpatch.
        try:
            if PatchOsFork._original_fork and os._exit != PatchOsFork._original_fork:
                os._exit = PatchOsFork._original_fork
                PatchOsFork._original_fork = None
        except Exception:
            pass

    @staticmethod
    def unpatch_process_run() -> None:
        """Restore the original BaseProcess.run if it was patched."""
        try:
            from multiprocessing.process import BaseProcess

            if PatchOsFork._original_process_run and BaseProcess.run != PatchOsFork._original_process_run:
                BaseProcess.run = PatchOsFork._original_process_run
                PatchOsFork._original_process_run = None
        except Exception:
            pass
| PatchOsFork |
python | lepture__authlib | authlib/jose/rfc7515/jws.py | {
"start": 731,
"end": 13717
class ____:
    """JSON Web Signature (RFC 7515) serializer/deserializer supporting the
    Compact serialization and both flattened and general JSON serializations."""

    #: Registered Header Parameter Names defined by Section 4.1
    REGISTERED_HEADER_PARAMETER_NAMES = frozenset(
        [
            "alg",
            "jku",
            "jwk",
            "kid",
            "x5u",
            "x5c",
            "x5t",
            "x5t#S256",
            "typ",
            "cty",
            "crit",
        ]
    )

    # Upper bound on accepted serialization length (denial-of-service guard).
    MAX_CONTENT_LENGTH: int = 256000

    #: Defined available JWS algorithms in the registry
    ALGORITHMS_REGISTRY = {}

    def __init__(self, algorithms=None, private_headers=None):
        # algorithms: optional allow-list of algorithm names; None accepts any
        # registered algorithm.  private_headers: extra header parameter names
        # accepted beyond the registered set (None disables the check).
        self._private_headers = private_headers
        self._algorithms = algorithms

    @classmethod
    def register_algorithm(cls, algorithm):
        """Register a JWS algorithm implementation in the class-level registry."""
        if not algorithm or algorithm.algorithm_type != "JWS":
            raise ValueError(f"Invalid algorithm for JWS, {algorithm!r}")
        cls.ALGORITHMS_REGISTRY[algorithm.name] = algorithm

    def serialize_compact(self, protected, payload, key):
        """Generate a JWS Compact Serialization. The JWS Compact Serialization
        represents digitally signed or MACed content as a compact, URL-safe
        string, per `Section 7.1`_.

        .. code-block:: text

            BASE64URL(UTF8(JWS Protected Header)) || '.' ||
            BASE64URL(JWS Payload) || '.' ||
            BASE64URL(JWS Signature)

        :param protected: A dict of protected header
        :param payload: A bytes/string of payload
        :param key: Private key used to generate signature
        :return: byte
        """
        jws_header = JWSHeader(protected, None)
        self._validate_private_headers(protected)
        self._validate_crit_headers(protected)
        algorithm, key = self._prepare_algorithm_key(protected, payload, key)

        protected_segment = json_b64encode(jws_header.protected)
        payload_segment = urlsafe_b64encode(to_bytes(payload))

        # calculate signature
        signing_input = b".".join([protected_segment, payload_segment])
        signature = urlsafe_b64encode(algorithm.sign(signing_input, key))
        return b".".join([protected_segment, payload_segment, signature])

    def deserialize_compact(self, s, key, decode=None):
        """Exact JWS Compact Serialization, and validate with the given key.
        If key is not provided, the returned dict will contain the signature,
        and signing input values. Via `Section 7.1`_.

        :param s: text of JWS Compact Serialization
        :param key: key used to verify the signature
        :param decode: a function to decode payload data
        :return: JWSObject
        :raise: BadSignatureError

        .. _`Section 7.1`: https://tools.ietf.org/html/rfc7515#section-7.1
        """
        if len(s) > self.MAX_CONTENT_LENGTH:
            raise ValueError("Serialization is too long.")

        try:
            s = to_bytes(s)
            # Compact form has exactly three dot-separated segments.
            signing_input, signature_segment = s.rsplit(b".", 1)
            protected_segment, payload_segment = signing_input.split(b".", 1)
        except ValueError as exc:
            raise DecodeError("Not enough segments") from exc

        protected = _extract_header(protected_segment)
        self._validate_crit_headers(protected)
        jws_header = JWSHeader(protected, None)

        payload = _extract_payload(payload_segment)
        if decode:
            payload = decode(payload)

        signature = _extract_signature(signature_segment)

        rv = JWSObject(jws_header, payload, "compact")
        algorithm, key = self._prepare_algorithm_key(jws_header, payload, key)
        if algorithm.verify(signing_input, signature, key):
            return rv
        raise BadSignatureError(rv)

    def serialize_json(self, header_obj, payload, key):
        """Generate a JWS JSON Serialization. The JWS JSON Serialization
        represents digitally signed or MACed content as a JSON object,
        per `Section 7.2`_.

        :param header_obj: A dict/list of header
        :param payload: A string/dict of payload
        :param key: Private key used to generate signature
        :return: JWSObject

        Example ``header_obj`` of JWS JSON Serialization::

            {
                "protected": {"alg": "HS256"},
                "header": {"kid": "jose"}
            }

        Pass a dict to generate flattened JSON Serialization, pass a list of
        header dict to generate standard JSON Serialization.
        """
        payload_segment = json_b64encode(payload)

        def _sign(jws_header):
            # Produce one signature object for a single header pair.
            self._validate_private_headers(jws_header)
            # RFC 7515 §4.1.11: 'crit' MUST be integrity-protected.
            # Reject if present in unprotected header, and validate only
            # against the protected header parameters.
            self._reject_unprotected_crit(jws_header.header)
            self._validate_crit_headers(jws_header.protected)
            _alg, _key = self._prepare_algorithm_key(jws_header, payload, key)

            protected_segment = json_b64encode(jws_header.protected)
            signing_input = b".".join([protected_segment, payload_segment])
            signature = urlsafe_b64encode(_alg.sign(signing_input, _key))

            rv = {
                "protected": to_unicode(protected_segment),
                "signature": to_unicode(signature),
            }
            if jws_header.header is not None:
                rv["header"] = jws_header.header
            return rv

        if isinstance(header_obj, dict):
            # Flattened JSON serialization: single signature at top level.
            data = _sign(JWSHeader.from_dict(header_obj))
            data["payload"] = to_unicode(payload_segment)
            return data

        # General JSON serialization: one entry per header pair.
        signatures = [_sign(JWSHeader.from_dict(h)) for h in header_obj]
        return {"payload": to_unicode(payload_segment), "signatures": signatures}

    def deserialize_json(self, obj, key, decode=None):
        """Exact JWS JSON Serialization, and validate with the given key.
        If key is not provided, it will return a dict without signature
        verification. Header will still be validated. Via `Section 7.2`_.

        :param obj: text of JWS JSON Serialization
        :param key: key used to verify the signature
        :param decode: a function to decode payload data
        :return: JWSObject
        :raise: BadSignatureError

        .. _`Section 7.2`: https://tools.ietf.org/html/rfc7515#section-7.2
        """
        obj = ensure_dict(obj, "JWS")

        payload_segment = obj.get("payload")
        if payload_segment is None:
            raise DecodeError('Missing "payload" value')

        payload_segment = to_bytes(payload_segment)
        payload = _extract_payload(payload_segment)
        if decode:
            payload = decode(payload)

        if "signatures" not in obj:
            # flattened JSON JWS
            jws_header, valid = self._validate_json_jws(
                payload_segment, payload, obj, key
            )

            rv = JWSObject(jws_header, payload, "flat")
            if valid:
                return rv
            raise BadSignatureError(rv)

        # General JSON JWS: every signature entry must verify.
        headers = []
        is_valid = True
        for header_obj in obj["signatures"]:
            jws_header, valid = self._validate_json_jws(
                payload_segment, payload, header_obj, key
            )
            headers.append(jws_header)
            if not valid:
                is_valid = False

        rv = JWSObject(headers, payload, "json")
        if is_valid:
            return rv
        raise BadSignatureError(rv)

    def serialize(self, header, payload, key):
        """Generate a JWS Serialization. It will automatically generate a
        Compact or JSON Serialization depending on the given header. If a
        header is in a JSON header format, it will call
        :meth:`serialize_json`, otherwise it will call
        :meth:`serialize_compact`.

        :param header: A dict/list of header
        :param payload: A string/dict of payload
        :param key: Private key used to generate signature
        :return: byte/dict
        """
        if isinstance(header, (list, tuple)):
            return self.serialize_json(header, payload, key)
        if "protected" in header:
            return self.serialize_json(header, payload, key)
        return self.serialize_compact(header, payload, key)

    def deserialize(self, s, key, decode=None):
        """Deserialize JWS Serialization, both compact and JSON format.
        It will automatically deserialize depending on the given JWS.

        :param s: text of JWS Compact/JSON Serialization
        :param key: key used to verify the signature
        :param decode: a function to decode payload data
        :return: dict
        :raise: BadSignatureError

        If key is not provided, it will still deserialize the serialization
        without verification.
        """
        if isinstance(s, dict):
            return self.deserialize_json(s, key, decode)

        s = to_bytes(s)
        if s.startswith(b"{") and s.endswith(b"}"):
            return self.deserialize_json(s, key, decode)
        return self.deserialize_compact(s, key, decode)

    def _prepare_algorithm_key(self, header, payload, key):
        # Resolve the algorithm implementation from the 'alg' header, applying
        # the instance allow-list, then normalize the key via the algorithm.
        if "alg" not in header:
            raise MissingAlgorithmError()

        alg = header["alg"]
        if self._algorithms is not None and alg not in self._algorithms:
            raise UnsupportedAlgorithmError()
        if alg not in self.ALGORITHMS_REGISTRY:
            raise UnsupportedAlgorithmError()

        algorithm = self.ALGORITHMS_REGISTRY[alg]
        if callable(key):
            # A callable key acts as a resolver: key(header, payload).
            key = key(header, payload)
        elif key is None and "jwk" in header:
            # NOTE(review): falling back to the embedded 'jwk' header means the
            # message supplies its own verification key -- confirm callers only
            # rely on this where the key is otherwise authenticated.
            key = header["jwk"]
        key = algorithm.prepare_key(key)
        return algorithm, key

    def _validate_private_headers(self, header):
        # only validate private headers when developers set
        # private headers explicitly
        if self._private_headers is not None:
            names = self.REGISTERED_HEADER_PARAMETER_NAMES.copy()
            names = names.union(self._private_headers)

            for k in header:
                if k not in names:
                    raise InvalidHeaderParameterNameError(k)

    def _reject_unprotected_crit(self, unprotected_header):
        """Reject 'crit' when found in the unprotected header (RFC 7515 §4.1.11)."""
        if unprotected_header and "crit" in unprotected_header:
            raise InvalidHeaderParameterNameError("crit")

    def _validate_crit_headers(self, header):
        # Enforce must-understand semantics: every name listed in 'crit' must
        # be a known header parameter AND be present in the header itself.
        if "crit" in header:
            crit_headers = header["crit"]

            # Type enforcement for robustness and predictable errors
            if not isinstance(crit_headers, list) or not all(
                isinstance(x, str) for x in crit_headers
            ):
                raise InvalidHeaderParameterNameError("crit")

            names = self.REGISTERED_HEADER_PARAMETER_NAMES.copy()
            if self._private_headers:
                names = names.union(self._private_headers)

            for k in crit_headers:
                if k not in names:
                    raise InvalidCritHeaderParameterNameError(k)
                elif k not in header:
                    raise InvalidCritHeaderParameterNameError(k)

    def _validate_json_jws(self, payload_segment, payload, header_obj, key):
        # Validate one signature entry of a JSON serialization; returns the
        # parsed header and whether the signature verified.
        protected_segment = header_obj.get("protected")
        if not protected_segment:
            raise DecodeError('Missing "protected" value')

        signature_segment = header_obj.get("signature")
        if not signature_segment:
            raise DecodeError('Missing "signature" value')

        protected_segment = to_bytes(protected_segment)
        protected = _extract_header(protected_segment)

        header = header_obj.get("header")
        if header and not isinstance(header, dict):
            raise DecodeError('Invalid "header" value')

        # RFC 7515 §4.1.11: 'crit' MUST be integrity-protected. If present in
        # the unprotected header object, reject the JWS.
        self._reject_unprotected_crit(header)
        # Enforce must-understand semantics for names listed in protected
        # 'crit'. This will also ensure each listed name is present in the
        # protected header.
        self._validate_crit_headers(protected)

        jws_header = JWSHeader(protected, header)
        algorithm, key = self._prepare_algorithm_key(jws_header, payload, key)

        signing_input = b".".join([protected_segment, payload_segment])
        signature = _extract_signature(to_bytes(signature_segment))
        if algorithm.verify(signing_input, signature, key):
            return jws_header, True
        return jws_header, False
def _extract_header(header_segment):
    # Parse the base64url-encoded protected header segment, passing
    # DecodeError as the error class raised on malformed input.
    return extract_header(header_segment, DecodeError)
def _extract_signature(signature_segment):
    # Decode the base64url signature segment, raising DecodeError on failure.
    return extract_segment(signature_segment, DecodeError, "signature")
def _extract_payload(payload_segment):
    # Decode the base64url payload segment, raising DecodeError on failure.
    return extract_segment(payload_segment, DecodeError, "payload")
| JsonWebSignature |
python | getsentry__sentry-python | sentry_sdk/consts.py | {
"start": 23797,
"end": 24547
class ____:
    """
    The status of a Sentry span.

    See: https://develop.sentry.dev/sdk/event-payloads/contexts/#trace-context
    """

    # The string values are what gets serialized into the event payload
    # (see the trace-context spec linked above).
    ABORTED = "aborted"
    ALREADY_EXISTS = "already_exists"
    CANCELLED = "cancelled"
    DATA_LOSS = "data_loss"
    DEADLINE_EXCEEDED = "deadline_exceeded"
    FAILED_PRECONDITION = "failed_precondition"
    INTERNAL_ERROR = "internal_error"
    INVALID_ARGUMENT = "invalid_argument"
    NOT_FOUND = "not_found"
    OK = "ok"
    OUT_OF_RANGE = "out_of_range"
    PERMISSION_DENIED = "permission_denied"
    RESOURCE_EXHAUSTED = "resource_exhausted"
    UNAUTHENTICATED = "unauthenticated"
    UNAVAILABLE = "unavailable"
    UNIMPLEMENTED = "unimplemented"
    UNKNOWN_ERROR = "unknown_error"
| SPANSTATUS |
python | django-import-export__django-import-export | tests/core/models.py | {
"start": 4437,
"end": 4568
class ____(Book):
    """Book proxy model to have a separate admin url access and name"""

    class Meta:
        # Proxy model: shares Book's database table; exists only to allow a
        # separate admin registration/url.
        proxy = True
| EBook |
python | huggingface__transformers | src/transformers/models/aya_vision/modeling_aya_vision.py | {
"start": 1738,
"end": 3950
class ____(nn.Module):
    """Projects vision-encoder patch embeddings into the language model's hidden
    space: pixel-shuffle downsampling, LayerNorm, then a SwiGLU projection."""

    def __init__(self, config: AyaVisionConfig):
        super().__init__()
        self.config = config
        self.downsample_factor = config.downsample_factor
        # Width of the intermediate SwiGLU projection; falls back to the text
        # model's hidden size when not set on the config.
        self.alignment_intermediate_size = getattr(
            config, "alignment_intermediate_size", config.text_config.hidden_size
        )
        # Pixel shuffle concatenates downsample_factor**2 patches per output
        # token, hence the scaled feature width for the norm and first linear.
        self.layernorm = nn.LayerNorm(
            config.vision_config.hidden_size * (config.downsample_factor**2), eps=config.adapter_layer_norm_eps
        )
        self.linear_1 = nn.Linear(
            config.vision_config.hidden_size * (config.downsample_factor**2),
            self.alignment_intermediate_size,
            bias=True,
        )
        self.act = ACT2FN["silu"]  # SwiGLU uses SiLU activation
        # For SwiGLU, project down to half size since we split intermediate dim
        self.linear_2 = nn.Linear(self.alignment_intermediate_size // 2, config.text_config.hidden_size, bias=True)

    def forward(self, image_features):
        # assumes image_features is (batch, seq, vision_hidden_size) with seq a
        # perfect square grid of patches -- TODO confirm against vision tower
        image_features = self.pixel_shuffle(image_features)
        image_features = self.layernorm(image_features)
        hidden_states = self.linear_1(image_features)

        # Split along last dimension and apply SwiGLU
        x, gate = hidden_states.chunk(2, dim=-1)
        hidden_states = self.act(gate) * x

        hidden_states = self.linear_2(hidden_states)
        return hidden_states

    def pixel_shuffle(self, image_features):  # B, S, D
        # Trades spatial resolution for channel depth: the reshape/permute
        # sequence yields seq/downsample_factor**2 tokens, each carrying
        # D * downsample_factor**2 features.
        batch_size, seq_length, feature_dim = image_features.shape
        height = width = int(seq_length**0.5)  # seq is assumed to be a square grid
        image_features = image_features.reshape(image_features.shape[0], width, height, -1)
        channels = image_features.shape[-1]
        image_features = image_features.reshape(
            batch_size, width, int(height / self.downsample_factor), int(channels * self.downsample_factor)
        )
        image_features = image_features.permute(0, 2, 1, 3)
        image_features = image_features.reshape(
            batch_size, int(height / self.downsample_factor), int(width / self.downsample_factor), -1
        )
        image_features = image_features.permute(0, 2, 1, 3)
        return image_features
@auto_docstring
| AyaVisionMultiModalProjector |
python | networkx__networkx | networkx/readwrite/graphml.py | {
"start": 12942,
"end": 15527
class ____:
    """Shared base for GraphML readers/writers: namespace constants and the
    Python <-> GraphML ``attr.type`` conversion tables."""

    NS_GRAPHML = "http://graphml.graphdrawing.org/xmlns"
    NS_XSI = "http://www.w3.org/2001/XMLSchema-instance"
    # xmlns:y="http://www.yworks.com/xml/graphml"
    NS_Y = "http://www.yworks.com/xml/graphml"
    SCHEMALOCATION = " ".join(
        [
            "http://graphml.graphdrawing.org/xmlns",
            "http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd",
        ]
    )

    def construct_types(self):
        """Build ``self.xml_type`` (Python type -> GraphML type name) and
        ``self.python_type`` (GraphML type name -> Python type).

        Both tables are built with ``dict(...)``, so for duplicate keys the
        *last* entry wins; the ordering of ``types`` is therefore significant.
        """
        types = [
            (int, "integer"),  # for Gephi GraphML bug
            (str, "yfiles"),
            (str, "string"),
            (int, "int"),
            (int, "long"),
            (float, "float"),
            (float, "double"),
            (bool, "boolean"),
        ]

        # These additions to types allow writing numpy types
        try:
            import numpy as np
        except ImportError:
            # numpy is an optional dependency; without it only builtin Python
            # types are convertible.  (Was a bare ``except:``, which would also
            # have swallowed KeyboardInterrupt/SystemExit.)
            pass
        else:
            # prepend so that python types are created upon read (last entry wins)
            types = [
                (np.float64, "float"),
                (np.float32, "float"),
                (np.float16, "float"),
                (np.int_, "int"),
                (np.int8, "int"),
                (np.int16, "int"),
                (np.int32, "int"),
                (np.int64, "int"),
                (np.uint8, "int"),
                (np.uint16, "int"),
                (np.uint32, "int"),
                (np.uint64, "int"),
                (np.intc, "int"),
                (np.intp, "int"),
            ] + types

        self.xml_type = dict(types)
        self.python_type = dict(reversed(a) for a in types)

    # This page says that data types in GraphML follow Java(TM).
    # http://graphml.graphdrawing.org/primer/graphml-primer.html#AttributesDefinition
    # true and false are the only boolean literals:
    # http://en.wikibooks.org/wiki/Java_Programming/Literals#Boolean_Literals
    convert_bool = {
        # We use data.lower() in actual use.
        "true": True,
        "false": False,
        # Include integer strings for convenience.
        "0": False,
        0: False,
        "1": True,
        1: True,
    }

    def get_xml_type(self, key):
        """Wrapper around the xml_type dict that raises a more informative
        exception message when a user attempts to use data of a type not
        supported by GraphML."""
        try:
            return self.xml_type[key]
        except KeyError as err:
            raise TypeError(
                f"GraphML does not support type {key} as data values."
            ) from err
| GraphML |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methodOverride3.py | {
"start": 1999,
"end": 2226
class ____(G1[[], str]):
    # Negative type-checker sample: the base is specialized with an empty
    # parameter list, so an override that adds positional parameters is an
    # incompatible override and must be flagged.
    # This should generate an error because the specialized
    # signature of f in the base class has no positional parameters.
    def f(self, a: int, b: int) -> str: ...

    def g(self) -> str: ...
| G5 |
python | numpy__numpy | numpy/_core/tests/test_dtype.py | {
"start": 24148,
"end": 32048
} | class ____:
def test_single_subarray(self):
a = np.dtype((int, (2)))
b = np.dtype((int, (2,)))
assert_dtype_equal(a, b)
assert_equal(type(a.subdtype[1]), tuple)
assert_equal(type(b.subdtype[1]), tuple)
def test_equivalent_record(self):
"""Test whether equivalent subarray dtypes hash the same."""
a = np.dtype((int, (2, 3)))
b = np.dtype((int, (2, 3)))
assert_dtype_equal(a, b)
def test_nonequivalent_record(self):
"""Test whether different subarray dtypes hash differently."""
a = np.dtype((int, (2, 3)))
b = np.dtype((int, (3, 2)))
assert_dtype_not_equal(a, b)
a = np.dtype((int, (2, 3)))
b = np.dtype((int, (2, 2)))
assert_dtype_not_equal(a, b)
a = np.dtype((int, (1, 2, 3)))
b = np.dtype((int, (1, 2)))
assert_dtype_not_equal(a, b)
def test_shape_equal(self):
"""Test some data types that are equal"""
assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', ())))
assert_dtype_equal(np.dtype('(1,)f8'), np.dtype(('f8', 1)))
assert np.dtype(('f8', 1)).shape == (1,)
assert_dtype_equal(np.dtype((int, 2)), np.dtype((int, (2,))))
assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2))))
d = ([('a', 'f4', (1, 2)), ('b', 'f8', (3, 1))], (3, 2))
assert_dtype_equal(np.dtype(d), np.dtype(d))
def test_shape_simple(self):
"""Test some simple cases that shouldn't be equal"""
assert_dtype_not_equal(np.dtype('f8'), np.dtype(('f8', (1,))))
assert_dtype_not_equal(np.dtype(('f8', (1,))), np.dtype(('f8', (1, 1))))
assert_dtype_not_equal(np.dtype(('f4', (3, 2))), np.dtype(('f4', (2, 3))))
def test_shape_monster(self):
"""Test some more complicated cases that shouldn't be equal"""
assert_dtype_not_equal(
np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
np.dtype(([('a', 'f4', (1, 2)), ('b', 'f8', (1, 3))], (2, 2))))
assert_dtype_not_equal(
np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
np.dtype(([('a', 'f4', (2, 1)), ('b', 'i8', (1, 3))], (2, 2))))
assert_dtype_not_equal(
np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
np.dtype(([('e', 'f8', (1, 3)), ('d', 'f4', (2, 1))], (2, 2))))
assert_dtype_not_equal(
np.dtype(([('a', [('a', 'i4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
np.dtype(([('a', [('a', 'u4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))))
def test_shape_sequence(self):
# Any sequence of integers should work as shape, but the result
# should be a tuple (immutable) of base type integers.
a = np.array([1, 2, 3], dtype=np.int16)
l = [1, 2, 3]
# Array gets converted
dt = np.dtype([('a', 'f4', a)])
assert_(isinstance(dt['a'].shape, tuple))
assert_(isinstance(dt['a'].shape[0], int))
# List gets converted
dt = np.dtype([('a', 'f4', l)])
assert_(isinstance(dt['a'].shape, tuple))
#
class IntLike:
def __index__(self):
return 3
def __int__(self):
# (a PyNumber_Check fails without __int__)
return 3
dt = np.dtype([('a', 'f4', IntLike())])
assert_(isinstance(dt['a'].shape, tuple))
assert_(isinstance(dt['a'].shape[0], int))
dt = np.dtype([('a', 'f4', (IntLike(),))])
assert_(isinstance(dt['a'].shape, tuple))
assert_(isinstance(dt['a'].shape[0], int))
def test_shape_matches_ndim(self):
dt = np.dtype([('a', 'f4', ())])
assert_equal(dt['a'].shape, ())
assert_equal(dt['a'].ndim, 0)
dt = np.dtype([('a', 'f4')])
assert_equal(dt['a'].shape, ())
assert_equal(dt['a'].ndim, 0)
dt = np.dtype([('a', 'f4', 4)])
assert_equal(dt['a'].shape, (4,))
assert_equal(dt['a'].ndim, 1)
dt = np.dtype([('a', 'f4', (1, 2, 3))])
assert_equal(dt['a'].shape, (1, 2, 3))
assert_equal(dt['a'].ndim, 3)
def test_shape_invalid(self):
# Check that the shape is valid.
max_int = np.iinfo(np.intc).max
max_intp = np.iinfo(np.intp).max
# Too large values (the datatype is part of this)
assert_raises(ValueError, np.dtype, [('a', 'f4', max_int // 4 + 1)])
assert_raises(ValueError, np.dtype, [('a', 'f4', max_int + 1)])
assert_raises(ValueError, np.dtype, [('a', 'f4', (max_int, 2))])
# Takes a different code path (fails earlier:
assert_raises(ValueError, np.dtype, [('a', 'f4', max_intp + 1)])
# Negative values
assert_raises(ValueError, np.dtype, [('a', 'f4', -1)])
assert_raises(ValueError, np.dtype, [('a', 'f4', (-1, -1))])
def test_alignment(self):
# Check that subarrays are aligned
t1 = np.dtype('(1,)i4', align=True)
t2 = np.dtype('2i4', align=True)
assert_equal(t1.alignment, t2.alignment)
def test_aligned_empty(self):
# Mainly regression test for gh-19696: construction failed completely
dt = np.dtype([], align=True)
assert dt == np.dtype([])
dt = np.dtype({"names": [], "formats": [], "itemsize": 0}, align=True)
assert dt == np.dtype([])
def test_subarray_base_item(self):
arr = np.ones(3, dtype=[("f", "i", 3)])
# Extracting the field "absorbs" the subarray into a view:
assert arr["f"].base is arr
# Extract the structured item, and then check the tuple component:
item = arr.item(0)
assert type(item) is tuple and len(item) == 1
assert item[0].base is arr
def test_subarray_cast_copies(self):
# Older versions of NumPy did NOT copy, but they got the ownership
# wrong (not actually knowing the correct base!). Versions since 1.21
# (I think) crashed fairly reliable. This defines the correct behavior
# as a copy. Keeping the ownership would be possible (but harder)
arr = np.ones(3, dtype=[("f", "i", 3)])
cast = arr.astype(object)
for fields in cast:
assert type(fields) == tuple and len(fields) == 1
subarr = fields[0]
assert subarr.base is None
assert subarr.flags.owndata
def iter_struct_object_dtypes():
"""
Iterates over a few complex dtypes and object pattern which
fill the array with a given object (defaults to a singleton).
Yields
------
dtype : dtype
pattern : tuple
Structured tuple for use with `np.array`.
count : int
Number of objects stored in the dtype.
singleton : object
A singleton object. The returned pattern is constructed so that
all objects inside the datatype are set to the singleton.
"""
obj = object()
dt = np.dtype([('b', 'O', (2, 3))])
p = ([[obj] * 3] * 2,)
yield pytest.param(dt, p, 6, obj, id="<subarray>")
dt = np.dtype([('a', 'i4'), ('b', 'O', (2, 3))])
p = (0, [[obj] * 3] * 2)
yield pytest.param(dt, p, 6, obj, id="<subarray in field>")
dt = np.dtype([('a', 'i4'),
('b', [('ba', 'O'), ('bb', 'i1')], (2, 3))])
p = (0, [[(obj, 0)] * 3] * 2)
yield pytest.param(dt, p, 6, obj, id="<structured subarray 1>")
dt = np.dtype([('a', 'i4'),
('b', [('ba', 'O'), ('bb', 'O')], (2, 3))])
p = (0, [[(obj, obj)] * 3] * 2)
yield pytest.param(dt, p, 12, obj, id="<structured subarray 2>")
@pytest.mark.skipif(
sys.version_info >= (3, 12),
reason="Python 3.12 has immortal refcounts, this test will no longer "
"work. See gh-23986"
)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
| TestSubarray |
python | scipy__scipy | scipy/stats/_distn_infrastructure.py | {
"start": 147046,
"end": 153570
} | class ____(rv_discrete):
"""A 'sample' discrete distribution defined by the support and values.
The ctor ignores most of the arguments, only needs the `values` argument.
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, seed=None):
super(rv_discrete, self).__init__(seed)
if values is None:
raise ValueError("rv_sample.__init__(..., values=None,...)")
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, seed=seed)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.moment_tol = moment_tol
self.inc = inc
self.shapes = shapes
self.vecentropy = self._entropy
xk, pk = values
if np.shape(xk) != np.shape(pk):
raise ValueError("xk and pk must have the same shape.")
if np.less(pk, 0.0).any():
raise ValueError("All elements of pk must be non-negative.")
if not np.allclose(np.sum(pk), 1):
raise ValueError("The sum of provided pk is not 1.")
if not len(set(np.ravel(xk))) == np.size(xk):
raise ValueError("xk may not contain duplicate values.")
indx = np.argsort(np.ravel(xk))
self.xk = np.take(np.ravel(xk), indx, 0)
self.pk = np.take(np.ravel(pk), indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.qvals = np.cumsum(self.pk, axis=0)
self.shapes = ' ' # bypass inspection
self._construct_argparser(meths_to_inspect=[self._pmf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
self._attach_methods()
self._construct_docstrings(name, longname)
def __getstate__(self):
dct = self.__dict__.copy()
# these methods will be remade in rv_generic.__setstate__,
# which calls rv_generic._attach_methods
attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs"]
[dct.pop(attr, None) for attr in attrs]
return dct
def _attach_methods(self):
"""Attaches dynamically created argparser methods."""
self._attach_argparser_methods()
def _get_support(self, *args):
"""Return the support of the (unscaled, unshifted) distribution.
Parameters
----------
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
a, b : numeric (float, or int or +/-np.inf)
end-points of the distribution's support.
"""
return self.a, self.b
def _pmf(self, x):
return np.select([x == k for k in self.xk],
[np.broadcast_arrays(p, x)[0] for p in self.pk], 0)
def _cdf(self, x):
xx, xxk = np.broadcast_arrays(x[:, None], self.xk)
indx = np.argmax(xxk > xx, axis=-1) - 1
return self.qvals[indx]
def _ppf(self, q):
qq, sqq = np.broadcast_arrays(q[..., None], self.qvals)
indx = argmax(sqq >= qq, axis=-1)
return self.xk[indx]
def _rvs(self, size=None, random_state=None):
# Need to define it explicitly, otherwise .rvs() with size=None
# fails due to explicit broadcasting in _ppf
U = random_state.uniform(size=size)
if size is None:
U = np.array(U, ndmin=1)
Y = self._ppf(U)[0]
else:
Y = self._ppf(U)
return Y
def _entropy(self):
return stats.entropy(self.pk)
def generic_moment(self, n):
n = asarray(n)
return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
def _expect(self, fun, lb, ub, *args, **kwds):
# ignore all args, just do a brute force summation
supp = self.xk[(lb <= self.xk) & (self.xk <= ub)]
vals = fun(supp)
return np.sum(vals)
def _check_shape(argshape, size):
"""
This is a utility function used by `_rvs()` in the class geninvgauss_gen.
It compares the tuple argshape to the tuple size.
Parameters
----------
argshape : tuple of integers
Shape of the arguments.
size : tuple of integers or integer
Size argument of rvs().
Returns
-------
The function returns two tuples, scalar_shape and bc.
scalar_shape : tuple
Shape to which the 1-d array of random variates returned by
_rvs_scalar() is converted when it is copied into the
output array of _rvs().
bc : tuple of booleans
bc is an tuple the same length as size. bc[j] is True if the data
associated with that index is generated in one call of _rvs_scalar().
"""
scalar_shape = []
bc = []
for argdim, sizedim in zip_longest(argshape[::-1], size[::-1],
fillvalue=1):
if sizedim > argdim or (argdim == sizedim == 1):
scalar_shape.append(sizedim)
bc.append(True)
else:
bc.append(False)
return tuple(scalar_shape[::-1]), tuple(bc[::-1])
def get_distribution_names(namespace_pairs, rv_base_class):
"""Collect names of statistical distributions and their generators.
Parameters
----------
namespace_pairs : sequence
A snapshot of (name, value) pairs in the namespace of a module.
rv_base_class : class
The base class of random variable generator classes in a module.
Returns
-------
distn_names : list of strings
Names of the statistical distributions.
distn_gen_names : list of strings
Names of the generators of the statistical distributions.
Note that these are not simply the names of the statistical
distributions, with a _gen suffix added.
"""
distn_names = []
distn_gen_names = []
for name, value in namespace_pairs:
if name.startswith('_'):
continue
if name.endswith('_gen') and issubclass(value, rv_base_class):
distn_gen_names.append(name)
if isinstance(value, rv_base_class):
distn_names.append(name)
return distn_names, distn_gen_names
| rv_sample |
python | huggingface__transformers | src/transformers/models/voxtral/modeling_voxtral.py | {
"start": 9046,
"end": 9614
} | class ____(PreTrainedModel):
config: VoxtralConfig
base_model_prefix = "model"
input_modalities = ("audio", "text")
supports_gradient_checkpointing = True
_no_split_modules = None
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_cache_class = True
_supports_attention_backend = True
_can_compile_fullgraph = True
@auto_docstring(
custom_intro="""
The Voxtral encoder, which is a Whisper encoder.
"""
)
| VoxtralPreTrainedModel |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess22.py | {
"start": 224,
"end": 358
} | class ____:
@classmethod
def method1(cls) -> None:
cls.method2
@contextmanager
def method2(self):
yield
| A |
python | ray-project__ray | python/ray/data/tests/test_arrow_serialization.py | {
"start": 19358,
"end": 20796
} | class ____(pa.ExtensionType):
def __init__(
self,
value_type: pa.DataType,
ndim: int,
) -> None:
self.value_type = value_type
self.ndim = ndim
super().__init__(
pa.struct(
[
pa.field("data", pa.list_(value_type)),
pa.field("shape", pa.list_(pa.int32(), ndim)),
]
),
"variable_shape_tensor",
)
def __arrow_ext_serialize__(self) -> bytes:
return b""
@classmethod
def __arrow_ext_deserialize__(cls, storage_type: pa.DataType, serialized: bytes):
ndim = storage_type[1].type.list_size
value_type = storage_type[0].type.value_type
return cls(value_type, ndim)
def test_variable_shape_tensor_serialization():
t = _VariableShapeTensorType(pa.float32(), 2)
values = [
{
"data": np.arange(2 * 3, dtype=np.float32).tolist(),
"shape": [2, 3],
},
{
"data": np.arange(4 * 5, dtype=np.float32).tolist(),
"shape": [4, 5],
},
]
storage = pa.array(values, type=t.storage_type)
ar = pa.ExtensionArray.from_storage(t, storage)
payload = PicklableArrayPayload.from_array(ar)
ar2 = payload.to_array()
assert ar == ar2
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| _VariableShapeTensorType |
python | numpy__numpy | numpy/ma/core.py | {
"start": 78959,
"end": 82068
} | class ____:
"""
Flat iterator object to iterate over masked arrays.
A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array
`x`. It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
MaskedArray.flat : Return a flat iterator over an array.
MaskedArray.flatten : Returns a flattened copy of an array.
Notes
-----
`MaskedIterator` is not exported by the `ma` module. Instead of
instantiating a `MaskedIterator` directly, use `MaskedArray.flat`.
Examples
--------
>>> import numpy as np
>>> x = np.ma.array(arange(6).reshape(2, 3))
>>> fl = x.flat
>>> type(fl)
<class 'numpy.ma.MaskedIterator'>
>>> for item in fl:
... print(item)
...
0
1
2
3
4
5
Extracting more than a single element b indexing the `MaskedIterator`
returns a masked array:
>>> fl[2:4]
masked_array(data = [2 3],
mask = False,
fill_value = 999999)
"""
def __init__(self, ma):
self.ma = ma
self.dataiter = ma._data.flat
if ma._mask is nomask:
self.maskiter = None
else:
self.maskiter = ma._mask.flat
def __iter__(self):
return self
def __getitem__(self, indx):
result = self.dataiter.__getitem__(indx).view(type(self.ma))
if self.maskiter is not None:
_mask = self.maskiter.__getitem__(indx)
if isinstance(_mask, ndarray):
# set shape to match that of data; this is needed for matrices
_mask.shape = result.shape
result._mask = _mask
elif isinstance(_mask, np.void):
return mvoid(result, mask=_mask, hardmask=self.ma._hardmask)
elif _mask: # Just a scalar, masked
return masked
return result
# This won't work if ravel makes a copy
def __setitem__(self, index, value):
self.dataiter[index] = getdata(value)
if self.maskiter is not None:
self.maskiter[index] = getmaskarray(value)
def __next__(self):
"""
Return the next value, or raise StopIteration.
Examples
--------
>>> import numpy as np
>>> x = np.ma.array([3, 2], mask=[0, 1])
>>> fl = x.flat
>>> next(fl)
3
>>> next(fl)
masked
>>> next(fl)
Traceback (most recent call last):
...
StopIteration
"""
d = next(self.dataiter)
if self.maskiter is not None:
m = next(self.maskiter)
if isinstance(m, np.void):
return mvoid(d, mask=m, hardmask=self.ma._hardmask)
elif m: # Just a scalar, masked
return masked
return d
@set_module("numpy.ma")
| MaskedIterator |
python | pytorch__pytorch | test/inductor/test_remote_cache.py | {
"start": 618,
"end": 872
} | class ____(RemoteCache):
def __init__(self):
super().__init__(FailingBackend(), RemoteCachePassthroughSerde())
def _create_sample(self):
return TestSample()
def _log_sample(self, sample):
self.sample = sample
| FakeCache |
python | huggingface__transformers | src/transformers/models/aria/modular_aria.py | {
"start": 54583,
"end": 54918
} | class ____(LlamaModel):
def __init__(self, config: AriaTextConfig):
super().__init__(config)
self.layers = nn.ModuleList(
[AriaTextDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.gradient_checkpointing = False
self.post_init()
| AriaTextModel |
python | apache__airflow | providers/google/src/airflow/providers/google/firebase/operators/firestore.py | {
"start": 1248,
"end": 3938
} | class ____(BaseOperator):
"""
Export documents from Google Cloud Firestore to another storage system, such as Google Cloud Storage.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudFirestoreExportDatabaseOperator`
:param database_id: The Database ID.
:param body: The request body.
See:
https://firebase.google.com/docs/firestore/reference/rest/v1beta1/projects.databases/exportDocuments
:param project_id: ID of the Google Cloud project if None then
default project_id is used.
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:param api_version: API version used (for example v1 or v1beta1).
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"body",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
def __init__(
self,
*,
body: dict,
database_id: str = "(default)",
project_id: str = PROVIDE_PROJECT_ID,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.database_id = database_id
self.body = body
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self._validate_inputs()
self.impersonation_chain = impersonation_chain
def _validate_inputs(self) -> None:
if not self.body:
raise AirflowException("The required parameter 'body' is missing")
def execute(self, context: Context):
hook = CloudFirestoreHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
return hook.export_documents(database_id=self.database_id, body=self.body, project_id=self.project_id)
| CloudFirestoreExportDatabaseOperator |
python | sympy__sympy | sympy/physics/quantum/spin.py | {
"start": 48954,
"end": 49777
} | class ____(CoupledSpinState, Ket):
"""Coupled eigenket of Jx.
See JzKetCoupled for the usage of coupled spin eigenstates.
See Also
========
JzKetCoupled: Usage of coupled spin states
"""
@classmethod
def dual_class(self):
return JxBraCoupled
@classmethod
def uncoupled_class(self):
return JxKet
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JxOp(self, basis, **options):
return self._represent_coupled_base(**options)
def _represent_JyOp(self, basis, **options):
return self._represent_coupled_base(alpha=pi*Rational(3, 2), **options)
def _represent_JzOp(self, basis, **options):
return self._represent_coupled_base(beta=pi/2, **options)
| JxKetCoupled |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/totalOrdering1.py | {
"start": 118,
"end": 413
} | class ____:
val1: int
def __gt__(self, other: object) -> bool: ...
a = ClassA()
b = ClassA()
v1 = a < b
v2 = a <= b
v3 = a > b
v4 = a >= b
v5 = a == b
v6 = a != b
# This should generate an error because it doesn't declare
# any of the required ordering functions.
@total_ordering
| ClassA |
python | pytorch__pytorch | test/torch_np/numpy_tests/linalg/test_linalg.py | {
"start": 64964,
"end": 66424
} | class ____(TestCase):
# TODO: are there no other tests for cholesky?
@parametrize("shape", [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)])
@parametrize("dtype", (np.float32, np.float64, np.complex64, np.complex128))
def test_basic_property(self, shape, dtype):
# Check A = L L^H
np.random.seed(1)
a = np.random.randn(*shape)
if np.issubdtype(dtype, np.complexfloating):
a = a + 1j * np.random.randn(*shape)
t = list(range(len(shape)))
t[-2:] = -1, -2
a = np.matmul(a.transpose(t).conj(), a)
a = np.asarray(a, dtype=dtype)
c = np.linalg.cholesky(a)
b = np.matmul(c, c.transpose(t).conj())
atol = 500 * a.shape[0] * np.finfo(dtype).eps
assert_allclose(b, a, atol=atol, err_msg=f"{shape} {dtype}\n{a}\n{c}")
def test_0_size(self):
# class ArraySubclass(np.ndarray):
# pass
a = np.zeros((0, 1, 1), dtype=np.int_) # .view(ArraySubclass)
res = linalg.cholesky(a)
assert_equal(a.shape, res.shape)
assert_(res.dtype.type is np.float64)
# for documentation purpose:
assert_(isinstance(res, np.ndarray))
a = np.zeros((1, 0, 0), dtype=np.complex64) # .view(ArraySubclass)
res = linalg.cholesky(a)
assert_equal(a.shape, res.shape)
assert_(res.dtype.type is np.complex64)
assert_(isinstance(res, np.ndarray))
| TestCholesky |
python | keon__algorithms | algorithms/graph/tarjan.py | {
"start": 265,
"end": 2408
} | class ____:
"""
A directed graph used for finding strongly connected components
"""
def __init__(self, dict_graph):
self.graph = DirectedGraph(dict_graph)
self.index = 0
self.stack = []
# Runs Tarjan
# Set all node index to None
for vertex in self.graph.nodes:
vertex.index = None
self.sccs = []
for vertex in self.graph.nodes:
if vertex.index is None:
self.strongconnect(vertex, self.sccs)
def strongconnect(self, vertex, sccs):
"""
Given a vertex, adds all successors of the given vertex to the same connected component
"""
# Set the depth index for v to the smallest unused index
vertex.index = self.index
vertex.lowlink = self.index
self.index += 1
self.stack.append(vertex)
vertex.on_stack = True
# Consider successors of v
for adjacent in self.graph.adjacency_list[vertex]:
if adjacent.index is None:
# Successor w has not yet been visited; recurse on it
self.strongconnect(adjacent, sccs)
vertex.lowlink = min(vertex.lowlink, adjacent.lowlink)
elif adjacent.on_stack:
# Successor w is in stack S and hence in the current SCC
# If w is not on stack, then (v, w) is a cross-edge in the DFS
# tree and must be ignored
# Note: The next line may look odd - but is correct.
# It says w.index not w.lowlink; that is deliberate and from the original paper
vertex.lowlink = min(vertex.lowlink, adjacent.index)
# If v is a root node, pop the stack and generate an SCC
if vertex.lowlink == vertex.index:
# start a new strongly connected component
scc = []
while True:
adjacent = self.stack.pop()
adjacent.on_stack = False
scc.append(adjacent)
if adjacent == vertex:
break
scc.sort()
sccs.append(scc)
| Tarjan |
python | huggingface__transformers | src/transformers/models/esm/modeling_esmfold.py | {
"start": 7862,
"end": 9511
} | class ____(nn.Linear):
"""
A Linear layer with built-in nonstandard initializations. Called just like torch.nn.Linear.
Implements the initializers in 1.11.4, plus some additional ones found in the code.
"""
def __init__(
self,
in_dim: int,
out_dim: int,
bias: bool = True,
init: str = "default",
init_fn: Optional[Callable[[torch.Tensor, torch.Tensor], None]] = None,
):
"""
Args:
in_dim:
The final dimension of inputs to the layer
out_dim:
The final dimension of layer outputs
bias:
Whether to learn an additive bias. True by default
init:
The initializer to use. Choose from:
"default": LeCun fan-in truncated normal initialization "relu": He initialization w/ truncated normal
distribution "glorot": Fan-average Glorot uniform initialization "gating": Weights=0, Bias=1 "normal":
Normal initialization with std=1/sqrt(fan_in) "final": Weights=0, Bias=0
Overridden by init_fn if the latter is not None.
init_fn:
A custom initializer taking weight and bias as inputs. Overrides init if not None.
"""
super().__init__(in_dim, out_dim, bias=bias)
if bias:
with torch.no_grad():
self.bias.fill_(0)
self.init = init
self.init_fn = init_fn
if init not in ["default", "relu", "glorot", "gating", "normal", "final"]:
raise ValueError("Invalid init string.")
| EsmFoldLinear |
python | graphql-python__graphene | graphene/tests/issues/test_313.py | {
"start": 140,
"end": 207
} | class ____(graphene.ObjectType):
yeah = graphene.String()
| Success |
python | ray-project__ray | rllib/utils/tests/test_checkpointable.py | {
"start": 227,
"end": 3863
} | class ____(unittest.TestCase):
"""Tests the Checkpointable API."""
@classmethod
def setUpClass(cls) -> None:
ray.init()
@classmethod
def tearDownClass(cls) -> None:
ray.shutdown()
def test_checkpoint_backward_compatibility(self):
"""Tests backward compat. of checkpoints created with older versions of ray."""
# Get the directory of the current script
old_checkpoints_dir = Path(__file__).parent.resolve() / "old_checkpoints"
from ray.rllib.utils.tests.old_checkpoints.current_config import (
config as current_config,
)
for ray_version_dir in old_checkpoints_dir.iterdir():
import re
if not ray_version_dir.is_dir() or not re.search(
r"\Wray_[0-9_]+$", str(ray_version_dir)
):
continue
# Unzip checkpoint for that ray version into a temp directory.
with TemporaryDirectory() as temp_dir:
temp_path = Path(temp_dir)
# Extract the zip file to the temporary directory
shutil.unpack_archive(ray_version_dir / "checkpoint.zip", temp_path)
# Restore the algorithm from the (old) msgpack-checkpoint, using the
# current Ray version's `config` object.
algo = PPO.from_checkpoint(path=temp_dir, config=current_config)
learner_res = algo.train()[LEARNER_RESULTS]
# Assert that the correct per-policy learning rates were used.
assert (
learner_res["p0"]["default_optimizer_learning_rate"] == 0.00005
and learner_res["p1"]["default_optimizer_learning_rate"] == 0.0001
)
algo.stop()
# Second experiment: Add all the policies to the config again that were
# present when the checkpoint was taken and try `from_checkpoint` again.
expanded_config = current_config.copy(copy_frozen=False)
all_pols = {"p0", "p1", "p2", "p3"}
expanded_config.multi_agent(
policies=all_pols,
# Create some completely new mapping function (that has nothing to
# do with the checkpointed one).
policy_mapping_fn=(
lambda aid, eps, _p=tuple(all_pols), **kw: random.choice(_p)
),
policies_to_train=all_pols,
)
expanded_config.rl_module(
algorithm_config_overrides_per_module={
"p2": PPOConfig.overrides(lr=0.002),
"p3": PPOConfig.overrides(lr=0.003),
}
)
algo = PPO.from_checkpoint(path=temp_dir, config=expanded_config)
learner_res = algo.train()[LEARNER_RESULTS]
# Assert that the correct per-policy learning rates were used.
assert (
learner_res["p0"]["default_optimizer_learning_rate"] == 0.00005
and learner_res["p1"]["default_optimizer_learning_rate"] == 0.0001
and learner_res["p2"]["default_optimizer_learning_rate"] == 0.002
and learner_res["p3"]["default_optimizer_learning_rate"] == 0.003
)
algo.stop()
print(f"Algorithm restored and trained once. Learner results={learner_res}.")
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| TestCheckpointable |
python | ray-project__ray | python/ray/experimental/channel/conftest.py | {
"start": 2148,
"end": 2266
} | class ____:
def __init__(self):
self.cuda_stream = 0
def synchronize(self):
pass
| MockCudaStream |
python | allegroai__clearml | clearml/backend_api/services/v2_20/events.py | {
"start": 53989,
"end": 54250
} | class ____(Response):
"""
Response of events.clear_scroll endpoint.
"""
_service = "events"
_action = "clear_scroll"
_version = "2.20"
_schema = {"additionalProperties": False, "definitions": {}, "type": "object"}
| ClearScrollResponse |
python | getsentry__sentry | src/sentry/integrations/msteams/card_builder/utils.py | {
"start": 3835,
"end": 5516
} | class ____:
# NOTE: DATE and TIME are formatting functions in Adaptive Cards.
# The syntax is `{{DATE(<some_date>, SHORT)}}` or `{{TIME(<some_date>)}}`
# https://docs.microsoft.com/en-us/adaptive-cards/authoring-cards/text-features
# Since `{` and `}` are special characters in format strings, we need to use
# double `{{` and `}}` to get the actual character in. Hence the `{{{{` and `}}}}`.
DATE_FORMAT = "{{{{DATE({date}, SHORT)}}}} at {{{{TIME({date})}}}}"
ASSIGNEE_NOTE = "**Assigned to {assignee}**"
RESOLVE = "Resolve"
RESOLVE_INPUT_ID = "resolveInput"
RESOLVE_INPUT_CHOICES = [
("Immediately", "resolved"),
("In the current release", "resolved:inCurrentRelease"),
("In the next release", "resolved:inNextRelease"),
]
UNRESOLVE = "Unresolve"
ARCHIVE = "Archive"
ARCHIVE_INPUT_ID = "archiveInput"
ARCHIVE_INPUT_TITLE = "Archive until this happens again..."
ARCHIVE_INPUT_CHOICES = [
("Archive forever", -1),
("1 time", 1),
("10 times", 10),
("100 times", 100),
("1,000 times", 1000),
("10,000 times", 10000),
]
UNARCHIVE = "Unarchive"
ASSIGN = "Assign"
ASSIGN_INPUT_TITLE = "Assign to..."
ASSIGN_INPUT_ID = "assignInput"
UNASSIGN = "Unassign"
translator = str.maketrans({"&": "&", "<": "<", ">": ">", "_": "\\_"})
def escape_markdown_special_chars(text: str) -> str:
"""
Convert markdown special characters to markdown friendly alternatives.
docs - https://docs.microsoft.com/en-us/adaptive-cards/authoring-cards/text-features
"""
return text.translate(translator)
| IssueConstants |
python | pypa__hatch | tests/backend/version/scheme/test_standard.py | {
"start": 4537,
"end": 5092
} | class ____:
@pytest.mark.parametrize(
("operations", "expected"),
[
("patch,dev,release", "1!0.0.2"),
("fix,rc", "1!0.0.2rc0"),
("minor,dev", "1!0.1.0.dev0"),
("minor,preview", "1!0.1.0rc0"),
("major,beta", "1!1.0.0b0"),
("major,major,major", "1!3.0.0"),
],
)
def test_correct(self, isolation, operations, expected):
scheme = StandardScheme(str(isolation), {})
assert scheme.update(operations, "1!0.0.1", {}) == expected
| TestWithEpoch |
python | pytorch__pytorch | test/test_autocast.py | {
"start": 395,
"end": 6534
} | class ____(TestAutocast):
def setUp(self):
super().setUp()
self.autocast_lists = AutocastCPUTestLists(torch.device("cpu"))
def tearDown(self):
del self.autocast_lists
super().tearDown()
@skipIfTorchDynamo()
def test_autocast_torch_expect_builtin_promote(self):
for (
op,
args1,
args2,
out_type,
) in self.autocast_lists.torch_expect_builtin_promote:
self._run_autocast_outofplace(
op, args1, torch.float32, device="cpu", out_type=out_type
)
self._run_autocast_outofplace(
op,
args2,
torch.float32,
device="cpu",
out_type=out_type,
amp_dtype=torch.float16,
)
@skipIfTorchDynamo()
def test_autocast_methods_expect_builtin_promote(self):
for (
op,
args1,
args2,
out_type,
) in self.autocast_lists.methods_expect_builtin_promote:
self._run_autocast_outofplace(
op, args1, torch.float32, device="cpu", module=None, out_type=out_type
)
self._run_autocast_outofplace(
op,
args2,
torch.float32,
device="cpu",
module=None,
out_type=out_type,
amp_dtype=torch.float16,
)
@skipIfTorchDynamo()
def test_autocast_torch_16(self):
for op_with_args in self.autocast_lists.torch_16:
op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
self._run_autocast_outofplace(
op, args, torch.bfloat16, device="cpu", add_kwargs=maybe_kwargs
)
self._run_autocast_outofplace(
op,
args,
torch.float16,
device="cpu",
add_kwargs=maybe_kwargs,
amp_dtype=torch.float16,
)
@skipIfTorchDynamo()
def test_autocast_nn_16(self):
for op_with_args in self.autocast_lists.nn_16:
op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
self._run_autocast_outofplace(
op,
args,
torch.bfloat16,
device="cpu",
module=torch._C._nn,
add_kwargs=maybe_kwargs,
)
self._run_autocast_outofplace(
op,
args,
torch.float16,
device="cpu",
module=torch._C._nn,
add_kwargs=maybe_kwargs,
amp_dtype=torch.float16,
)
@skipIfTorchDynamo()
def test_autocast_torch_fp32(self):
for op_with_args in self.autocast_lists.torch_fp32:
op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
self._run_autocast_outofplace(
op, args, torch.float32, device="cpu", add_kwargs=maybe_kwargs
)
self._run_autocast_outofplace(
op,
args,
torch.float32,
device="cpu",
add_kwargs=maybe_kwargs,
amp_dtype=torch.float16,
)
@skipIfTorchDynamo()
def test_autocast_nn_fp32(self):
for op_with_args in self.autocast_lists.nn_fp32:
op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
self._run_autocast_outofplace(
op,
args,
torch.float32,
device="cpu",
module=torch._C._nn,
add_kwargs=maybe_kwargs,
)
self._run_autocast_outofplace(
op,
args,
torch.float32,
device="cpu",
module=torch._C._nn,
add_kwargs=maybe_kwargs,
amp_dtype=torch.float16,
)
@skipIfTorchDynamo()
def test_autocast_torch_need_autocast_promote(self):
for op, args1, args2 in self.autocast_lists.torch_need_autocast_promote:
self._run_autocast_outofplace(op, args1, torch.float32, device="cpu")
self._run_autocast_outofplace(
op, args2, torch.float32, device="cpu", amp_dtype=torch.float16
)
def test_autocast_rnn(self):
if (
torch.backends.mkldnn.is_available()
and torch.ops.mkldnn._is_mkldnn_bf16_supported()
):
x = torch.randn(1, 2, 1)
hx = torch.randn(2, 2, 1)
cx = torch.randn(2, 2, 1)
m = torch.nn.LSTM(1, 1, 2).to(torch.bfloat16)
# Raise ValueError when autocast is not enabled
with self.assertRaisesRegex(ValueError, "input must have the type"):
m(x, (hx, cx))
# Should be able to run the below case with autocast
with torch.amp.autocast(device_type="cpu"):
m(x, (hx, cx))
def test_autocast_disabled_with_fp32_dtype(self):
with torch.autocast(device_type="cpu", dtype=torch.float32, enabled=False):
_ = torch.ones(10)
def test_generic_autocast(self):
for op_with_args in self.autocast_lists.torch_16:
op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
with torch.amp.autocast(device_type="cpu"):
generic_autocast_output = getattr(torch, op)(*args, **maybe_kwargs)
with torch.amp.autocast(device_type="cpu"):
cpu_autocast_output = getattr(torch, op)(*args, **maybe_kwargs)
self.assertEqual(generic_autocast_output, cpu_autocast_output)
def test_cpu_autocast_deprecated_warning(self):
with self.assertWarnsRegex(
FutureWarning,
r"`torch.cpu.amp.autocast\(args...\)` is deprecated. Please use `torch.amp.autocast\('cpu', args...\)` instead.",
):
with torch.cpu.amp.autocast():
_ = torch.ones(10)
| TestAutocastCPU |
python | coleifer__peewee | tests/manytomany.py | {
"start": 452,
"end": 570
} | class ____(TestModel):
text = TextField()
users = ManyToManyField(User, through_model=AltThroughDeferred)
| AltNote |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/base.py | {
"start": 108158,
"end": 108777
} | class ____(ReflectedNamedType):
"""Represents a reflected enum."""
type: str
"""The string name of the underlying data type of the domain."""
nullable: bool
"""Indicates if the domain allows null or not."""
default: Optional[str]
"""The string representation of the default value of this domain
or ``None`` if none present.
"""
constraints: List[ReflectedDomainConstraint]
"""The constraints defined in the domain, if any.
The constraint are in order of evaluation by postgresql.
"""
collation: Optional[str]
"""The collation for the domain."""
| ReflectedDomain |
python | mlflow__mlflow | tests/llama_index/sample_code/query_engine_with_reranker.py | {
"start": 625,
"end": 1093
} | class ____(BaseNodePostprocessor):
call_count: int = 0
def _postprocess_nodes(
self, nodes: list[NodeWithScore], query_bundle: QueryBundle | None
) -> list[NodeWithScore]:
# subtracts 1 from the score
self.call_count += 1
return nodes
query_engine = index.as_query_engine(
similarity_top_k=5,
node_postprocessors=[reranker, CustomNodePostprocessor()],
)
mlflow.models.set_model(query_engine)
| CustomNodePostprocessor |
python | pdm-project__pdm | src/pdm/models/search.py | {
"start": 396,
"end": 2305
} | class ____(HTMLParser):
"""A simple HTML parser for pypi.org search results."""
def __init__(self) -> None:
super().__init__()
self.results: list[SearchResult] = []
self._current: Result | None = None
self._nest_anchors = 0
self._data_callback: Callable[[str], None] | None = None
@staticmethod
def _match_class(attrs: list[tuple[str, str | None]], name: str) -> bool:
attrs_map = dict(attrs)
return name in (attrs_map.get("class") or "").split()
def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
if not self._current:
if tag == "a" and self._match_class(attrs, "package-snippet"):
self._current = Result()
self._nest_anchors = 1
else:
if tag == "span" and self._match_class(attrs, "package-snippet__name"):
self._data_callback = functools.partial(setattr, self._current, "name")
elif tag == "span" and self._match_class(attrs, "package-snippet__version"):
self._data_callback = functools.partial(setattr, self._current, "version")
elif tag == "p" and self._match_class(attrs, "package-snippet__description"):
self._data_callback = functools.partial(setattr, self._current, "description")
elif tag == "a":
self._nest_anchors += 1
def handle_data(self, data: str) -> None:
if self._data_callback is not None:
self._data_callback(data)
self._data_callback = None
def handle_endtag(self, tag: str) -> None:
if tag != "a" or self._current is None:
return
self._nest_anchors -= 1
if self._nest_anchors == 0:
if self._current.name:
self.results.append(self._current.as_frozen())
self._current = None
| SearchResultParser |
python | h5py__h5py | h5py/tests/test_group.py | {
"start": 33673,
"end": 34595
} | class ____(TestCase):
"""
Bugs: Specific regressions for external links
"""
def test_issue_212(self):
""" Issue 212
Fails with:
AttributeError: 'SharedConfig' object has no attribute 'lapl'
"""
def closer(x):
def w():
try:
if x:
x.close()
except OSError:
pass
return w
orig_name = self.mktemp()
new_name = self.mktemp()
f = File(orig_name, 'w')
self.addCleanup(closer(f))
f.create_group('a')
f.close()
g = File(new_name, 'w')
self.addCleanup(closer(g))
g['link'] = ExternalLink(orig_name, '/') # note root group
g.close()
h = File(new_name, 'r')
self.addCleanup(closer(h))
self.assertIsInstance(h['link']['a'], Group)
| TestExtLinkBugs |
python | ansible__ansible | lib/ansible/plugins/action/pause.py | {
"start": 1039,
"end": 5674
} | class ____(ActionBase):
""" pauses execution for a length or time, or until input is received """
BYPASS_HOST_LOOP = True
def run(self, tmp=None, task_vars=None):
""" run the pause action module """
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
validation_result, new_module_args = self.validate_argument_spec(
argument_spec={
'echo': {'type': 'bool', 'default': True},
'minutes': {'type': int}, # Don't break backwards compat, allow floats, by using int callable
'seconds': {'type': int}, # Don't break backwards compat, allow floats, by using int callable
'prompt': {'type': 'str'},
},
mutually_exclusive=(
('minutes', 'seconds'),
),
)
duration_unit = 'minutes'
prompt = None
seconds = None
echo = new_module_args['echo']
echo_prompt = ''
result.update(dict(
changed=False,
rc=0,
stderr='',
stdout='',
start=None,
stop=None,
delta=None,
echo=echo
))
# Add a note saying the output is hidden if echo is disabled
if not echo:
echo_prompt = ' (output is hidden)'
if new_module_args['prompt']:
prompt = "[%s]\n%s%s:" % (self._task.get_name().strip(), new_module_args['prompt'], echo_prompt)
else:
# If no custom prompt is specified, set a default prompt
prompt = "[%s]\n%s%s:" % (self._task.get_name().strip(), 'Press enter to continue, Ctrl+C to interrupt', echo_prompt)
if new_module_args['minutes'] is not None:
seconds = new_module_args['minutes'] * 60
elif new_module_args['seconds'] is not None:
seconds = new_module_args['seconds']
duration_unit = 'seconds'
########################################################################
# Begin the hard work!
start = time.time()
result['start'] = to_text(datetime.datetime.now())
result['user_input'] = b''
default_input_complete = None
if seconds is not None:
if seconds < 1:
seconds = 1
# show the timer and control prompts
display.display("Pausing for %d seconds%s" % (seconds, echo_prompt))
# show the prompt specified in the task
if new_module_args['prompt']:
display.display("(ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort)\r")
else:
# corner case where enter does not continue, wait for timeout/interrupt only
prompt = "(ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort)\r"
# don't complete on LF/CR; we expect a timeout/interrupt and ignore user input when a pause duration is specified
default_input_complete = tuple()
# Only echo input if no timeout is specified
echo = seconds is None and echo
user_input = b''
try:
_user_input = display.prompt_until(prompt, private=not echo, seconds=seconds, complete_input=default_input_complete)
except AnsiblePromptInterrupt:
user_input = None
except AnsiblePromptNoninteractive:
if seconds is None:
display.warning("Not waiting for response to prompt as stdin is not interactive")
else:
# wait specified duration
time.sleep(seconds)
else:
if seconds is None:
user_input = _user_input
# user interrupt
if user_input is None:
prompt = "Press 'C' to continue the play or 'A' to abort \r"
try:
user_input = display.prompt_until(prompt, private=not echo, interrupt_input=(b'a',), complete_input=(b'c',))
except AnsiblePromptInterrupt:
raise AnsibleError('user requested abort!')
duration = time.time() - start
result['stop'] = to_text(datetime.datetime.now())
result['delta'] = int(duration)
if duration_unit == 'minutes':
duration = round(duration / 60.0, 2)
else:
duration = round(duration, 2)
result['stdout'] = "Paused for %s %s" % (duration, duration_unit)
result['user_input'] = to_text(user_input, errors='surrogate_or_strict')
return result
| ActionModule |
python | python-attrs__attrs | src/attr/validators.py | {
"start": 16719,
"end": 17871
} | class ____:
type = attrib()
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if not issubclass(value, self.type):
msg = f"'{attr.name}' must be a subclass of {self.type!r} (got {value!r})."
raise TypeError(
msg,
attr,
self.type,
value,
)
def __repr__(self):
return f"<subclass_of validator for type {self.type!r}>"
def _subclass_of(type):
"""
A validator that raises a `TypeError` if the initializer is called with a
wrong type for this particular attribute (checks are performed using
`issubclass` therefore it's also valid to pass a tuple of types).
Args:
type (type | tuple[type, ...]): The type(s) to check for.
Raises:
TypeError:
With a human readable error message, the attribute (of type
`attrs.Attribute`), the expected type, and the value it got.
"""
return _SubclassOfValidator(type)
@attrs(repr=False, slots=True, unsafe_hash=True)
| _SubclassOfValidator |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess24.py | {
"start": 499,
"end": 782
} | class ____(Any):
y: Desc
v1 = DerivesFromUnknown().x
reveal_type(v1, expected_text="Unknown")
v2 = DerivesFromAny().x
reveal_type(v2, expected_text="Any")
reveal_type(DerivesFromUnknown().y, expected_text="int")
reveal_type(DerivesFromAny().y, expected_text="int")
| DerivesFromAny |
python | astropy__astropy | astropy/coordinates/builtin_frames/altaz.py | {
"start": 3590,
"end": 5707
} | class ____(BaseCoordinateFrame):
"""
A coordinate or frame in the Altitude-Azimuth system (Horizontal
coordinates) with respect to the WGS84 ellipsoid. Azimuth is oriented
East of North (i.e., N=0, E=90 degrees). Altitude is also known as
elevation angle, so this frame is also in the Azimuth-Elevation system.
This frame is assumed to *include* refraction effects if the ``pressure``
frame attribute is non-zero.
The frame attributes are listed under **Other Parameters**, which are
necessary for transforming from AltAz to some other system.
"""
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping("lon", "az"),
RepresentationMapping("lat", "alt"),
]
}
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
obstime = TimeAttribute(
default=None, doc="The reference time (e.g., time of observation)"
)
location = EarthLocationAttribute(
default=None, doc="The location on Earth of the observer"
)
pressure = QuantityAttribute(default=0, unit=u.hPa, doc="The atmospheric pressure")
temperature = QuantityAttribute(
default=0, unit=u.deg_C, doc="The ground-level temperature"
)
relative_humidity = QuantityAttribute(
default=0, unit=u.dimensionless_unscaled, doc="The relative humidity"
)
obswl = QuantityAttribute(
default=1 * u.micron,
unit=u.micron,
doc="The average wavelength of observations",
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def secz(self):
"""
Secant of the zenith angle for this coordinate, a common estimate of
the airmass.
"""
return 1 / np.sin(self.alt)
@property
def zen(self):
"""
The zenith angle (or zenith distance / co-altitude) for this coordinate.
"""
return _90DEG.to(self.alt.unit) - self.alt
# self-transform defined in icrs_observed_transforms.py
| AltAz |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methodOverride1.py | {
"start": 11953,
"end": 12185
} | class ____(Base3):
@overload
def case(self, value: int) -> Iterable[int]: ...
@overload
def case(self, value: float) -> Iterable[float]: ...
def case(self, value: Any) -> Iterable[Any]:
return []
| Derived3 |
python | psf__black | tests/data/miscellaneous/force_pyi.py | {
"start": 192,
"end": 210
} | class ____: ...
@hmm
| C |
python | numpy__numpy | numpy/_core/tests/test_function_base.py | {
"start": 1535,
"end": 4115
} | class ____:
def test_basic(self):
y = logspace(0, 6)
assert_(len(y) == 50)
y = logspace(0, 6, num=100)
assert_(y[-1] == 10 ** 6)
y = logspace(0, 6, endpoint=False)
assert_(y[-1] < 10 ** 6)
y = logspace(0, 6, num=7)
assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
def test_start_stop_array(self):
start = array([0., 1.])
stop = array([6., 7.])
t1 = logspace(start, stop, 6)
t2 = stack([logspace(_start, _stop, 6)
for _start, _stop in zip(start, stop)], axis=1)
assert_equal(t1, t2)
t3 = logspace(start, stop[0], 6)
t4 = stack([logspace(_start, stop[0], 6)
for _start in start], axis=1)
assert_equal(t3, t4)
t5 = logspace(start, stop, 6, axis=-1)
assert_equal(t5, t2.T)
@pytest.mark.parametrize("axis", [0, 1, -1])
def test_base_array(self, axis: int):
start = 1
stop = 2
num = 6
base = array([1, 2])
t1 = logspace(start, stop, num=num, base=base, axis=axis)
t2 = stack(
[logspace(start, stop, num=num, base=_base) for _base in base],
axis=(axis + 1) % t1.ndim,
)
assert_equal(t1, t2)
@pytest.mark.parametrize("axis", [0, 1, -1])
def test_stop_base_array(self, axis: int):
start = 1
stop = array([2, 3])
num = 6
base = array([1, 2])
t1 = logspace(start, stop, num=num, base=base, axis=axis)
t2 = stack(
[logspace(start, _stop, num=num, base=_base)
for _stop, _base in zip(stop, base)],
axis=(axis + 1) % t1.ndim,
)
assert_equal(t1, t2)
def test_dtype(self):
y = logspace(0, 6, dtype='float32')
assert_equal(y.dtype, dtype('float32'))
y = logspace(0, 6, dtype='float64')
assert_equal(y.dtype, dtype('float64'))
y = logspace(0, 6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
def test_physical_quantities(self):
a = PhysicalQuantity(1.0)
b = PhysicalQuantity(5.0)
assert_equal(logspace(a, b), logspace(1.0, 5.0))
def test_subclass(self):
a = array(1).view(PhysicalQuantity2)
b = array(7).view(PhysicalQuantity2)
ls = logspace(a, b)
assert type(ls) is PhysicalQuantity2
assert_equal(ls, logspace(1.0, 7.0))
ls = logspace(a, b, 1)
assert type(ls) is PhysicalQuantity2
assert_equal(ls, logspace(1.0, 7.0, 1))
| TestLogspace |
python | pytorch__pytorch | torch/distributed/checkpoint/_async_process_executor.py | {
"start": 1375,
"end": 1678
} | class ____:
staged_state_dict: STATE_DICT_TYPE
checkpoint_request_id: _CheckpointRequestIdentifier
storage_writer: Optional[StorageWriter] = None
planner: Optional[SavePlanner] = None
no_dist: bool = False
use_collectives: bool = True
@dataclass(init=False)
| _AsyncCheckpointRequest |
python | openai__openai-python | src/openai/types/shared/error_object.py | {
"start": 178,
"end": 305
} | class ____(BaseModel):
code: Optional[str] = None
message: str
param: Optional[str] = None
type: str
| ErrorObject |
python | ijl__orjson | test/test_fixture.py | {
"start": 159,
"end": 1532
} | class ____:
def test_twitter(self):
"""
loads(),dumps() twitter.json
"""
val = read_fixture_str("twitter.json.xz")
read = orjson.loads(val)
assert orjson.loads(orjson.dumps(read)) == read
@needs_data
def test_canada(self):
"""
loads(), dumps() canada.json
"""
val = read_fixture_str("canada.json.xz")
read = orjson.loads(val)
assert orjson.loads(orjson.dumps(read)) == read
def test_citm_catalog(self):
"""
loads(), dumps() citm_catalog.json
"""
val = read_fixture_str("citm_catalog.json.xz")
read = orjson.loads(val)
assert orjson.loads(orjson.dumps(read)) == read
def test_github(self):
"""
loads(), dumps() github.json
"""
val = read_fixture_str("github.json.xz")
read = orjson.loads(val)
assert orjson.loads(orjson.dumps(read)) == read
def test_blns(self):
"""
loads() blns.json JSONDecodeError
https://github.com/minimaxir/big-list-of-naughty-strings
"""
val = read_fixture_bytes("blns.txt.xz")
for line in val.split(b"\n"):
if line and not line.startswith(b"#"):
with pytest.raises(orjson.JSONDecodeError):
_ = orjson.loads(b'"' + val + b'"')
| TestFixture |
python | django__django | tests/utils_tests/test_choices.py | {
"start": 450,
"end": 2019
} | class ____(SimpleTestCase):
def test_not_implemented_error_on_missing_iter(self):
class InvalidChoiceIterator(BaseChoiceIterator):
pass # Not overriding __iter__().
msg = "BaseChoiceIterator subclasses must implement __iter__()."
with self.assertRaisesMessage(NotImplementedError, msg):
iter(InvalidChoiceIterator())
def test_eq(self):
unrolled = [(1, "Item #1"), (2, "Item #2"), (3, "Item #3")]
self.assertEqual(SimpleChoiceIterator(), unrolled)
self.assertEqual(unrolled, SimpleChoiceIterator())
def test_eq_instances(self):
self.assertEqual(SimpleChoiceIterator(), SimpleChoiceIterator())
def test_not_equal_subset(self):
self.assertNotEqual(SimpleChoiceIterator(), [(1, "Item #1"), (2, "Item #2")])
def test_not_equal_superset(self):
self.assertNotEqual(
SimpleChoiceIterator(),
[(1, "Item #1"), (2, "Item #2"), (3, "Item #3"), None],
)
def test_getitem(self):
choices = SimpleChoiceIterator()
for i, expected in [(0, (1, "Item #1")), (-1, (3, "Item #3"))]:
with self.subTest(index=i):
self.assertEqual(choices[i], expected)
def test_getitem_indexerror(self):
choices = SimpleChoiceIterator()
for i in (4, -4):
with self.subTest(index=i):
with self.assertRaises(IndexError) as ctx:
choices[i]
self.assertTrue(str(ctx.exception).endswith("index out of range"))
| ChoiceIteratorTests |
python | getsentry__sentry | tests/sentry/issues/endpoints/test_project_codeowners_index.py | {
"start": 188,
"end": 41683
} | class ____(APITestCase):
def setUp(self) -> None:
self.user = self.create_user("admin@sentry.io", is_superuser=True)
self.login_as(user=self.user)
self.team = self.create_team(
organization=self.organization, slug="tiger-team", members=[self.user]
)
self.project = self.project = self.create_project(
organization=self.organization, teams=[self.team], slug="bengal"
)
self.code_mapping = self.create_code_mapping(
project=self.project,
)
self.external_user = self.create_external_user(
external_name="@NisanthanNanthakumar", integration=self.integration
)
self.external_team = self.create_external_team(integration=self.integration)
self.url = reverse(
"sentry-api-0-project-codeowners",
kwargs={
"organization_id_or_slug": self.organization.slug,
"project_id_or_slug": self.project.slug,
},
)
self.data = {
"raw": "docs/* @NisanthanNanthakumar @getsentry/ecosystem\n",
"codeMappingId": self.code_mapping.id,
}
def test_no_codeowners(self) -> None:
with self.feature({"organizations:integrations-codeowners": True}):
resp = self.client.get(self.url)
assert resp.status_code == 200
assert resp.data == []
def test_without_feature_flag(self) -> None:
with self.feature({"organizations:integrations-codeowners": False}):
resp = self.client.get(self.url)
assert resp.status_code == 403
assert resp.data == {"detail": "You do not have permission to perform this action."}
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
def test_get_codeowners_with_integration_post_creation(
self, get_codeowner_mock_file: MagicMock
) -> None:
with self.feature({"organizations:integrations-codeowners": True}):
post_resp = self.client.post(self.url, self.data)
assert post_resp.status_code == 201
get_resp = self.client.get(self.url)
assert get_resp.status_code == 200
resp_data = get_resp.data[0]
assert resp_data["raw"] == self.data["raw"].strip()
assert resp_data["codeMappingId"] == str(self.code_mapping.id)
assert resp_data["provider"] == self.integration.provider
assert resp_data["codeOwnersUrl"] == "https://github.com/test/CODEOWNERS"
assert resp_data["schema"] == {
"$version": 1,
"rules": [
{
"matcher": {"type": "codeowners", "pattern": "docs/*"},
"owners": [
{"type": "user", "id": self.user.id, "name": self.user.email},
{"type": "team", "id": self.team.id, "name": self.team.slug},
],
}
],
}
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
def test_get_codeowners_no_schema_initially(self, get_codeowner_mock_file: MagicMock) -> None:
code_owner = self.create_codeowners(
self.project, self.code_mapping, raw=f"*.js {self.external_team.external_name}"
)
with self.feature({"organizations:integrations-codeowners": True}):
resp = self.client.get(self.url)
assert resp.status_code == 200
resp_data = resp.data[0]
assert resp_data["raw"] == code_owner.raw
assert resp_data["dateCreated"] == code_owner.date_added
assert resp_data["dateUpdated"] == code_owner.date_updated
assert resp_data["codeMappingId"] == str(self.code_mapping.id)
assert resp_data["provider"] == self.integration.provider
assert resp_data["codeOwnersUrl"] == "https://github.com/test/CODEOWNERS"
assert resp_data["schema"] == {}
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
def test_get_codeowners_with_integration(self, get_codeowner_mock_file: MagicMock) -> None:
code_owner = self.create_codeowners(
self.project, self.code_mapping, raw=f"*.js {self.external_team.external_name}"
)
code_owner.update_schema(organization=self.organization, raw=code_owner.raw)
with self.feature({"organizations:integrations-codeowners": True}):
resp = self.client.get(self.url)
assert resp.status_code == 200
resp_data = resp.data[0]
assert resp_data["raw"] == code_owner.raw
assert resp_data["dateCreated"] == code_owner.date_added
assert resp_data["dateUpdated"] == code_owner.date_updated
assert resp_data["codeMappingId"] == str(self.code_mapping.id)
assert resp_data["provider"] == self.integration.provider
assert resp_data["codeOwnersUrl"] == "https://github.com/test/CODEOWNERS"
assert resp_data["schema"] == {
"$version": 1,
"rules": [
{
"matcher": {"type": "codeowners", "pattern": "*.js"},
"owners": [{"type": "team", "id": self.team.id, "name": self.team.slug}],
}
],
}
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
def test_get_expanded_codeowners_with_integration(
self, get_codeowner_mock_file: MagicMock
) -> None:
code_owner = self.create_codeowners(
self.project, self.code_mapping, raw=f"*.js {self.external_team.external_name}"
)
code_owner.update_schema(organization=self.organization, raw=code_owner.raw)
with self.feature({"organizations:integrations-codeowners": True}):
resp = self.client.get(f"{self.url}?expand=codeMapping")
assert resp.status_code == 200
resp_data = resp.data[0]
assert resp_data["raw"] == code_owner.raw
assert resp_data["dateCreated"] == code_owner.date_added
assert resp_data["dateUpdated"] == code_owner.date_updated
assert resp_data["codeMappingId"] == str(self.code_mapping.id)
assert resp_data["codeMapping"]["id"] == str(self.code_mapping.id)
assert resp_data["provider"] == self.integration.provider
assert resp_data["codeOwnersUrl"] == "https://github.com/test/CODEOWNERS"
assert resp_data["schema"] == {
"$version": 1,
"rules": [
{
"matcher": {"type": "codeowners", "pattern": "*.js"},
"owners": [{"type": "team", "id": self.team.id, "name": self.team.slug}],
}
],
}
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
def test_basic_post(self, get_codeowner_mock_file: MagicMock) -> None:
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.post(self.url, self.data)
assert response.status_code == 201, response.content
assert response.data["id"]
assert response.data["raw"] == "docs/* @NisanthanNanthakumar @getsentry/ecosystem"
assert response.data["codeMappingId"] == str(self.code_mapping.id)
assert response.data["provider"] == "github"
assert response.data["ownershipSyntax"] == "codeowners:docs/* admin@sentry.io #tiger-team\n"
errors = response.data["errors"]
assert errors["missing_external_teams"] == []
assert errors["missing_external_users"] == []
assert errors["missing_user_emails"] == []
assert errors["teams_without_access"] == []
assert errors["users_without_access"] == []
def test_empty_codeowners_text(self) -> None:
self.data["raw"] = ""
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.post(self.url, self.data)
assert response.status_code == 400
assert response.data == {"raw": ["This field may not be blank."]}
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
def test_invalid_codeowners_text(self, get_codeowner_mock_file: MagicMock) -> None:
self.data["raw"] = "docs/*"
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.post(self.url, self.data)
assert response.status_code == 201
assert response.data["raw"] == "docs/*"
assert response.data["codeMappingId"] == str(self.code_mapping.id)
assert response.data["provider"] == "github"
assert response.data["ownershipSyntax"] == ""
errors = response.data["errors"]
assert errors["missing_external_teams"] == []
assert errors["missing_external_users"] == []
assert errors["missing_user_emails"] == []
assert errors["teams_without_access"] == []
assert errors["users_without_access"] == []
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
def test_cannot_find_external_user_name_association(
self, get_codeowner_mock_file: MagicMock
) -> None:
self.data["raw"] = "docs/* @MeredithAnya"
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.post(self.url, self.data)
assert response.status_code == 201
assert response.data["raw"] == "docs/* @MeredithAnya"
assert response.data["codeMappingId"] == str(self.code_mapping.id)
assert response.data["provider"] == "github"
assert response.data["ownershipSyntax"] == ""
errors = response.data["errors"]
assert errors["missing_external_teams"] == []
assert set(errors["missing_external_users"]) == {"@MeredithAnya"}
assert errors["missing_user_emails"] == []
assert errors["teams_without_access"] == []
assert errors["users_without_access"] == []
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
def test_cannot_find_sentry_user_with_email(self, get_codeowner_mock_file: MagicMock) -> None:
self.data["raw"] = "docs/* someuser@sentry.io"
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.post(self.url, self.data)
assert response.status_code == 201
assert response.data["raw"] == "docs/* someuser@sentry.io"
assert response.data["codeMappingId"] == str(self.code_mapping.id)
assert response.data["provider"] == "github"
assert response.data["ownershipSyntax"] == ""
errors = response.data["errors"]
assert errors["missing_external_teams"] == []
assert errors["missing_external_users"] == []
assert set(errors["missing_user_emails"]) == {"someuser@sentry.io"}
assert errors["teams_without_access"] == []
assert errors["users_without_access"] == []
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
def test_cannot_find_external_team_name_association(
self, get_codeowner_mock_file: MagicMock
) -> None:
self.data["raw"] = "docs/* @getsentry/frontend\nstatic/* @getsentry/frontend"
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.post(self.url, self.data)
assert response.status_code == 201
assert response.data["raw"] == "docs/* @getsentry/frontend\nstatic/* @getsentry/frontend"
assert response.data["codeMappingId"] == str(self.code_mapping.id)
assert response.data["provider"] == "github"
assert response.data["ownershipSyntax"] == ""
errors = response.data["errors"]
assert set(errors["missing_external_teams"]) == {"@getsentry/frontend"}
assert errors["missing_external_users"] == []
assert errors["missing_user_emails"] == []
assert errors["teams_without_access"] == []
assert errors["users_without_access"] == []
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
def test_cannot_find__multiple_external_name_association(
self, get_codeowner_mock_file: MagicMock
) -> None:
self.data["raw"] = "docs/* @AnotherUser @getsentry/frontend @getsentry/docs"
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.post(self.url, self.data)
assert response.status_code == 201
assert response.data["raw"] == "docs/* @AnotherUser @getsentry/frontend @getsentry/docs"
assert response.data["codeMappingId"] == str(self.code_mapping.id)
assert response.data["provider"] == "github"
assert response.data["ownershipSyntax"] == ""
errors = response.data["errors"]
assert set(errors["missing_external_teams"]) == {"@getsentry/frontend", "@getsentry/docs"}
assert set(errors["missing_external_users"]) == {"@AnotherUser"}
assert errors["missing_user_emails"] == []
assert errors["teams_without_access"] == []
assert errors["users_without_access"] == []
def test_missing_code_mapping_id(self) -> None:
self.data.pop("codeMappingId")
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.post(self.url, self.data)
assert response.status_code == 400
assert response.data == {"codeMappingId": ["This field is required."]}
def test_invalid_code_mapping_id(self) -> None:
self.data["codeMappingId"] = 500
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.post(self.url, self.data)
assert response.status_code == 400
assert response.data == {"codeMappingId": ["This code mapping does not exist."]}
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
def test_no_duplicates_allowed(self, get_codeowner_mock_file: MagicMock) -> None:
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.post(self.url, self.data)
assert response.status_code == 201, response.content
response = self.client.post(self.url, self.data)
assert response.status_code == 400
assert response.data == {"codeMappingId": ["This code mapping is already in use."]}
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
def test_schema_is_correct(self, get_codeowner_mock_file: MagicMock) -> None:
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.post(self.url, self.data)
assert response.status_code == 201, response.content
assert response.data["id"]
project_codeowners = ProjectCodeOwners.objects.get(id=response.data["id"])
assert project_codeowners.schema == {
"$version": 1,
"rules": [
{
"matcher": {"pattern": "docs/*", "type": "codeowners"},
"owners": [
{"id": self.user.id, "identifier": self.user.email, "type": "user"},
{"id": self.team.id, "identifier": self.team.slug, "type": "team"},
],
}
],
}
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
def test_case_insensitive_team_matching(self, get_codeowner_mock_file: MagicMock) -> None:
"""Test that team names are matched case-insensitively in CODEOWNERS files."""
external_team_name = self.external_team.external_name
capitalized_external_team_name = external_team_name.swapcase()
self.data[
"raw"
] = f"""
src/frontend/* {external_team_name}
src/frontend2/* {capitalized_external_team_name}
"""
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.post(self.url, self.data)
assert response.status_code == 201, response.content
assert (
response.data["ownershipSyntax"]
== f"codeowners:src/frontend/* #{self.team.slug}\ncodeowners:src/frontend2/* #{self.team.slug}\n"
)
errors = response.data["errors"]
assert errors["missing_external_teams"] == []
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
def test_multiple_mappings_to_same_sentry_team(
self, get_codeowner_mock_file: MagicMock
) -> None:
"""Multiple external teams map to the same Sentry team"""
# 2 external teams that map to the same Sentry team
# so 2 external actors @getsentry/ecosystem and @other-external-team both map to #tiger-team
external_team_2 = self.create_external_team(
team=self.team,
integration=self.integration,
external_name="@getsentry/other-external-team",
)
assert self.external_team.external_name != external_team_2.external_name
self.data[
"raw"
] = f"""
src/frontend/* {self.external_team.external_name}
src/frontend2/* {external_team_2.external_name}
"""
with self.feature(
{
"organizations:integrations-codeowners": True,
}
):
response = self.client.post(self.url, self.data)
assert response.status_code == 201, response.content
assert (
response.data["ownershipSyntax"]
== f"codeowners:src/frontend/* #{self.team.slug}\ncodeowners:src/frontend2/* #{self.team.slug}\n"
)
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
def test_schema_preserves_comments(self, get_codeowner_mock_file: MagicMock) -> None:
self.data["raw"] = "docs/* @NisanthanNanthakumar @getsentry/ecosystem\n"
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.post(self.url, self.data)
assert response.status_code == 201, response.content
assert response.data["id"]
project_codeowners = ProjectCodeOwners.objects.get(id=response.data["id"])
assert project_codeowners.schema == {
"$version": 1,
"rules": [
{
"matcher": {"pattern": "docs/*", "type": "codeowners"},
"owners": [
{"id": self.user.id, "identifier": self.user.email, "type": "user"},
{"id": self.team.id, "identifier": self.team.slug, "type": "team"},
],
}
],
}
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
def test_raw_email_correct_schema(self, get_codeowner_mock_file: MagicMock) -> None:
self.data["raw"] = f"docs/* {self.user.email} @getsentry/ecosystem\n"
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.post(self.url, self.data)
assert response.status_code == 201, response.content
assert response.data["id"]
project_codeowners = ProjectCodeOwners.objects.get(id=response.data["id"])
assert project_codeowners.schema == {
"$version": 1,
"rules": [
{
"matcher": {"pattern": "docs/*", "type": "codeowners"},
"owners": [
{"id": self.user.id, "identifier": self.user.email, "type": "user"},
{"id": self.team.id, "identifier": self.team.slug, "type": "team"},
],
}
],
}
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
def test_codeowners_scope_emails_to_org_security(
self, get_codeowner_mock_file: MagicMock
) -> None:
self.user2 = self.create_user("user2@sentry.io")
self.data = {
"raw": "docs/* @NisanthanNanthakumar user2@sentry.io\n",
"codeMappingId": self.code_mapping.id,
}
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.post(self.url, self.data)
assert response.status_code == 201
assert response.data["id"]
assert response.data["raw"] == "docs/* @NisanthanNanthakumar user2@sentry.io"
assert response.data["codeMappingId"] == str(self.code_mapping.id)
assert response.data["provider"] == "github"
assert response.data["ownershipSyntax"] == "codeowners:docs/* admin@sentry.io\n"
errors = response.data["errors"]
assert errors["missing_external_teams"] == []
assert errors["missing_external_users"] == []
assert set(errors["missing_user_emails"]) == {self.user2.email}
assert errors["teams_without_access"] == []
assert errors["users_without_access"] == []
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
def test_multiple_codeowners_for_project(self, get_codeowner_mock_file: MagicMock) -> None:
code_mapping_2 = self.create_code_mapping(stack_root="src/")
self.create_codeowners(code_mapping=code_mapping_2)
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.post(self.url, self.data)
assert response.status_code == 201
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
def test_users_without_access(self, get_codeowner_mock_file: MagicMock) -> None:
user_2 = self.create_user("bar@example.com")
self.create_member(organization=self.organization, user=user_2, role="member")
team_2 = self.create_team(name="foo", organization=self.organization, members=[user_2])
self.create_project(organization=self.organization, teams=[team_2], slug="bass")
self.create_external_user(
user=user_2, external_name="@foobarSentry", integration=self.integration
)
self.data["raw"] = "docs/* @foobarSentry\nstatic/* @foobarSentry"
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.post(self.url, self.data)
assert response.status_code == 201
assert response.data["raw"] == "docs/* @foobarSentry\nstatic/* @foobarSentry"
assert response.data["codeMappingId"] == str(self.code_mapping.id)
assert response.data["provider"] == "github"
assert response.data["ownershipSyntax"] == ""
errors = response.data["errors"]
assert errors["missing_external_teams"] == []
assert errors["missing_external_users"] == []
assert errors["missing_user_emails"] == []
assert errors["teams_without_access"] == []
assert set(errors["users_without_access"]) == {user_2.email}
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
def test_post_with_schema(self, get_codeowner_mock_file: MagicMock) -> None:
with self.feature({"organizations:integrations-codeowners": True}):
response = self.client.post(self.url, self.data)
assert response.status_code == 201
assert response.data["raw"] == "docs/* @NisanthanNanthakumar @getsentry/ecosystem"
assert response.data["codeMappingId"] == str(self.code_mapping.id)
assert response.data["schema"] == {
"$version": 1,
"rules": [
{
"matcher": {"type": "codeowners", "pattern": "docs/*"},
"owners": [
{"type": "user", "id": self.user.id, "identifier": "admin@sentry.io"},
{"type": "team", "id": self.team.id, "identifier": "tiger-team"},
],
}
],
}
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
def test_get(self, get_codeowner_mock_file: MagicMock) -> None:
self.client.post(self.url, self.data)
response = self.client.get(self.url)
response_data = response.data[0]
assert response.status_code == 200
assert response_data["raw"] == "docs/* @NisanthanNanthakumar @getsentry/ecosystem"
assert response_data["codeMappingId"] == str(self.code_mapping.id)
assert response_data["schema"] == {
"$version": 1,
"rules": [
{
"matcher": {"type": "codeowners", "pattern": "docs/*"},
"owners": [
{
"type": "user",
"id": self.user.id,
"name": "admin@sentry.io",
},
{"type": "team", "id": self.team.id, "name": "tiger-team"},
],
}
],
}
assert response_data["codeOwnersUrl"] == "https://github.com/test/CODEOWNERS"
# Assert that "identifier" is not renamed to "name" in the backend
ownership = ProjectCodeOwners.objects.get(project=self.project)
assert ownership.schema["rules"] == [
{
"matcher": {"type": "codeowners", "pattern": "docs/*"},
"owners": [
{"type": "user", "identifier": "admin@sentry.io", "id": self.user.id},
{"type": "team", "identifier": "tiger-team", "id": self.team.id},
],
}
]
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
@patch("sentry.tasks.codeowners.update_code_owners_schema")
def test_get_one_external_user_deletion_schema_updates_triggered(
self, mock_update_code_owners_schema: MagicMock, get_codeowner_mock_file: MagicMock
) -> None:
self.member_user_delete = self.create_user("member_delete@localhost", is_superuser=False)
self.create_member(
user=self.member_user_delete,
organization=self.organization,
role="member",
teams=[self.team],
)
self.external_delete_user = self.create_external_user(
user=self.member_user_delete, external_name="@delete", integration=self.integration
)
self.data["raw"] = "docs/* @delete @getsentry/ecosystem"
with self.feature({"organizations:integrations-codeowners": True}):
self.client.post(self.url, self.data)
self.external_delete_user.delete()
# 2 calls: creation of one external user, deletion of one external user
assert mock_update_code_owners_schema.apply_async.call_count == 2
# Schema updates haven't run, so we should get the original schema
response = self.client.get(self.url)
assert response.data[0]["schema"] == {
"$version": 1,
"rules": [
{
"matcher": {"type": "codeowners", "pattern": "docs/*"},
"owners": [
{
"type": "user",
"name": self.member_user_delete.email,
"id": self.member_user_delete.id,
},
{"type": "team", "name": self.team.slug, "id": self.team.id},
],
}
],
}
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
def test_get_one_external_user_deletion_schema_updates_correct(
self, get_codeowner_mock_file: MagicMock
) -> None:
self.member_user_delete = self.create_user("member_delete@localhost", is_superuser=False)
self.create_member(
user=self.member_user_delete,
organization=self.organization,
role="member",
teams=[self.team],
)
self.external_delete_user = self.create_external_user(
user=self.member_user_delete, external_name="@delete", integration=self.integration
)
self.data["raw"] = "docs/* @delete @getsentry/ecosystem"
with self.tasks(), self.feature({"organizations:integrations-codeowners": True}):
self.client.post(self.url, self.data)
self.external_delete_user.delete()
response = self.client.get(self.url)
assert response.data[0]["schema"] == {
"$version": 1,
"rules": [
{
"matcher": {"type": "codeowners", "pattern": "docs/*"},
"owners": [{"type": "team", "name": self.team.slug, "id": self.team.id}],
}
],
}
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
@patch("sentry.tasks.codeowners.update_code_owners_schema")
def test_get_all_external_users_deletion_schema_updates_triggered(
self, mock_update_code_owners_schema: MagicMock, get_codeowner_mock_file: MagicMock
) -> None:
self.member_user_delete = self.create_user("member_delete@localhost", is_superuser=False)
self.create_member(
user=self.member_user_delete,
organization=self.organization,
role="member",
teams=[self.team],
)
self.external_delete_user = self.create_external_user(
user=self.member_user_delete, external_name="@delete", integration=self.integration
)
self.data["raw"] = "docs/* @delete"
with self.feature({"organizations:integrations-codeowners": True}):
self.client.post(self.url, self.data)
self.external_delete_user.delete()
# 2 calls: creation of one external user, deletion of one external user
assert mock_update_code_owners_schema.apply_async.call_count == 2
# Schema updates haven't run, so we should get the original schema
response = self.client.get(self.url)
assert response.data[0]["schema"] == {
"$version": 1,
"rules": [
{
"matcher": {"pattern": "docs/*", "type": "codeowners"},
"owners": [
{
"type": "user",
"name": self.member_user_delete.email,
"id": self.member_user_delete.id,
}
],
}
],
}
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
def test_get_all_external_users_deletion_schema_updates_correct(
self, get_codeowner_mock_file: MagicMock
) -> None:
self.member_user_delete = self.create_user("member_delete@localhost", is_superuser=False)
self.create_member(
user=self.member_user_delete,
organization=self.organization,
role="member",
teams=[self.team],
)
self.external_delete_user = self.create_external_user(
user=self.member_user_delete, external_name="@delete", integration=self.integration
)
self.data["raw"] = "docs/* @delete"
with self.tasks(), self.feature({"organizations:integrations-codeowners": True}):
self.client.post(self.url, self.data)
self.external_delete_user.delete()
response = self.client.get(self.url)
assert response.data[0]["schema"] == {"$version": 1, "rules": []}
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
@patch("sentry.tasks.codeowners.update_code_owners_schema")
def test_get_multiple_rules_deleted_owners_schema_updates_triggered(
self, mock_update_code_owners_schema: MagicMock, get_codeowner_mock_file: MagicMock
) -> None:
self.member_user_delete_1 = self.create_user(
"member_delete_1@localhost", is_superuser=False
)
self.create_member(
user=self.member_user_delete_1,
organization=self.organization,
role="member",
teams=[self.team],
)
self.external_delete_user_1 = self.create_external_user(
user=self.member_user_delete_1, external_name="@delete-1", integration=self.integration
)
self.member_user_delete_2 = self.create_user(
"member_delete_2@localhost", is_superuser=False
)
self.create_member(
user=self.member_user_delete_2,
organization=self.organization,
role="member",
teams=[self.team],
)
self.external_delete_user_2 = self.create_external_user(
user=self.member_user_delete_2, external_name="@delete-2", integration=self.integration
)
self.data["raw"] = (
"docs/* @delete-1\n*.py @getsentry/ecosystem @delete-1\n*.css @delete-2\n*.rb @NisanthanNanthakumar"
)
with self.feature({"organizations:integrations-codeowners": True}):
self.client.post(self.url, self.data)
self.external_delete_user_1.delete()
self.external_delete_user_2.delete()
# 4 calls: creation of two external users, deletion of two external users
assert mock_update_code_owners_schema.apply_async.call_count == 4
# Schema updates haven't run, so we should get the original schema
response = self.client.get(self.url)
assert response.data[0]["schema"] == {
"$version": 1,
"rules": [
{
"matcher": {"type": "codeowners", "pattern": "docs/*"},
"owners": [
{
"type": "user",
"name": self.member_user_delete_1.email,
"id": self.member_user_delete_1.id,
}
],
},
{
"matcher": {"type": "codeowners", "pattern": "*.py"},
"owners": [
{"type": "team", "name": self.team.slug, "id": self.team.id},
{
"type": "user",
"name": self.member_user_delete_1.email,
"id": self.member_user_delete_1.id,
},
],
},
{
"matcher": {
"pattern": "*.css",
"type": "codeowners",
},
"owners": [
{
"type": "user",
"name": self.member_user_delete_2.email,
"id": self.member_user_delete_2.id,
},
],
},
{
"matcher": {"type": "codeowners", "pattern": "*.rb"},
"owners": [
{
"type": "user",
"name": self.user.email,
"id": self.user.id,
}
],
},
],
}
@patch(
"sentry.integrations.source_code_management.repository.RepositoryIntegration.get_codeowner_file",
return_value={"html_url": "https://github.com/test/CODEOWNERS"},
)
def test_get_multiple_rules_deleted_owners_schema_updates_correct(
self, get_codeowner_mock_file: MagicMock
) -> None:
self.member_user_delete_1 = self.create_user(
"member_delete_1@localhost", is_superuser=False
)
self.create_member(
user=self.member_user_delete_1,
organization=self.organization,
role="member",
teams=[self.team],
)
self.external_delete_user_1 = self.create_external_user(
user=self.member_user_delete_1, external_name="@delete-1", integration=self.integration
)
self.member_user_delete_2 = self.create_user(
"member_delete_2@localhost", is_superuser=False
)
self.create_member(
user=self.member_user_delete_2,
organization=self.organization,
role="member",
teams=[self.team],
)
self.external_delete_user_2 = self.create_external_user(
user=self.member_user_delete_2, external_name="@delete-2", integration=self.integration
)
self.data["raw"] = (
"docs/* @delete-1\n*.py @getsentry/ecosystem @delete-1\n*.css @delete-2\n*.rb @NisanthanNanthakumar"
)
with self.tasks(), self.feature({"organizations:integrations-codeowners": True}):
self.client.post(self.url, self.data)
self.external_delete_user_1.delete()
self.external_delete_user_2.delete()
response = self.client.get(self.url)
assert response.data[0]["schema"] == {
"$version": 1,
"rules": [
{
"matcher": {"type": "codeowners", "pattern": "*.py"},
"owners": [{"type": "team", "name": self.team.slug, "id": self.team.id}],
},
{
"matcher": {"type": "codeowners", "pattern": "*.rb"},
"owners": [
{
"type": "user",
"name": self.user.email,
"id": self.user.id,
}
],
},
],
}
| ProjectCodeOwnersEndpointTestCase |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefaultClass3.py | {
"start": 665,
"end": 704
} | class ____[T2 = T1, T1 = str]: ...
| ClassC |
python | django__django | tests/staticfiles_tests/test_storage.py | {
"start": 31414,
"end": 33078
} | class ____(SimpleTestCase):
def setUp(self):
manifest_path = Path(tempfile.mkdtemp())
self.addCleanup(shutil.rmtree, manifest_path)
self.staticfiles_storage = CustomManifestStorage(
manifest_location=manifest_path,
)
self.manifest_file = manifest_path / self.staticfiles_storage.manifest_name
# Manifest without paths.
self.manifest = {"version": self.staticfiles_storage.manifest_version}
with self.manifest_file.open("w") as manifest_file:
json.dump(self.manifest, manifest_file)
def test_read_manifest(self):
self.assertEqual(
self.staticfiles_storage.read_manifest(),
json.dumps(self.manifest),
)
def test_read_manifest_nonexistent(self):
os.remove(self.manifest_file)
self.assertIsNone(self.staticfiles_storage.read_manifest())
def test_save_manifest_override(self):
self.assertIs(self.manifest_file.exists(), True)
self.staticfiles_storage.save_manifest()
self.assertIs(self.manifest_file.exists(), True)
new_manifest = json.loads(self.staticfiles_storage.read_manifest())
self.assertIn("paths", new_manifest)
self.assertNotEqual(new_manifest, self.manifest)
def test_save_manifest_create(self):
os.remove(self.manifest_file)
self.staticfiles_storage.save_manifest()
self.assertIs(self.manifest_file.exists(), True)
new_manifest = json.loads(self.staticfiles_storage.read_manifest())
self.assertIn("paths", new_manifest)
self.assertNotEqual(new_manifest, self.manifest)
| TestCustomManifestStorage |
python | pytorch__pytorch | test/torch_np/test_function_base.py | {
"start": 449,
"end": 1054
} | class ____(TestCase):
# tests taken from np.append docstring
def test_basic(self):
result = np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
assert_equal(result, np.arange(1, 10, dtype=int))
# When `axis` is specified, `values` must have the correct shape.
result = np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
assert_equal(result, np.arange(1, 10, dtype=int).reshape((3, 3)))
with pytest.raises((RuntimeError, ValueError)):
np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
if __name__ == "__main__":
run_tests()
| TestAppend |
python | geekcomputers__Python | BlackJack_game/blackjack_rr.py | {
"start": 1305,
"end": 1730
} | class ____:
def __init__(self):
self.cards = []
self.value = 0
self.aces = 0 # to keep track of aces
def add_card(self, card):
self.cards.append(card)
self.value += values[card.rank]
if card.rank == "Ace":
self.aces += 1
def adjust_for_ace(self):
while self.value > 21 and self.aces:
self.value -= 10
self.aces -= 1
| Hand |
python | django__django | django/contrib/postgres/operations.py | {
"start": 11193,
"end": 12617
} | class ____(Operation):
"""Validate a table NOT VALID constraint."""
category = OperationCategory.ALTERATION
def __init__(self, model_name, name):
self.model_name = model_name
self.name = name
def describe(self):
return "Validate constraint %s on model %s" % (self.name, self.model_name)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = from_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.execute(
"ALTER TABLE %s VALIDATE CONSTRAINT %s"
% (
schema_editor.quote_name(model._meta.db_table),
schema_editor.quote_name(self.name),
)
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
# PostgreSQL does not provide a way to make a constraint invalid.
pass
def state_forwards(self, app_label, state):
pass
@property
def migration_name_fragment(self):
return "%s_validate_%s" % (self.model_name.lower(), self.name.lower())
def deconstruct(self):
return (
self.__class__.__name__,
[],
{
"model_name": self.model_name,
"name": self.name,
},
)
| ValidateConstraint |
python | google__jax | tests/absl_cpp_logging_test.py | {
"start": 958,
"end": 1544
} | class ____(jtu.JaxTestCase):
@unittest.skipIf(jaxlib.version <= (0, 7, 2), "absl_set_vlog_level is broken")
def test_vlogging(self):
utils.absl_set_min_log_level(0) # INFO
with jtu.capture_stderr() as stderr:
jax.jit(lambda x: x + 1)(1)
self.assertNotIn("hlo_pass_pipeline.cc", stderr())
with jtu.capture_stderr() as stderr:
utils.absl_set_vlog_level("hlo_pass_pipeline", 1)
jax.jit(lambda x: x + 2)(1)
self.assertIn("hlo_pass_pipeline.cc", stderr())
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| AbslCppLoggingTest |
python | pytorch__pytorch | test/inductor/test_codecache.py | {
"start": 3659,
"end": 7431
} | class ____(TestCase):
def test_linemaps_empty(self):
src = """import torch"""
(key, path) = PyCodeCache.write(src, "")
# Load with an empty linemap
PyCodeCache.load_by_key_path(key, path, linemap=[])
stack_frames = PyCodeCache.stack_frames_for_code(path, 0)
self.assertEqual(stack_frames, None)
@unittest.skipIf(IS_FBCODE or IS_SANDCASTLE, "Skip in fbcode/sandcastle")
def test_editable_cached_wrapper(self):
with tempfile.TemporaryDirectory() as tmpdir:
env = os.environ.copy()
env["TORCHINDUCTOR_CACHE_DIR"] = tmpdir
step1 = textwrap.dedent(
"""
import glob
import os
import torch
import warnings
from torch._inductor import config
warnings.filterwarnings("ignore")
config.fx_graph_cache = True
config.fx_graph_remote_cache = False
torch._dynamo.reset()
@torch.compile(backend="inductor")
def f(x):
return x * 2
f(torch.ones(2))
cache_dir = os.environ["TORCHINDUCTOR_CACHE_DIR"]
pyfiles = glob.glob(os.path.join(cache_dir, "**", "*.py"), recursive=True)
print(pyfiles[0])
"""
)
wrapper_path = (
subprocess.check_output([sys.executable, "-c", step1], env=env)
.decode()
.strip()
)
step2 = textwrap.dedent(
"""
import torch
import warnings
from torch._dynamo.utils import counters
from torch._inductor import config
warnings.filterwarnings("ignore")
config.fx_graph_cache = True
config.fx_graph_remote_cache = False
torch._dynamo.reset()
@torch.compile(backend="inductor")
def f(x):
return x * 2
f(torch.ones(2))
print(counters["inductor"]["fxgraph_cache_hit"])
"""
)
hit = (
subprocess.check_output([sys.executable, "-c", step2], env=env)
.decode()
.strip()
)
# XPU have extra lines, so get the last line, refer https://github.com/intel/torch-xpu-ops/issues/2261
if torch.xpu.is_available():
wrapper_path = wrapper_path.splitlines()[-1]
hit = hit.splitlines()[-1]
self.assertEqual(hit, "1")
with open(wrapper_path) as f:
src = f.read()
with open(wrapper_path, "w") as f:
f.write(
src.replace(
"def call(self, args):",
"def call(self, args):\n print('debug')",
)
)
step3 = textwrap.dedent(
"""
import torch
import warnings
from torch._inductor import config
warnings.filterwarnings("ignore")
config.fx_graph_cache = True
config.fx_graph_remote_cache = False
torch._dynamo.reset()
@torch.compile(backend="inductor")
def f(x):
return x * 2
f(torch.ones(2))
"""
)
out = subprocess.check_output(
[sys.executable, "-c", step3], env=env
).decode()
self.assertIn("debug", out)
@instantiate_parametrized_tests
| TestPyCodeCache |
python | scrapy__scrapy | tests/test_spidermiddleware_referer.py | {
"start": 35810,
"end": 36863
} | class ____(TestReferrerOnRedirect):
"""
No Referrer policy never sets the "Referer" header.
HTTP redirections should not change that.
"""
settings = {"REFERRER_POLICY": "no-referrer"}
scenarii = [
(
"http://scrapytest.org/1", # parent
"http://scrapytest.org/2", # target
(
# redirections: code, URL
(301, "http://scrapytest.org/3"),
(301, "http://scrapytest.org/4"),
),
None, # expected initial "Referer"
None, # expected "Referer" for the redirection request
),
(
"https://scrapytest.org/1",
"https://scrapytest.org/2",
((301, "http://scrapytest.org/3"),),
None,
None,
),
(
"https://scrapytest.org/1",
"https://example.com/2", # different origin
((301, "http://scrapytest.com/3"),),
None,
None,
),
]
| TestReferrerOnRedirectNoReferrer |
python | pennersr__django-allauth | allauth/socialaccount/providers/figma/provider.py | {
"start": 436,
"end": 1049
} | class ____(OAuth2Provider):
id = "figma"
name = "Figma"
account_class = FigmaAccount
oauth2_adapter_class = FigmaOAuth2Adapter
def extract_uid(self, data):
return str(data["id"])
def extract_common_fields(self, data):
return {
"email": data.get("email"),
"name": data.get("handle"),
}
def extract_email_addresses(self, data):
email = EmailAddress(
email=data.get("email"),
primary=True,
verified=False,
)
return [email]
providers.registry.register(FigmaProvider)
| FigmaProvider |
python | numpy__numpy | numpy/_core/tests/test_unicode.py | {
"start": 4991,
"end": 5146
} | class ____(CreateValues):
"""Check the creation of valued arrays (size 1, UCS2 values)"""
ulen = 1
ucs_value = ucs2_value
| TestCreateValues_1_UCS2 |
python | django__django | tests/string_lookup/tests.py | {
"start": 93,
"end": 2447
} | class ____(TestCase):
def test_string_form_referencing(self):
"""
Regression test for #1661 and #1662
String form referencing of models works, both as pre and post
reference, on all RelatedField types.
"""
f1 = Foo(name="Foo1")
f1.save()
f2 = Foo(name="Foo2")
f2.save()
w1 = Whiz(name="Whiz1")
w1.save()
b1 = Bar(name="Bar1", normal=f1, fwd=w1, back=f2)
b1.save()
self.assertEqual(b1.normal, f1)
self.assertEqual(b1.fwd, w1)
self.assertEqual(b1.back, f2)
base1 = Base(name="Base1")
base1.save()
child1 = Child(name="Child1", parent=base1)
child1.save()
self.assertEqual(child1.parent, base1)
def test_unicode_chars_in_queries(self):
"""
Regression tests for #3937
make sure we can use unicode characters in queries.
If these tests fail on MySQL, it's a problem with the test setup.
A properly configured UTF-8 database can handle this.
"""
fx = Foo(name="Bjorn", friend="François")
fx.save()
self.assertEqual(Foo.objects.get(friend__contains="\xe7"), fx)
def test_queries_on_textfields(self):
"""
Regression tests for #5087
make sure we can perform queries on TextFields.
"""
a = Article(name="Test", text="The quick brown fox jumps over the lazy dog.")
a.save()
self.assertEqual(
Article.objects.get(
text__exact="The quick brown fox jumps over the lazy dog."
),
a,
)
self.assertEqual(Article.objects.get(text__contains="quick brown fox"), a)
def test_ipaddress_on_postgresql(self):
"""
Regression test for #708
"like" queries on IP address fields require casting with HOST() (on
PostgreSQL).
"""
a = Article(name="IP test", text="The body", submitted_from="192.0.2.100")
a.save()
self.assertSequenceEqual(
Article.objects.filter(submitted_from__contains="192.0.2"), [a]
)
# The searches do not match the subnet mask (/32 in this case)
self.assertEqual(
Article.objects.filter(submitted_from__contains="32").count(), 0
)
| StringLookupTests |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/remote_representation/external_data.py | {
"start": 12281,
"end": 12444
} | class ____:
job_name: str
mode: str
op_selection: Optional[Sequence[str]]
@whitelist_for_serdes(storage_name="ExternalSensorMetadata")
@record
| TargetSnap |
python | facebookresearch__faiss | benchs/bench_fw/optimize.py | {
"start": 648,
"end": 11349
} | class ____:
distance_metric: str = "L2"
num_threads: int = 32
run_local: bool = True
def __post_init__(self):
self.cached_benchmark = None
if self.distance_metric == "IP":
self.distance_metric_type = faiss.METRIC_INNER_PRODUCT
elif self.distance_metric == "L2":
self.distance_metric_type = faiss.METRIC_L2
else:
raise ValueError
def set_io(self, benchmark_io):
self.io = benchmark_io
self.io.distance_metric = self.distance_metric
self.io.distance_metric_type = self.distance_metric_type
def benchmark_and_filter_candidates(
self,
index_descs,
training_vectors,
database_vectors,
query_vectors,
result_file,
include_flat,
min_accuracy,
pareto_metric,
):
benchmark = Benchmark(
num_threads=self.num_threads,
training_vectors=training_vectors,
database_vectors=database_vectors,
query_vectors=query_vectors,
index_descs=index_descs,
k=10,
distance_metric=self.distance_metric,
)
benchmark.set_io(self.io)
results = benchmark.benchmark(
result_file=result_file, local=self.run_local, train=True, knn=True
)
assert results
filtered = filter_results(
results=results,
evaluation="knn",
accuracy_metric="knn_intersection",
min_accuracy=min_accuracy,
name_filter=None
if include_flat
else (lambda n: not n.startswith("Flat")),
pareto_mode=ParetoMode.GLOBAL,
pareto_metric=pareto_metric,
)
assert filtered
index_descs = [
IndexDescriptorClassic(
factory=v["factory"],
construction_params=v["construction_params"],
search_params=v["search_params"],
)
for _, _, _, _, v in filtered
]
return index_descs, filtered
def optimize_quantizer(
self,
training_vectors: DatasetDescriptor,
query_vectors: DatasetDescriptor,
nlists: List[int],
min_accuracy: float,
):
quantizer_descs = {}
for nlist in nlists:
# cluster
centroids, _, _ = training_vectors.k_means(
self.io,
nlist,
dry_run=False,
)
descs = [IndexDescriptorClassic(factory="Flat"),] + [
IndexDescriptorClassic(
factory="HNSW32",
construction_params=[{"efConstruction": 2**i}],
)
for i in range(6, 11)
]
descs, _ = self.benchmark_and_filter_candidates(
descs,
training_vectors=centroids,
database_vectors=centroids,
query_vectors=query_vectors,
result_file=f"result_{centroids.get_filename()}json",
include_flat=True,
min_accuracy=min_accuracy,
pareto_metric=ParetoMetric.TIME,
)
quantizer_descs[nlist] = descs
return quantizer_descs
def optimize_ivf(
self,
result_file: str,
training_vectors: DatasetDescriptor,
database_vectors: DatasetDescriptor,
query_vectors: DatasetDescriptor,
quantizers: Dict[int, List[IndexDescriptorClassic]],
codecs: List[Tuple[str, str]],
min_accuracy: float,
):
ivf_descs = []
for nlist, quantizer_descs in quantizers.items():
# build IVF index
for quantizer_desc in quantizer_descs:
for pretransform, fine_ivf in codecs:
if pretransform is None:
pretransform = ""
else:
pretransform = pretransform + ","
if quantizer_desc.construction_params is None:
construction_params = [
None,
quantizer_desc.search_params,
]
else:
construction_params = [
None
] + quantizer_desc.construction_params
if quantizer_desc.search_params is not None:
dict_merge(
construction_params[1],
quantizer_desc.search_params,
)
ivf_descs.append(
IndexDescriptorClassic(
factory=f"{pretransform}IVF{nlist}({quantizer_desc.factory}),{fine_ivf}",
construction_params=construction_params,
)
)
return self.benchmark_and_filter_candidates(
ivf_descs,
training_vectors,
database_vectors,
query_vectors,
result_file,
include_flat=False,
min_accuracy=min_accuracy,
pareto_metric=ParetoMetric.TIME_SPACE,
)
# train an IVFFlat index
# find the nprobe required for the given accuracy
def ivf_flat_nprobe_required_for_accuracy(
self,
result_file: str,
training_vectors: DatasetDescriptor,
database_vectors: DatasetDescriptor,
query_vectors: DatasetDescriptor,
nlist,
accuracy,
):
_, results = self.benchmark_and_filter_candidates(
index_descs=[
IndexDescriptorClassic(factory=f"IVF{nlist}(Flat),Flat"),
],
training_vectors=training_vectors,
database_vectors=database_vectors,
query_vectors=query_vectors,
result_file=result_file,
include_flat=False,
min_accuracy=accuracy,
pareto_metric=ParetoMetric.TIME,
)
nprobe = nlist // 2
for _, _, _, k, v in results:
if (
".knn" in k
and "nprobe" in v["search_params"]
and v["knn_intersection"] >= accuracy
):
nprobe = min(nprobe, v["search_params"]["nprobe"])
return nprobe
# train candidate IVF codecs
# benchmark them at the same nprobe
# keep only the space _and_ time Pareto optimal
def optimize_codec(
self,
result_file: str,
d: int,
training_vectors: DatasetDescriptor,
database_vectors: DatasetDescriptor,
query_vectors: DatasetDescriptor,
nlist: int,
nprobe: int,
min_accuracy: float,
):
codecs = (
[
(None, "Flat"),
(None, "SQfp16"),
(None, "SQbf16"),
(None, "SQ8"),
(None, "SQ8_direct_signed"),
] + [
(f"OPQ{M}_{M * dim}", f"PQ{M}x{b}")
for M in [8, 12, 16, 32, 48, 64, 96, 128, 192, 256]
if d % M == 0
for dim in range(2, 18, 2)
if M * dim <= d
for b in range(4, 14, 2)
if M * b < d * 8 # smaller than SQ8
] + [
(None, f"PQ{M}x{b}")
for M in [8, 12, 16, 32, 48, 64, 96, 128, 192, 256]
if d % M == 0
for b in range(8, 14, 2)
if M * b < d * 8 # smaller than SQ8
]
)
factory = {}
for opq, pq in codecs:
factory[
f"IVF{nlist},{pq}" if opq is None else f"{opq},IVF{nlist},{pq}"
] = (
opq,
pq,
)
_, filtered = self.benchmark_and_filter_candidates(
index_descs=[
IndexDescriptorClassic(
factory=f"IVF{nlist},{pq}"
if opq is None
else f"{opq},IVF{nlist},{pq}",
search_params={
"nprobe": nprobe,
},
)
for opq, pq in codecs
],
training_vectors=training_vectors,
database_vectors=database_vectors,
query_vectors=query_vectors,
result_file=result_file,
include_flat=False,
min_accuracy=min_accuracy,
pareto_metric=ParetoMetric.TIME_SPACE,
)
results = [
factory[r] for r in set(v["factory"] for _, _, _, k, v in filtered)
]
return results
def optimize(
self,
d: int,
training_vectors: DatasetDescriptor,
database_vectors_list: List[DatasetDescriptor],
query_vectors: DatasetDescriptor,
min_accuracy: float,
):
# train an IVFFlat index
# find the nprobe required for near perfect accuracy
nlist = 4096
nprobe_at_95 = self.ivf_flat_nprobe_required_for_accuracy(
result_file=f"result_ivf{nlist}_flat.json",
training_vectors=training_vectors,
database_vectors=database_vectors_list[0],
query_vectors=query_vectors,
nlist=nlist,
accuracy=0.95,
)
# train candidate IVF codecs
# benchmark them at the same nprobe
# keep only the space and time Pareto optima
codecs = self.optimize_codec(
result_file=f"result_ivf{nlist}_codec.json",
d=d,
training_vectors=training_vectors,
database_vectors=database_vectors_list[0],
query_vectors=query_vectors,
nlist=nlist,
nprobe=nprobe_at_95,
min_accuracy=min_accuracy,
)
# optimize coarse quantizers
quantizers = self.optimize_quantizer(
training_vectors=training_vectors,
query_vectors=query_vectors,
nlists=[4096, 8192, 16384, 32768],
min_accuracy=0.7,
)
# combine them with the codecs
# test them at different scales
for database_vectors in database_vectors_list:
self.optimize_ivf(
result_file=f"result_{database_vectors.get_filename()}json",
training_vectors=training_vectors,
database_vectors=database_vectors,
query_vectors=query_vectors,
quantizers=quantizers,
codecs=codecs,
min_accuracy=min_accuracy,
)
| Optimizer |
python | pypa__pip | src/pip/_vendor/rich/terminal_theme.py | {
"start": 149,
"end": 3370
} | class ____:
"""A color theme used when exporting console content.
Args:
background (Tuple[int, int, int]): The background color.
foreground (Tuple[int, int, int]): The foreground (text) color.
normal (List[Tuple[int, int, int]]): A list of 8 normal intensity colors.
bright (List[Tuple[int, int, int]], optional): A list of 8 bright colors, or None
to repeat normal intensity. Defaults to None.
"""
def __init__(
self,
background: _ColorTuple,
foreground: _ColorTuple,
normal: List[_ColorTuple],
bright: Optional[List[_ColorTuple]] = None,
) -> None:
self.background_color = ColorTriplet(*background)
self.foreground_color = ColorTriplet(*foreground)
self.ansi_colors = Palette(normal + (bright or normal))
DEFAULT_TERMINAL_THEME = TerminalTheme(
(255, 255, 255),
(0, 0, 0),
[
(0, 0, 0),
(128, 0, 0),
(0, 128, 0),
(128, 128, 0),
(0, 0, 128),
(128, 0, 128),
(0, 128, 128),
(192, 192, 192),
],
[
(128, 128, 128),
(255, 0, 0),
(0, 255, 0),
(255, 255, 0),
(0, 0, 255),
(255, 0, 255),
(0, 255, 255),
(255, 255, 255),
],
)
MONOKAI = TerminalTheme(
(12, 12, 12),
(217, 217, 217),
[
(26, 26, 26),
(244, 0, 95),
(152, 224, 36),
(253, 151, 31),
(157, 101, 255),
(244, 0, 95),
(88, 209, 235),
(196, 197, 181),
(98, 94, 76),
],
[
(244, 0, 95),
(152, 224, 36),
(224, 213, 97),
(157, 101, 255),
(244, 0, 95),
(88, 209, 235),
(246, 246, 239),
],
)
DIMMED_MONOKAI = TerminalTheme(
(25, 25, 25),
(185, 188, 186),
[
(58, 61, 67),
(190, 63, 72),
(135, 154, 59),
(197, 166, 53),
(79, 118, 161),
(133, 92, 141),
(87, 143, 164),
(185, 188, 186),
(136, 137, 135),
],
[
(251, 0, 31),
(15, 114, 47),
(196, 112, 51),
(24, 109, 227),
(251, 0, 103),
(46, 112, 109),
(253, 255, 185),
],
)
NIGHT_OWLISH = TerminalTheme(
(255, 255, 255),
(64, 63, 83),
[
(1, 22, 39),
(211, 66, 62),
(42, 162, 152),
(218, 170, 1),
(72, 118, 214),
(64, 63, 83),
(8, 145, 106),
(122, 129, 129),
(122, 129, 129),
],
[
(247, 110, 110),
(73, 208, 197),
(218, 194, 107),
(92, 167, 228),
(105, 112, 152),
(0, 201, 144),
(152, 159, 177),
],
)
SVG_EXPORT_THEME = TerminalTheme(
(41, 41, 41),
(197, 200, 198),
[
(75, 78, 85),
(204, 85, 90),
(152, 168, 75),
(208, 179, 68),
(96, 138, 177),
(152, 114, 159),
(104, 160, 179),
(197, 200, 198),
(154, 155, 153),
],
[
(255, 38, 39),
(0, 130, 61),
(208, 132, 66),
(25, 132, 233),
(255, 44, 122),
(57, 130, 128),
(253, 253, 197),
],
)
| TerminalTheme |
python | sympy__sympy | sympy/functions/elementary/exponential.py | {
"start": 36790,
"end": 42582
} | class ____(DefinedFunction):
r"""
The Lambert W function $W(z)$ is defined as the inverse
function of $w \exp(w)$ [1]_.
Explanation
===========
In other words, the value of $W(z)$ is such that $z = W(z) \exp(W(z))$
for any complex number $z$. The Lambert W function is a multivalued
function with infinitely many branches $W_k(z)$, indexed by
$k \in \mathbb{Z}$. Each branch gives a different solution $w$
of the equation $z = w \exp(w)$.
The Lambert W function has two partially real branches: the
principal branch ($k = 0$) is real for real $z > -1/e$, and the
$k = -1$ branch is real for $-1/e < z < 0$. All branches except
$k = 0$ have a logarithmic singularity at $z = 0$.
Examples
========
>>> from sympy import LambertW
>>> LambertW(1.2)
0.635564016364870
>>> LambertW(1.2, -1).n()
-1.34747534407696 - 4.41624341514535*I
>>> LambertW(-1).is_real
False
References
==========
.. [1] https://en.wikipedia.org/wiki/Lambert_W_function
"""
_singularities = (-Pow(S.Exp1, -1, evaluate=False), S.ComplexInfinity)
@classmethod
def eval(cls, x, k=None):
if k == S.Zero:
return cls(x)
elif k is None:
k = S.Zero
if k.is_zero:
if x.is_zero:
return S.Zero
if x is S.Exp1:
return S.One
if x == -1/S.Exp1:
return S.NegativeOne
if x == -log(2)/2:
return -log(2)
if x == 2*log(2):
return log(2)
if x == -pi/2:
return I*pi/2
if x == exp(1 + S.Exp1):
return S.Exp1
if x is S.Infinity:
return S.Infinity
if fuzzy_not(k.is_zero):
if x.is_zero:
return S.NegativeInfinity
if k is S.NegativeOne:
if x == -pi/2:
return -I*pi/2
elif x == -1/S.Exp1:
return S.NegativeOne
elif x == -2*exp(-2):
return -Integer(2)
def fdiff(self, argindex=1):
"""
Return the first derivative of this function.
"""
x = self.args[0]
if len(self.args) == 1:
if argindex == 1:
return LambertW(x)/(x*(1 + LambertW(x)))
else:
k = self.args[1]
if argindex == 1:
return LambertW(x, k)/(x*(1 + LambertW(x, k)))
raise ArgumentIndexError(self, argindex)
def _eval_is_extended_real(self):
x = self.args[0]
if len(self.args) == 1:
k = S.Zero
else:
k = self.args[1]
if k.is_zero:
if (x + 1/S.Exp1).is_positive:
return True
elif (x + 1/S.Exp1).is_nonpositive:
return False
elif (k + 1).is_zero:
if x.is_negative and (x + 1/S.Exp1).is_positive:
return True
elif x.is_nonpositive or (x + 1/S.Exp1).is_nonnegative:
return False
elif fuzzy_not(k.is_zero) and fuzzy_not((k + 1).is_zero):
if x.is_extended_real:
return False
def _eval_is_finite(self):
return self.args[0].is_finite
def _eval_is_algebraic(self):
s = self.func(*self.args)
if s.func == self.func:
if fuzzy_not(self.args[0].is_zero) and self.args[0].is_algebraic:
return False
else:
return s.is_algebraic
def _eval_as_leading_term(self, x, logx, cdir):
if len(self.args) == 1:
arg = self.args[0]
arg0 = arg.subs(x, 0).cancel()
if not arg0.is_zero:
return self.func(arg0)
return arg.as_leading_term(x)
def _eval_nseries(self, x, n, logx, cdir=0):
if len(self.args) == 1:
from sympy.functions.elementary.integers import ceiling
from sympy.series.order import Order
arg = self.args[0].nseries(x, n=n, logx=logx)
lt = arg.as_leading_term(x, logx=logx)
lte = 1
if lt.is_Pow:
lte = lt.exp
if ceiling(n/lte) >= 1:
s = Add(*[(-S.One)**(k - 1)*Integer(k)**(k - 2)/
factorial(k - 1)*arg**k for k in range(1, ceiling(n/lte))])
s = expand_multinomial(s)
else:
s = S.Zero
return s + Order(x**n, x)
return super()._eval_nseries(x, n, logx)
def _eval_is_zero(self):
x = self.args[0]
if len(self.args) == 1:
return x.is_zero
else:
return fuzzy_and([x.is_zero, self.args[1].is_zero])
@cacheit
def _log_atan_table():
return {
# first quadrant only
sqrt(3): pi / 3,
1: pi / 4,
sqrt(5 - 2 * sqrt(5)): pi / 5,
sqrt(2) * sqrt(5 - sqrt(5)) / (1 + sqrt(5)): pi / 5,
sqrt(5 + 2 * sqrt(5)): pi * Rational(2, 5),
sqrt(2) * sqrt(sqrt(5) + 5) / (-1 + sqrt(5)): pi * Rational(2, 5),
sqrt(3) / 3: pi / 6,
sqrt(2) - 1: pi / 8,
sqrt(2 - sqrt(2)) / sqrt(sqrt(2) + 2): pi / 8,
sqrt(2) + 1: pi * Rational(3, 8),
sqrt(sqrt(2) + 2) / sqrt(2 - sqrt(2)): pi * Rational(3, 8),
sqrt(1 - 2 * sqrt(5) / 5): pi / 10,
(-sqrt(2) + sqrt(10)) / (2 * sqrt(sqrt(5) + 5)): pi / 10,
sqrt(1 + 2 * sqrt(5) / 5): pi * Rational(3, 10),
(sqrt(2) + sqrt(10)) / (2 * sqrt(5 - sqrt(5))): pi * Rational(3, 10),
2 - sqrt(3): pi / 12,
(-1 + sqrt(3)) / (1 + sqrt(3)): pi / 12,
2 + sqrt(3): pi * Rational(5, 12),
(1 + sqrt(3)) / (-1 + sqrt(3)): pi * Rational(5, 12)
}
| LambertW |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/stripe/tests.py | {
"start": 240,
"end": 1750
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = StripeProvider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""{
"id": "acct_sometestid",
"object": "account",
"business_logo": null,
"business_name": null,
"business_url": "example.com",
"charges_enabled": true,
"country": "SE",
"currencies_supported": [
"usd",
"eur",
"sek"
],
"default_currency": "eur",
"details_submitted": true,
"display_name": "Test",
"email": "test@example.com",
"managed": false,
"metadata": {},
"statement_descriptor": "TEST.COM",
"support_phone": "+460123456789",
"timezone": "Europe/Stockholm",
"transfers_enabled": true
}""",
)
def get_expected_to_str(self):
return "test@example.com"
def get_login_response_json(self, with_refresh_token=True):
rt = ""
if with_refresh_token:
rt = ',"refresh_token": "testrf"'
return (
"""{
"uid":"weibo",
"access_token":"testac",
"livemode": false,
"token_type": "bearer",
"stripe_publishable_key": "pk_test_someteskey",
"stripe_user_id": "acct_sometestid",
"scope": "read_write"
%s }"""
% rt
)
| StripeTests |
python | huggingface__transformers | tests/models/dpt/test_modeling_dpt_auto_backbone.py | {
"start": 1447,
"end": 4671
} | class ____:
def __init__(
self,
parent,
batch_size=2,
num_channels=3,
image_size=32,
patch_size=16,
use_labels=True,
num_labels=3,
is_training=True,
hidden_size=4,
num_hidden_layers=2,
num_attention_heads=2,
intermediate_size=8,
out_features=["stage1", "stage2"],
apply_layernorm=False,
reshape_hidden_states=False,
neck_hidden_sizes=[2, 2],
fusion_hidden_size=6,
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.patch_size = patch_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.out_features = out_features
self.apply_layernorm = apply_layernorm
self.reshape_hidden_states = reshape_hidden_states
self.use_labels = use_labels
self.num_labels = num_labels
self.is_training = is_training
self.neck_hidden_sizes = neck_hidden_sizes
self.fusion_hidden_size = fusion_hidden_size
# DPT's sequence length
self.seq_length = (self.image_size // self.patch_size) ** 2 + 1
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return DPTConfig(
backbone_config=self.get_backbone_config(),
backbone=None,
neck_hidden_sizes=self.neck_hidden_sizes,
fusion_hidden_size=self.fusion_hidden_size,
)
def get_backbone_config(self):
return Dinov2Config(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
is_training=self.is_training,
out_features=self.out_features,
reshape_hidden_states=self.reshape_hidden_states,
)
def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
config.num_labels = self.num_labels
model = DPTForDepthEstimation(config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| DPTModelTester |
python | gevent__gevent | src/gevent/tests/test__socket_dns6.py | {
"start": 1177,
"end": 2890
} | class ____(TestCase):
NORMALIZE_GHBA_IGNORE_ALIAS = True
# host that only has AAAA record
host = 'aaaa.test-ipv6.com'
def _normalize_result_gethostbyaddr(self, result):
# This part of the test is effectively disabled. There are multiple address
# that resolve and which ones you get depend on the settings
# of the system and ares. They don't match exactly.
return ()
if RESOLVER_ARES and PY2:
def _normalize_result_getnameinfo(self, result):
# Beginning 2020-07-23,
# c-ares returns a scope id on the result:
# ('2001:470:1:18::115%0', 'http')
# The standard library does not (on linux or os x).
# I've only seen '%0', so only remove that
ipaddr, service = result
if ipaddr.endswith('%0'):
ipaddr = ipaddr[:-2]
return (ipaddr, service)
if not OSX and RESOLVER_DNSPYTHON:
# It raises gaierror instead of socket.error,
# which is not great and leads to failures.
def _run_test_getnameinfo(self, *_args, **_kwargs):
return (), 0, (), 0
def _run_test_gethostbyname(self, *_args, **_kwargs):
raise unittest.SkipTest("gethostbyname[_ex] does not support IPV6")
_run_test_gethostbyname_ex = _run_test_gethostbyname
def test_empty(self):
self._test('getaddrinfo', self.host, 'http')
def test_inet(self):
self._test('getaddrinfo', self.host, None, socket.AF_INET)
def test_inet6(self):
self._test('getaddrinfo', self.host, None, socket.AF_INET6)
def test_unspec(self):
self._test('getaddrinfo', self.host, None, socket.AF_UNSPEC)
| Test6 |
python | openai__openai-python | src/openai/types/realtime/realtime_conversation_item_assistant_message_param.py | {
"start": 914,
"end": 1683
} | class ____(TypedDict, total=False):
content: Required[Iterable[Content]]
"""The content of the message."""
role: Required[Literal["assistant"]]
"""The role of the message sender. Always `assistant`."""
type: Required[Literal["message"]]
"""The type of the item. Always `message`."""
id: str
"""The unique ID of the item.
This may be provided by the client or generated by the server.
"""
object: Literal["realtime.item"]
"""Identifier for the API object being returned - always `realtime.item`.
Optional when creating a new item.
"""
status: Literal["completed", "incomplete", "in_progress"]
"""The status of the item. Has no effect on the conversation."""
| RealtimeConversationItemAssistantMessageParam |
python | kubernetes-client__python | kubernetes/client/models/v1beta1_resource_claim_spec.py | {
"start": 383,
"end": 3469
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'devices': 'V1beta1DeviceClaim'
}
attribute_map = {
'devices': 'devices'
}
def __init__(self, devices=None, local_vars_configuration=None): # noqa: E501
"""V1beta1ResourceClaimSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._devices = None
self.discriminator = None
if devices is not None:
self.devices = devices
@property
def devices(self):
"""Gets the devices of this V1beta1ResourceClaimSpec. # noqa: E501
:return: The devices of this V1beta1ResourceClaimSpec. # noqa: E501
:rtype: V1beta1DeviceClaim
"""
return self._devices
@devices.setter
def devices(self, devices):
"""Sets the devices of this V1beta1ResourceClaimSpec.
:param devices: The devices of this V1beta1ResourceClaimSpec. # noqa: E501
:type: V1beta1DeviceClaim
"""
self._devices = devices
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1ResourceClaimSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1ResourceClaimSpec):
return True
return self.to_dict() != other.to_dict()
| V1beta1ResourceClaimSpec |
python | gevent__gevent | src/greentest/3.9/test_socket.py | {
"start": 207132,
"end": 207994
} | class ____(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
| UDPTimeoutTest |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/sql_datasource.py | {
"start": 9525,
"end": 10478
} | class ____(FluentBaseModel):
column_name: str
method_name: str
sort_ascending: bool = True
@property
def columns(self) -> list[str]:
return [self.column_name]
def param_defaults(self, sql_asset: _SQLAsset) -> list[dict]:
batch_identifier_data = _partitioner_and_sql_asset_to_batch_identifier_data(
partitioner=self, asset=sql_asset
)
params: list[dict] = []
for identifer_data in batch_identifier_data:
params.append({self.param_names[0]: identifer_data[self.column_name]})
return params
@property
def param_names(self) -> list[str]:
raise NotImplementedError
def partitioner_method_kwargs(self) -> Dict[str, Any]:
raise NotImplementedError
def batch_parameters_to_batch_spec_kwarg_identifiers(
self, options: BatchParameters
) -> Dict[str, Any]:
raise NotImplementedError
| _PartitionerOneColumnOneParam |
python | ashishps1__awesome-system-design-resources | implementations/python/load_balancing_algorithms/least_response_time.py | {
"start": 27,
"end": 1073
} | class ____:
def __init__(self, servers):
self.servers = servers
self.response_times = [0] * len(servers)
def get_next_server(self):
min_response_time = min(self.response_times)
min_index = self.response_times.index(min_response_time)
return self.servers[min_index]
def update_response_time(self, server, response_time):
index = self.servers.index(server)
self.response_times[index] = response_time
# Simulated server response time function
def simulate_response_time():
# Simulating response time with random delay
delay = random.uniform(0.1, 1.0)
time.sleep(delay)
return delay
# Example usage
servers = ["Server1", "Server2", "Server3"]
load_balancer = LeastResponseTime(servers)
for i in range(6):
server = load_balancer.get_next_server()
print(f"Request {i + 1} -> {server}")
response_time = simulate_response_time()
load_balancer.update_response_time(server, response_time)
print(f"Response Time: {response_time:.2f}s") | LeastResponseTime |
python | ray-project__ray | python/ray/air/execution/resources/fixed.py | {
"start": 1121,
"end": 5544
} | class ____(ResourceManager):
"""Fixed budget based resource manager.
This resource manager keeps track of a fixed set of resources. When resources
are acquired, they are subtracted from the budget. When resources are freed,
they are added back to the budget.
The resource manager still requires resources to be requested before they become
available. However, because the resource requests are virtual, this will not
trigger autoscaling.
Additionally, resources are not reserved on request, only on acquisition. Thus,
acquiring a resource can change the availability of other requests. Note that
this behavior may be changed in future implementations.
The fixed resource manager does not support placement strategies. Using
``STRICT_SPREAD`` will result in an error. ``STRICT_PACK`` will succeed only
within a placement group bundle. All other placement group arguments will be
ignored.
Args:
total_resources: Budget of resources to manage. Defaults to all available
resources in the current task or all cluster resources (if outside a task).
"""
_resource_cls: AcquiredResources = FixedAcquiredResources
def __init__(self, total_resources: Optional[Dict[str, float]] = None):
rtc = ray.get_runtime_context()
if not total_resources:
if rtc.worker.mode in {None, SCRIPT_MODE, LOCAL_MODE}:
total_resources = ray.cluster_resources()
else:
total_resources = rtc.get_assigned_resources()
# If we are in a placement group, all of our resources will be in a bundle
# and thus fulfill requirements of STRICT_PACK - but only if child tasks
# are captured by the pg.
self._allow_strict_pack = (
ray.util.get_current_placement_group() is not None
and rtc.should_capture_child_tasks_in_placement_group
)
self._total_resources = total_resources
self._requested_resources = []
self._used_resources = []
@property
def _available_resources(self) -> Dict[str, float]:
available_resources = self._total_resources.copy()
for used_resources in self._used_resources:
all_resources = used_resources.required_resources
for k, v in all_resources.items():
available_resources[k] = (
available_resources[k] * _DIGITS - v * _DIGITS
) / _DIGITS
return available_resources
def request_resources(self, resource_request: ResourceRequest):
if resource_request.strategy == "STRICT_SPREAD" or (
not self._allow_strict_pack and resource_request.strategy == "STRICT_PACK"
):
raise RuntimeError(
f"Requested a resource with placement strategy "
f"{resource_request.strategy}, but this cannot be fulfilled by a "
f"FixedResourceManager. In a nested setting, please set the inner "
f"placement strategy to be less restrictive (i.e. no STRICT_ strategy)."
)
self._requested_resources.append(resource_request)
def cancel_resource_request(self, resource_request: ResourceRequest):
self._requested_resources.remove(resource_request)
def has_resources_ready(self, resource_request: ResourceRequest) -> bool:
if resource_request not in self._requested_resources:
return False
available_resources = self._available_resources
all_resources = resource_request.required_resources
for k, v in all_resources.items():
if available_resources.get(k, 0.0) < v:
return False
return True
def acquire_resources(
self, resource_request: ResourceRequest
) -> Optional[AcquiredResources]:
if not self.has_resources_ready(resource_request):
return None
self._used_resources.append(resource_request)
return self._resource_cls(
bundles=resource_request.bundles, resource_request=resource_request
)
def free_resources(self, acquired_resource: AcquiredResources):
resources = acquired_resource.resource_request
self._used_resources.remove(resources)
def clear(self):
# Reset internal state
self._requested_resources = []
self._used_resources = []
| FixedResourceManager |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/inputs.py | {
"start": 8792,
"end": 9158
} | class ____(graphene.InputObjectType):
eventType = graphene.NonNull(GrapheneRunlessAssetEventType)
assetKey = graphene.NonNull(GrapheneAssetKeyInput)
partitionKeys = graphene.InputField(graphene.List(graphene.String))
description = graphene.String()
class Meta:
name = "ReportRunlessAssetEventsParams"
| GrapheneReportRunlessAssetEventsParams |
python | openai__openai-python | src/openai/types/beta/realtime/session_update_event.py | {
"start": 524,
"end": 883
} | class ____(BaseModel):
anchor: Literal["created_at"]
"""The anchor point for the ephemeral token expiration.
Only `created_at` is currently supported.
"""
seconds: Optional[int] = None
"""The number of seconds from the anchor point to the expiration.
Select a value between `10` and `7200`.
"""
| SessionClientSecretExpiresAfter |
python | dateutil__dateutil | src/dateutil/rrule.py | {
"start": 42790,
"end": 50715
} | class ____(object):
__slots__ = ["rrule", "lastyear", "lastmonth",
"yearlen", "nextyearlen", "yearordinal", "yearweekday",
"mmask", "mrange", "mdaymask", "nmdaymask",
"wdaymask", "wnomask", "nwdaymask", "eastermask"]
def __init__(self, rrule):
for attr in self.__slots__:
setattr(self, attr, None)
self.rrule = rrule
def rebuild(self, year, month):
# Every mask is 7 days longer to handle cross-year weekly periods.
rr = self.rrule
if year != self.lastyear:
self.yearlen = 365 + calendar.isleap(year)
self.nextyearlen = 365 + calendar.isleap(year + 1)
firstyday = datetime.date(year, 1, 1)
self.yearordinal = firstyday.toordinal()
self.yearweekday = firstyday.weekday()
wday = datetime.date(year, 1, 1).weekday()
if self.yearlen == 365:
self.mmask = M365MASK
self.mdaymask = MDAY365MASK
self.nmdaymask = NMDAY365MASK
self.wdaymask = WDAYMASK[wday:]
self.mrange = M365RANGE
else:
self.mmask = M366MASK
self.mdaymask = MDAY366MASK
self.nmdaymask = NMDAY366MASK
self.wdaymask = WDAYMASK[wday:]
self.mrange = M366RANGE
if not rr._byweekno:
self.wnomask = None
else:
self.wnomask = [0]*(self.yearlen+7)
# no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7
if no1wkst >= 4:
no1wkst = 0
# Number of days in the year, plus the days we got
# from last year.
wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7
else:
# Number of days in the year, minus the days we
# left in last year.
wyearlen = self.yearlen-no1wkst
div, mod = divmod(wyearlen, 7)
numweeks = div+mod//4
for n in rr._byweekno:
if n < 0:
n += numweeks+1
if not (0 < n <= numweeks):
continue
if n > 1:
i = no1wkst+(n-1)*7
if no1wkst != firstwkst:
i -= 7-firstwkst
else:
i = no1wkst
for j in range(7):
self.wnomask[i] = 1
i += 1
if self.wdaymask[i] == rr._wkst:
break
if 1 in rr._byweekno:
# Check week number 1 of next year as well
# TODO: Check -numweeks for next year.
i = no1wkst+numweeks*7
if no1wkst != firstwkst:
i -= 7-firstwkst
if i < self.yearlen:
# If week starts in next year, we
# don't care about it.
for j in range(7):
self.wnomask[i] = 1
i += 1
if self.wdaymask[i] == rr._wkst:
break
if no1wkst:
# Check last week number of last year as
# well. If no1wkst is 0, either the year
# started on week start, or week number 1
# got days from last year, so there are no
# days from last year's last week number in
# this year.
if -1 not in rr._byweekno:
lyearweekday = datetime.date(year-1, 1, 1).weekday()
lno1wkst = (7-lyearweekday+rr._wkst) % 7
lyearlen = 365+calendar.isleap(year-1)
if lno1wkst >= 4:
lno1wkst = 0
lnumweeks = 52+(lyearlen +
(lyearweekday-rr._wkst) % 7) % 7//4
else:
lnumweeks = 52+(self.yearlen-no1wkst) % 7//4
else:
lnumweeks = -1
if lnumweeks in rr._byweekno:
for i in range(no1wkst):
self.wnomask[i] = 1
if (rr._bynweekday and (month != self.lastmonth or
year != self.lastyear)):
ranges = []
if rr._freq == YEARLY:
if rr._bymonth:
for month in rr._bymonth:
ranges.append(self.mrange[month-1:month+1])
else:
ranges = [(0, self.yearlen)]
elif rr._freq == MONTHLY:
ranges = [self.mrange[month-1:month+1]]
if ranges:
# Weekly frequency won't get here, so we may not
# care about cross-year weekly periods.
self.nwdaymask = [0]*self.yearlen
for first, last in ranges:
last -= 1
for wday, n in rr._bynweekday:
if n < 0:
i = last+(n+1)*7
i -= (self.wdaymask[i]-wday) % 7
else:
i = first+(n-1)*7
i += (7-self.wdaymask[i]+wday) % 7
if first <= i <= last:
self.nwdaymask[i] = 1
if rr._byeaster:
self.eastermask = [0]*(self.yearlen+7)
eyday = easter.easter(year).toordinal()-self.yearordinal
for offset in rr._byeaster:
self.eastermask[eyday+offset] = 1
self.lastyear = year
self.lastmonth = month
def ydayset(self, year, month, day):
return list(range(self.yearlen)), 0, self.yearlen
def mdayset(self, year, month, day):
dset = [None]*self.yearlen
start, end = self.mrange[month-1:month+1]
for i in range(start, end):
dset[i] = i
return dset, start, end
def wdayset(self, year, month, day):
# We need to handle cross-year weeks here.
dset = [None]*(self.yearlen+7)
i = datetime.date(year, month, day).toordinal()-self.yearordinal
start = i
for j in range(7):
dset[i] = i
i += 1
# if (not (0 <= i < self.yearlen) or
# self.wdaymask[i] == self.rrule._wkst):
# This will cross the year boundary, if necessary.
if self.wdaymask[i] == self.rrule._wkst:
break
return dset, start, i
def ddayset(self, year, month, day):
dset = [None] * self.yearlen
i = datetime.date(year, month, day).toordinal() - self.yearordinal
dset[i] = i
return dset, i, i + 1
def htimeset(self, hour, minute, second):
tset = []
rr = self.rrule
for minute in rr._byminute:
for second in rr._bysecond:
tset.append(datetime.time(hour, minute, second,
tzinfo=rr._tzinfo))
tset.sort()
return tset
def mtimeset(self, hour, minute, second):
tset = []
rr = self.rrule
for second in rr._bysecond:
tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
tset.sort()
return tset
def stimeset(self, hour, minute, second):
return (datetime.time(hour, minute, second,
tzinfo=self.rrule._tzinfo),)
| _iterinfo |
python | apache__airflow | providers/microsoft/azure/tests/unit/microsoft/azure/hooks/test_msgraph.py | {
"start": 2431,
"end": 12822
} | class ____:
@staticmethod
def assert_tenant_id(request_adapter: RequestAdapter, expected_tenant_id: str):
adapter: HttpxRequestAdapter = cast("HttpxRequestAdapter", request_adapter)
auth_provider: BaseBearerTokenAuthenticationProvider = cast(
"BaseBearerTokenAuthenticationProvider",
adapter._authentication_provider,
)
access_token_provider: AzureIdentityAccessTokenProvider = cast(
"AzureIdentityAccessTokenProvider",
auth_provider.access_token_provider,
)
credentials: MsalCredential = cast("MsalCredential", access_token_provider._credentials)
tenant_id = credentials._tenant_id
assert tenant_id == expected_tenant_id
def test_get_conn(self):
with patch_hook():
hook = KiotaRequestAdapterHook(conn_id="msgraph_api")
with pytest.warns(
DeprecationWarning,
match="get_conn is deprecated, please use the async get_async_conn method!",
):
actual = hook.get_conn()
assert isinstance(actual, HttpxRequestAdapter)
assert actual.base_url == "https://graph.microsoft.com/v1.0/"
@pytest.mark.asyncio
async def test_get_async_conn(self):
with patch_hook():
hook = KiotaRequestAdapterHook(conn_id="msgraph_api")
actual = await hook.get_async_conn()
assert isinstance(actual, HttpxRequestAdapter)
assert actual.base_url == "https://graph.microsoft.com/v1.0/"
@pytest.mark.asyncio
async def test_get_async_conn_with_custom_base_url(self):
with patch_hook(
side_effect=lambda conn_id: get_airflow_connection(
conn_id=conn_id,
host="api.fabric.microsoft.com",
api_version="v1",
)
):
hook = KiotaRequestAdapterHook(conn_id="msgraph_api")
actual = await hook.get_async_conn()
assert isinstance(actual, HttpxRequestAdapter)
assert actual.base_url == "https://api.fabric.microsoft.com/v1/"
@pytest.mark.asyncio
async def test_get_async_conn_with_proxies_as_string(self):
with patch_hook(
side_effect=lambda conn_id: get_airflow_connection(
conn_id=conn_id,
host="api.fabric.microsoft.com",
api_version="v1",
proxies="{'http': 'http://proxy:80', 'https': 'https://proxy:80'}",
)
):
hook = KiotaRequestAdapterHook(conn_id="msgraph_api")
actual = await hook.get_async_conn()
assert isinstance(actual, HttpxRequestAdapter)
assert actual._http_client._mounts.get(URLPattern("http://"))
assert actual._http_client._mounts.get(URLPattern("https://"))
@pytest.mark.asyncio
async def test_get_async_conn_with_proxies_as_invalid_string(self):
with patch_hook(
side_effect=lambda conn_id: get_airflow_connection(
conn_id=conn_id,
host="api.fabric.microsoft.com",
api_version="v1",
proxies='["http://proxy:80", "https://proxy:80"]',
)
):
hook = KiotaRequestAdapterHook(conn_id="msgraph_api")
with pytest.raises(AirflowConfigException):
await hook.get_async_conn()
@pytest.mark.asyncio
async def test_get_async_conn_with_proxies_as_json(self):
with patch_hook(
side_effect=lambda conn_id: get_airflow_connection(
conn_id=conn_id,
host="api.fabric.microsoft.com",
api_version="v1",
proxies='{"http": "http://proxy:80", "https": "https://proxy:80"}',
)
):
hook = KiotaRequestAdapterHook(conn_id="msgraph_api")
actual = await hook.get_async_conn()
assert isinstance(actual, HttpxRequestAdapter)
assert actual._http_client._mounts.get(URLPattern("http://"))
assert actual._http_client._mounts.get(URLPattern("https://"))
def test_scopes_when_default(self):
with patch_hook():
hook = KiotaRequestAdapterHook(conn_id="msgraph_api")
assert hook.scopes == [KiotaRequestAdapterHook.DEFAULT_SCOPE]
def test_scopes_when_passed_as_string(self):
with patch_hook():
hook = KiotaRequestAdapterHook(
conn_id="msgraph_api", scopes="https://microsoft.sharepoint.com/.default"
)
assert hook.scopes == ["https://microsoft.sharepoint.com/.default"]
def test_scopes_when_passed_as_list(self):
with patch_hook():
hook = KiotaRequestAdapterHook(
conn_id="msgraph_api", scopes=["https://microsoft.sharepoint.com/.default"]
)
assert hook.scopes == ["https://microsoft.sharepoint.com/.default"]
def test_api_version(self):
with patch_hook():
hook = KiotaRequestAdapterHook(conn_id="msgraph_api", api_version=APIVersion.v1.value)
assert hook.api_version == APIVersion.v1.value
def test_api_version_when_none_is_explicitly_passed_as_api_version(self):
with patch_hook():
hook = KiotaRequestAdapterHook(conn_id="msgraph_api", api_version=None)
assert not hook.api_version
def test_get_api_version_when_empty_config_dict(self):
with patch_hook():
hook = KiotaRequestAdapterHook(conn_id="msgraph_api")
actual = hook.get_api_version({})
assert actual == APIVersion.v1.value
def test_get_api_version_when_api_version_in_config_dict(self):
with patch_hook():
hook = KiotaRequestAdapterHook(conn_id="msgraph_api")
actual = hook.get_api_version({"api_version": "beta"})
assert actual == APIVersion.beta.value
def test_get_api_version_when_custom_api_version_in_config_dict(self):
with patch_hook():
hook = KiotaRequestAdapterHook(conn_id="msgraph_api", api_version="v1")
actual = hook.get_api_version({})
assert actual == "v1"
def test_get_host_when_connection_has_scheme_and_host(self):
with patch_hook():
hook = KiotaRequestAdapterHook(conn_id="msgraph_api")
connection = mock_connection(schema="https", host="graph.microsoft.de")
actual = hook.get_host(connection)
assert actual == NationalClouds.Germany.value
def test_get_host_when_connection_has_no_scheme_or_host(self):
with patch_hook():
hook = KiotaRequestAdapterHook(conn_id="msgraph_api")
connection = mock_connection()
actual = hook.get_host(connection)
assert actual == NationalClouds.Global.value
@pytest.mark.asyncio
async def test_tenant_id(self):
with patch_hook():
hook = KiotaRequestAdapterHook(conn_id="msgraph_api")
actual = await hook.get_async_conn()
self.assert_tenant_id(actual, "tenant-id")
@pytest.mark.asyncio
async def test_azure_tenant_id(self):
with patch_hook(
side_effect=lambda conn_id: get_airflow_connection(
conn_id=conn_id,
azure_tenant_id="azure-tenant-id",
)
):
hook = KiotaRequestAdapterHook(conn_id="msgraph_api")
actual = await hook.get_async_conn()
self.assert_tenant_id(actual, "azure-tenant-id")
def test_encoded_query_parameters(self):
actual = KiotaRequestAdapterHook.encoded_query_parameters(
query_parameters={"$expand": "reports,users,datasets,dataflows,dashboards", "$top": 5000},
)
assert actual == {"%24expand": "reports,users,datasets,dataflows,dashboards", "%24top": 5000}
@pytest.mark.asyncio
async def test_request_information_with_custom_host(self):
with patch_hook(
side_effect=lambda conn_id: get_airflow_connection(
conn_id=conn_id,
host="api.fabric.microsoft.com",
api_version="v1",
)
):
hook = KiotaRequestAdapterHook(conn_id="msgraph_api")
request_info = hook.request_information(url="myorg/admin/apps", query_parameters={"$top": 5000})
request_adapter = await hook.get_async_conn()
request_adapter.set_base_url_for_request_information(request_info)
assert isinstance(request_info, RequestInformation)
assert isinstance(request_adapter, HttpxRequestAdapter)
assert request_info.url == "https://api.fabric.microsoft.com/v1/myorg/admin/apps?%24top=5000"
@pytest.mark.asyncio
async def test_throw_failed_responses_with_text_plain_content_type(self):
with patch_hook():
hook = KiotaRequestAdapterHook(conn_id="msgraph_api")
response = Mock(spec=Response)
response.headers = {"content-type": "text/plain"}
response.status_code = 429
response.content = b"TenantThrottleThresholdExceeded"
response.is_success = False
span = Mock(spec=Span)
conn = await hook.get_async_conn()
actual = await conn.get_root_parse_node(response, span, span)
assert isinstance(actual, TextParseNode)
assert actual.get_str_value() == "TenantThrottleThresholdExceeded"
@pytest.mark.asyncio
async def test_throw_failed_responses_with_application_json_content_type(self):
with patch_hook():
hook = KiotaRequestAdapterHook(conn_id="msgraph_api")
response = Mock(spec=Response)
response.headers = {"content-type": "application/json"}
response.status_code = 429
response.content = b'{"error": {"code": "TenantThrottleThresholdExceeded"}}'
response.is_success = False
span = Mock(spec=Span)
conn = await hook.get_async_conn()
actual = await conn.get_root_parse_node(response, span, span)
assert isinstance(actual, JsonParseNode)
error_code = actual.get_child_node("error").get_child_node("code").get_str_value()
assert error_code == "TenantThrottleThresholdExceeded"
| TestKiotaRequestAdapterHook |
python | xlwings__xlwings | xlwings/main.py | {
"start": 119799,
"end": 121073
} | class ____(Collection):
"""
A collection of all :meth:`chart <Chart>` objects on the specified sheet:
>>> import xlwings as xw
>>> xw.books['Book1'].sheets[0].charts
Charts([<Chart 'Chart 1' in <Sheet [Book1]Sheet1>>,
<Chart 'Chart 1' in <Sheet [Book1]Sheet1>>])
.. versionadded:: 0.9.0
"""
_wrap = Chart
def add(self, left=0, top=0, width=355, height=211):
"""
Creates a new chart on the specified sheet.
Arguments
---------
left : float, default 0
left position in points
top : float, default 0
top position in points
width : float, default 355
width in points
height : float, default 211
height in points
Returns
-------
Chart
Examples
--------
>>> import xlwings as xw
>>> sht = xw.Book().sheets[0]
>>> sht.range('A1').value = [['Foo1', 'Foo2'], [1, 2]]
>>> chart = sht.charts.add()
>>> chart.set_source_data(sht.range('A1').expand())
>>> chart.chart_type = 'line'
>>> chart.name
'Chart1'
"""
impl = self.impl.add(left, top, width, height)
return Chart(impl=impl)
| Charts |
python | huggingface__transformers | src/transformers/models/lxmert/modeling_lxmert.py | {
"start": 26673,
"end": 27207
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
| LxmertPooler |
python | sqlalchemy__sqlalchemy | test/dialect/mssql/test_engine.py | {
"start": 19798,
"end": 21692
} | class ____(fixtures.TestBase):
@testing.fixture
def mock_conn_scalar(self):
return lambda text: Mock(
exec_driver_sql=Mock(
return_value=Mock(scalar=Mock(return_value=text))
)
)
def test_pymssql_version(self, mock_conn_scalar):
dialect = pymssql.MSDialect_pymssql()
for vers in [
"Microsoft SQL Server Blah - 11.0.9216.62",
"Microsoft SQL Server (XYZ) - 11.0.9216.62 \n"
"Jul 18 2014 22:00:21 \nCopyright (c) Microsoft Corporation",
"Microsoft SQL Azure (RTM) - 11.0.9216.62 \n"
"Jul 18 2014 22:00:21 \nCopyright (c) Microsoft Corporation",
]:
conn = mock_conn_scalar(vers)
eq_(dialect._get_server_version_info(conn), (11, 0, 9216, 62))
def test_pyodbc_version_productversion(self, mock_conn_scalar):
dialect = pyodbc.MSDialect_pyodbc()
conn = mock_conn_scalar("11.0.9216.62")
eq_(dialect._get_server_version_info(conn), (11, 0, 9216, 62))
def test_pyodbc_version_fallback(self):
dialect = pyodbc.MSDialect_pyodbc()
dialect.dbapi = Mock()
for vers, expected in [
("11.0.9216.62", (11, 0, 9216, 62)),
("notsqlserver.11.foo.0.9216.BAR.62", (11, 0, 9216, 62)),
("Not SQL Server Version 10.5", (5,)),
]:
conn = Mock(
exec_driver_sql=Mock(
return_value=Mock(
scalar=Mock(
side_effect=exc.DBAPIError("stmt", "params", None)
)
)
),
connection=Mock(
dbapi_connection=Mock(getinfo=Mock(return_value=vers)),
),
)
eq_(dialect._get_server_version_info(conn), expected)
| VersionDetectionTest |
python | walkccc__LeetCode | solutions/1933. Check if String Is Decomposable Into Value-Equal Substrings/1933.py | {
"start": 0,
"end": 320
} | class ____:
def isDecomposable(self, s: str) -> bool:
twos = 0
for _, group in itertools.groupby(s):
groupLength = len(list(group))
if groupLength % 3 == 1:
return False
if groupLength % 3 == 2:
twos += 1
if twos > 1:
return False
return twos == 1
| Solution |
python | huggingface__transformers | tests/models/aria/test_image_processing_aria.py | {
"start": 1052,
"end": 5804
} | class ____:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
num_images=1,
min_resolution=30,
max_resolution=40,
size=None,
max_image_size=980,
min_image_size=336,
split_resolutions=None,
split_image=True,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
do_convert_rgb=True,
resample=PILImageResampling.BICUBIC,
):
self.size = size if size is not None else {"longest_edge": max_resolution}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.num_images = num_images
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.resample = resample
self.max_image_size = max_image_size
self.min_image_size = min_image_size
self.split_resolutions = split_resolutions if split_resolutions is not None else [[980, 980]]
self.split_image = split_image
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_convert_rgb = do_convert_rgb
def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"max_image_size": self.max_image_size,
"min_image_size": self.min_image_size,
"split_resolutions": self.split_resolutions,
"split_image": self.split_image,
"do_convert_rgb": self.do_convert_rgb,
"do_normalize": self.do_normalize,
"resample": self.resample,
}
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to AriaImageProcessor,
assuming do_resize is set to True. The expected size in that case the max image size.
"""
return self.max_image_size, self.max_image_size
def expected_output_image_shape(self, images):
height, width = self.get_expected_values(images, batched=True)
return self.num_channels, height, width
def prepare_image_inputs(
self,
batch_size=None,
min_resolution=None,
max_resolution=None,
num_channels=None,
num_images=None,
size_divisor=None,
equal_resolution=False,
numpify=False,
torchify=False,
):
"""This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
One can specify whether the images are of the same resolution or not.
"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
batch_size = batch_size if batch_size is not None else self.batch_size
min_resolution = min_resolution if min_resolution is not None else self.min_resolution
max_resolution = max_resolution if max_resolution is not None else self.max_resolution
num_channels = num_channels if num_channels is not None else self.num_channels
num_images = num_images if num_images is not None else self.num_images
images_list = []
for i in range(batch_size):
images = []
for j in range(num_images):
if equal_resolution:
width = height = max_resolution
else:
# To avoid getting image width/height 0
if size_divisor is not None:
# If `size_divisor` is defined, the image needs to have width/size >= `size_divisor`
min_resolution = max(size_divisor, min_resolution)
width, height = np.random.choice(np.arange(min_resolution, max_resolution), 2)
images.append(np.random.randint(255, size=(num_channels, width, height), dtype=np.uint8))
images_list.append(images)
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
images_list = [[Image.fromarray(np.moveaxis(image, 0, -1)) for image in images] for images in images_list]
if torchify:
images_list = [[torch.from_numpy(image) for image in images] for images in images_list]
if numpify:
# Numpy images are typically in channels last format
images_list = [[image.transpose(1, 2, 0) for image in images] for images in images_list]
return images_list
@require_torch
@require_vision
| AriaImageProcessingTester |
python | huggingface__transformers | tests/models/xglm/test_modeling_xglm.py | {
"start": 10757,
"end": 13165
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (XGLMModel, XGLMForCausalLM) if is_torch_available() else ()
pipeline_model_mapping = (
{"feature-extraction": XGLMModel, "text-generation": XGLMForCausalLM} if is_torch_available() else {}
)
test_missing_keys = False
def setUp(self):
self.model_tester = XGLMModelTester(self)
self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_xglm_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xglm_model(*config_and_inputs)
def test_xglm_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xglm_model_past(*config_and_inputs)
def test_xglm_model_att_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xglm_model_attention_mask_past(*config_and_inputs)
def test_xglm_model_past_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xglm_model_past_large_inputs(*config_and_inputs)
def test_xglm_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
def test_xglm_gradient_checkpointing(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)
def test_xglm_weight_initialization(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xglm_weight_initialization(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "facebook/xglm-564M"
model = XGLMModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.")
def test_model_parallelism(self):
super().test_model_parallelism()
@require_torch
| XGLMModelTest |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/hooks/sagemaker.py | {
"start": 5018,
"end": 60524
} | class ____(AwsBaseHook):
"""
Interact with Amazon SageMaker.
Provide thick wrapper around
:external+boto3:py:class:`boto3.client("sagemaker") <SageMaker.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
non_terminal_states = {"InProgress", "Stopping"}
endpoint_non_terminal_states = {"Creating", "Updating", "SystemUpdating", "RollingBack", "Deleting"}
pipeline_non_terminal_states = {"Executing", "Stopping"}
processing_job_non_terminal_states = {"InProgress", "Stopping"}
failed_states = {"Failed"}
processing_job_failed_states = {*failed_states, "Stopped"}
training_failed_states = {*failed_states, "Stopped"}
def __init__(self, *args, **kwargs):
super().__init__(client_type="sagemaker", *args, **kwargs)
self.s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
self.logs_hook = AwsLogsHook(aws_conn_id=self.aws_conn_id)
def tar_and_s3_upload(self, path: str, key: str, bucket: str) -> None:
"""
Tar the local file or directory and upload to s3.
:param path: local file or directory
:param key: s3 key
:param bucket: s3 bucket
"""
with tempfile.TemporaryFile() as temp_file:
if os.path.isdir(path):
files = [os.path.join(path, name) for name in os.listdir(path)]
else:
files = [path]
with tarfile.open(mode="w:gz", fileobj=temp_file) as tar_file:
for f in files:
tar_file.add(f, arcname=os.path.basename(f))
temp_file.seek(0)
self.s3_hook.load_file_obj(temp_file, key, bucket, replace=True)
def configure_s3_resources(self, config: dict) -> None:
"""
Extract the S3 operations from the configuration and execute them.
:param config: config of SageMaker operation
"""
s3_operations = config.pop("S3Operations", None)
if s3_operations is not None:
create_bucket_ops = s3_operations.get("S3CreateBucket", [])
upload_ops = s3_operations.get("S3Upload", [])
for op in create_bucket_ops:
self.s3_hook.create_bucket(bucket_name=op["Bucket"])
for op in upload_ops:
if op["Tar"]:
self.tar_and_s3_upload(op["Path"], op["Key"], op["Bucket"])
else:
self.s3_hook.load_file(op["Path"], op["Key"], op["Bucket"])
def check_s3_url(self, s3url: str) -> bool:
"""
Check if an S3 URL exists.
:param s3url: S3 url
"""
bucket, key = S3Hook.parse_s3_url(s3url)
if not self.s3_hook.check_for_bucket(bucket_name=bucket):
raise AirflowException(f"The input S3 Bucket {bucket} does not exist ")
if (
key
and not self.s3_hook.check_for_key(key=key, bucket_name=bucket)
and not self.s3_hook.check_for_prefix(prefix=key, bucket_name=bucket, delimiter="/")
):
# check if s3 key exists in the case user provides a single file
# or if s3 prefix exists in the case user provides multiple files in
# a prefix
raise AirflowException(
f"The input S3 Key or Prefix {s3url} does not exist in the Bucket {bucket}"
)
return True
def check_training_config(self, training_config: dict) -> None:
"""
Check if a training configuration is valid.
:param training_config: training_config
"""
if "InputDataConfig" in training_config:
for channel in training_config["InputDataConfig"]:
if "S3DataSource" in channel["DataSource"]:
self.check_s3_url(channel["DataSource"]["S3DataSource"]["S3Uri"])
def check_tuning_config(self, tuning_config: dict) -> None:
"""
Check if a tuning configuration is valid.
:param tuning_config: tuning_config
"""
for channel in tuning_config["TrainingJobDefinition"]["InputDataConfig"]:
if "S3DataSource" in channel["DataSource"]:
self.check_s3_url(channel["DataSource"]["S3DataSource"]["S3Uri"])
def multi_stream_iter(self, log_group: str, streams: list, positions=None) -> Generator:
"""
Iterate over the available events.
The events coming from a set of log streams in a single log group
interleaving the events from each stream so they're yielded in timestamp order.
:param log_group: The name of the log group.
:param streams: A list of the log stream names. The position of the stream in this list is
the stream number.
:param positions: A list of pairs of (timestamp, skip) which represents the last record
read from each stream.
:return: A tuple of (stream number, cloudwatch log event).
"""
positions = positions or {s: Position(timestamp=0, skip=0) for s in streams}
event_iters = [
self.logs_hook.get_log_events(log_group, s, positions[s].timestamp, positions[s].skip)
for s in streams
]
events: list[Any | None] = []
for event_stream in event_iters:
if event_stream:
try:
events.append(next(event_stream))
except StopIteration:
events.append(None)
else:
events.append(None)
while any(events):
i = argmin(events, lambda x: x["timestamp"] if x else 9999999999) or 0
yield i, events[i]
try:
events[i] = next(event_iters[i])
except StopIteration:
events[i] = None
def create_training_job(
self,
config: dict,
wait_for_completion: bool = True,
print_log: bool = True,
check_interval: int = 30,
max_ingestion_time: int | None = None,
):
"""
Start a model training job.
After training completes, Amazon SageMaker saves the resulting model
artifacts to an Amazon S3 location that you specify.
:param config: the config for training
:param wait_for_completion: if the program should keep running until job finishes
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:return: A response to training job creation
"""
self.check_training_config(config)
response = self.get_conn().create_training_job(**config)
if print_log:
self.check_training_status_with_log(
config["TrainingJobName"],
self.non_terminal_states,
self.training_failed_states,
wait_for_completion,
check_interval,
max_ingestion_time,
)
elif wait_for_completion:
describe_response = self.check_status(
config["TrainingJobName"],
"TrainingJobStatus",
self.describe_training_job,
check_interval,
max_ingestion_time,
)
billable_seconds = SageMakerHook.count_billable_seconds(
training_start_time=describe_response["TrainingStartTime"],
training_end_time=describe_response["TrainingEndTime"],
instance_count=describe_response["ResourceConfig"]["InstanceCount"],
)
self.log.info("Billable seconds: %d", billable_seconds)
return response
def create_tuning_job(
self,
config: dict,
wait_for_completion: bool = True,
check_interval: int = 30,
max_ingestion_time: int | None = None,
):
"""
Start a hyperparameter tuning job.
A hyperparameter tuning job finds the best version of a model by running
many training jobs on your dataset using the algorithm you choose and
values for hyperparameters within ranges that you specify. It then
chooses the hyperparameter values that result in a model that performs
the best, as measured by an objective metric that you choose.
:param config: the config for tuning
:param wait_for_completion: if the program should keep running until job finishes
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:return: A response to tuning job creation
"""
self.check_tuning_config(config)
response = self.get_conn().create_hyper_parameter_tuning_job(**config)
if wait_for_completion:
self.check_status(
config["HyperParameterTuningJobName"],
"HyperParameterTuningJobStatus",
self.describe_tuning_job,
check_interval,
max_ingestion_time,
)
return response
def create_transform_job(
self,
config: dict,
wait_for_completion: bool = True,
check_interval: int = 30,
max_ingestion_time: int | None = None,
):
"""
Start a transform job.
A transform job uses a trained model to get inferences on a dataset and
saves these results to an Amazon S3 location that you specify.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.create_transform_job`
:param config: the config for transform job
:param wait_for_completion: if the program should keep running until job finishes
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:return: A response to transform job creation
"""
if "S3DataSource" in config["TransformInput"]["DataSource"]:
self.check_s3_url(config["TransformInput"]["DataSource"]["S3DataSource"]["S3Uri"])
response = self.get_conn().create_transform_job(**config)
if wait_for_completion:
self.check_status(
config["TransformJobName"],
"TransformJobStatus",
self.describe_transform_job,
check_interval,
max_ingestion_time,
)
return response
def create_processing_job(
self,
config: dict,
wait_for_completion: bool = True,
check_interval: int = 30,
max_ingestion_time: int | None = None,
):
"""
Use Amazon SageMaker Processing to analyze data and evaluate models.
With Processing, you can use a simplified, managed experience on
SageMaker to run your data processing workloads, such as feature
engineering, data validation, model evaluation, and model
interpretation.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.create_processing_job`
:param config: the config for processing job
:param wait_for_completion: if the program should keep running until job finishes
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:return: A response to transform job creation
"""
response = self.get_conn().create_processing_job(**config)
if wait_for_completion:
self.check_status(
config["ProcessingJobName"],
"ProcessingJobStatus",
self.describe_processing_job,
check_interval,
max_ingestion_time,
)
return response
def create_model(self, config: dict):
"""
Create a model in Amazon SageMaker.
In the request, you name the model and describe a primary container. For
the primary container, you specify the Docker image that contains
inference code, artifacts (from prior training), and a custom
environment map that the inference code uses when you deploy the model
for predictions.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.create_model`
:param config: the config for model
:return: A response to model creation
"""
return self.get_conn().create_model(**config)
def create_endpoint_config(self, config: dict):
"""
Create an endpoint configuration to deploy models.
In the configuration, you identify one or more models, created using the
CreateModel API, to deploy and the resources that you want Amazon
SageMaker to provision.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.create_endpoint_config`
- :class:`airflow.providers.amazon.aws.hooks.sagemaker.SageMakerHook.create_model`
- :class:`airflow.providers.amazon.aws.hooks.sagemaker.SageMakerHook.create_endpoint`
:param config: the config for endpoint-config
:return: A response to endpoint config creation
"""
return self.get_conn().create_endpoint_config(**config)
def create_endpoint(
self,
config: dict,
wait_for_completion: bool = True,
check_interval: int = 30,
max_ingestion_time: int | None = None,
):
"""
Create an endpoint from configuration.
When you create a serverless endpoint, SageMaker provisions and manages
the compute resources for you. Then, you can make inference requests to
the endpoint and receive model predictions in response. SageMaker scales
the compute resources up and down as needed to handle your request traffic.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.create_endpoint`
- :class:`airflow.providers.amazon.aws.hooks.sagemaker.SageMakerHook.create_endpoint`
:param config: the config for endpoint
:param wait_for_completion: if the program should keep running until job finishes
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:return: A response to endpoint creation
"""
response = self.get_conn().create_endpoint(**config)
if wait_for_completion:
self.check_status(
config["EndpointName"],
"EndpointStatus",
self.describe_endpoint,
check_interval,
max_ingestion_time,
non_terminal_states=self.endpoint_non_terminal_states,
)
return response
def update_endpoint(
    self,
    config: dict,
    wait_for_completion: bool = True,
    check_interval: int = 30,
    max_ingestion_time: int | None = None,
):
    """
    Deploy the config in the request and switch to using the new endpoint.

    Resources provisioned for the endpoint using the previous EndpointConfig
    are deleted (there is no availability loss).

    .. seealso::
        - :external+boto3:py:meth:`SageMaker.Client.update_endpoint`

    :param config: the config for endpoint
    :param wait_for_completion: if the program should keep running until job finishes
    :param check_interval: the time interval in seconds which the operator
        will check the status of any SageMaker job
    :param max_ingestion_time: the maximum ingestion time in seconds. Any
        SageMaker jobs that run longer than this will fail. Setting this to
        None implies no timeout for any SageMaker job.
    :return: A response to endpoint update
    """
    response = self.get_conn().update_endpoint(**config)
    if not wait_for_completion:
        return response
    # Poll until the update reaches a terminal endpoint state.
    self.check_status(
        config["EndpointName"],
        "EndpointStatus",
        self.describe_endpoint,
        check_interval,
        max_ingestion_time,
        non_terminal_states=self.endpoint_non_terminal_states,
    )
    return response
def describe_training_job(self, name: str):
    """
    Get the training job info associated with the name.

    .. seealso::
        - :external+boto3:py:meth:`SageMaker.Client.describe_training_job`

    :param name: the name of the training job
    :return: A dict contains all the training job info
    """
    client = self.get_conn()
    return client.describe_training_job(TrainingJobName=name)
def describe_training_job_with_log(
    self,
    job_name: str,
    positions,
    stream_names: list,
    instance_count: int,
    state: int,
    last_description: dict,
    last_describe_job_call: float,
):
    """Get the associated training job info and print CloudWatch logs.

    One polling step of the log-tailing state machine driven by
    :meth:`check_training_status_with_log`.

    :param job_name: name of the training job
    :param positions: map of stream name -> ``Position(timestamp, skip)``, the last
        record read from each stream (mutated in place)
    :param stream_names: log stream names discovered so far; the index in this list
        is the stream number
    :param instance_count: number of instances running the job
    :param state: current ``LogState`` value
    :param last_description: latest known description of the training job
    :param last_describe_job_call: monotonic time of the previous describe call
    :return: tuple of (new state, latest description, time of latest describe call)
    """
    # Fixed CloudWatch log group that SageMaker writes training logs to.
    log_group = "/aws/sagemaker/TrainingJobs"

    if len(stream_names) < instance_count:
        # Log streams are created whenever a container starts writing to stdout/err, so this list
        # may be dynamic until we have a stream for every instance.
        logs_conn = self.logs_hook.get_conn()
        try:
            streams = logs_conn.describe_log_streams(
                logGroupName=log_group,
                logStreamNamePrefix=job_name + "/",
                orderBy="LogStreamName",
                limit=instance_count,
            )
            stream_names = [s["logStreamName"] for s in streams["logStreams"]]
            # Newly discovered streams start at the beginning (timestamp 0, skip 0).
            positions.update(
                [(s, Position(timestamp=0, skip=0)) for s in stream_names if s not in positions]
            )
        except logs_conn.exceptions.ResourceNotFoundException:
            # On the very first training job run on an account, there's no log group until
            # the container starts logging, so ignore any errors thrown about that
            pass

    if stream_names:
        # Print events interleaved across streams; ``skip`` counts how many events at
        # the same timestamp were already emitted, so repeated polls don't duplicate.
        for idx, event in self.multi_stream_iter(log_group, stream_names, positions):
            self.log.info(event["message"])
            ts, count = positions[stream_names[idx]]
            if event["timestamp"] == ts:
                positions[stream_names[idx]] = Position(timestamp=ts, skip=count + 1)
            else:
                positions[stream_names[idx]] = Position(timestamp=event["timestamp"], skip=1)

    if state == LogState.COMPLETE:
        return state, last_description, last_describe_job_call

    if state == LogState.JOB_COMPLETE:
        # One extra read already happened above; now stop tailing.
        state = LogState.COMPLETE
    elif time.monotonic() - last_describe_job_call >= 30:
        # Throttle describe_training_job to at most one call every 30 seconds.
        description = self.describe_training_job(job_name)
        last_describe_job_call = time.monotonic()

        if secondary_training_status_changed(description, last_description):
            self.log.info(secondary_training_status_message(description, last_description))
            last_description = description

        status = description["TrainingJobStatus"]

        if status not in self.non_terminal_states:
            # Job reached a terminal state; do one more log pass before COMPLETE.
            state = LogState.JOB_COMPLETE
    return state, last_description, last_describe_job_call
def describe_tuning_job(self, name: str) -> dict:
    """
    Get the tuning job info associated with the name.

    .. seealso::
        - :external+boto3:py:meth:`SageMaker.Client.describe_hyper_parameter_tuning_job`

    :param name: the name of the tuning job
    :return: A dict contains all the tuning job info
    """
    client = self.get_conn()
    return client.describe_hyper_parameter_tuning_job(HyperParameterTuningJobName=name)
def describe_model(self, name: str) -> dict:
    """
    Get the SageMaker model info associated with the name.

    :param name: the name of the SageMaker model
    :return: A dict contains all the model info
    """
    client = self.get_conn()
    return client.describe_model(ModelName=name)
def describe_transform_job(self, name: str) -> dict:
    """
    Get the transform job info associated with the name.

    .. seealso::
        - :external+boto3:py:meth:`SageMaker.Client.describe_transform_job`

    :param name: the name of the transform job
    :return: A dict contains all the transform job info
    """
    client = self.get_conn()
    return client.describe_transform_job(TransformJobName=name)
def describe_processing_job(self, name: str) -> dict:
    """
    Get the processing job info associated with the name.

    .. seealso::
        - :external+boto3:py:meth:`SageMaker.Client.describe_processing_job`

    :param name: the name of the processing job
    :return: A dict contains all the processing job info
    """
    client = self.get_conn()
    return client.describe_processing_job(ProcessingJobName=name)
def describe_endpoint_config(self, name: str) -> dict:
    """
    Get the endpoint config info associated with the name.

    .. seealso::
        - :external+boto3:py:meth:`SageMaker.Client.describe_endpoint_config`

    :param name: the name of the endpoint config
    :return: A dict contains all the endpoint config info
    """
    client = self.get_conn()
    return client.describe_endpoint_config(EndpointConfigName=name)
def describe_endpoint(self, name: str) -> dict:
    """
    Get the description of an endpoint.

    .. seealso::
        - :external+boto3:py:meth:`SageMaker.Client.describe_endpoint`

    :param name: the name of the endpoint
    :return: A dict contains all the endpoint info
    """
    client = self.get_conn()
    return client.describe_endpoint(EndpointName=name)
def check_status(
    self,
    job_name: str,
    key: str,
    describe_function: Callable,
    check_interval: int,
    max_ingestion_time: int | None = None,
    non_terminal_states: set | None = None,
) -> dict:
    """
    Check status of a SageMaker resource.

    Blocks, polling ``describe_function`` every ``check_interval`` seconds,
    until the value under ``key`` leaves the non-terminal states.

    :param job_name: name of the resource to check status, can be a job but
        also pipeline for instance.
    :param key: the key of the response dict that points to the state
    :param describe_function: the function used to retrieve the status
    :param check_interval: the time interval in seconds which the operator
        will check the status of any SageMaker resource
    :param max_ingestion_time: the maximum ingestion time in seconds. Any
        SageMaker resources that run longer than this will fail. Setting
        this to None implies no timeout for any SageMaker resource.
    :param non_terminal_states: the set of nonterminal states
    :raises AirflowException: if the resource fails, the status cannot be
        retrieved, or ``max_ingestion_time`` is exceeded
    :return: response of describe call after resource is done
    """
    if not non_terminal_states:
        non_terminal_states = self.non_terminal_states

    sec = 0
    while True:
        time.sleep(check_interval)
        sec += check_interval

        try:
            response = describe_function(job_name)
            status = response[key]
            self.log.info("Resource still running for %s seconds... current status is %s", sec, status)
        except KeyError as e:
            # Chain the original exception so the missing key is visible in logs (B904).
            raise AirflowException("Could not get status of the SageMaker resource") from e
        except ClientError as e:
            raise AirflowException("AWS request failed, check logs for more info") from e

        if status in self.failed_states:
            # FailureReason is not guaranteed to be present; don't mask the
            # failure with a KeyError.
            reason = response.get("FailureReason", "(unknown)")
            raise AirflowException(f"SageMaker resource failed because {reason}")
        if status not in non_terminal_states:
            break
        if max_ingestion_time and sec > max_ingestion_time:
            # ensure that the resource gets killed if the max ingestion time is exceeded
            raise AirflowException(f"SageMaker resource took more than {max_ingestion_time} seconds")

    self.log.info("SageMaker resource completed")
    return response
def check_training_status_with_log(
    self,
    job_name: str,
    non_terminal_states: set,
    failed_states: set,
    wait_for_completion: bool,
    check_interval: int,
    max_ingestion_time: int | None = None,
):
    """
    Display logs for a given training job.

    Optionally tailing them until the job is complete.

    :param job_name: name of the training job to check status and display logs for
    :param non_terminal_states: the set of non_terminal states
    :param failed_states: the set of failed states
    :param wait_for_completion: Whether to keep looking for new log entries
        until the job completes
    :param check_interval: The interval in seconds between polling for new log entries and job completion
    :param max_ingestion_time: the maximum ingestion time in seconds. Any
        SageMaker jobs that run longer than this will fail. Setting this to
        None implies no timeout for any SageMaker job.
    """
    sec = 0
    description = self.describe_training_job(job_name)
    self.log.info(secondary_training_status_message(description, None))
    instance_count = description["ResourceConfig"]["InstanceCount"]
    status = description["TrainingJobStatus"]

    stream_names: list = []  # The list of log streams
    positions: dict = {}  # The current position in each stream, map of stream name -> position

    job_already_completed = status not in non_terminal_states

    state = LogState.TAILING if wait_for_completion and not job_already_completed else LogState.COMPLETE

    # The loop below implements a state machine that alternates between checking the job status and
    # reading whatever is available in the logs at this point. Note, that if we were called with
    # wait_for_completion == False, we never check the job status.
    #
    # If wait_for_completion == TRUE and job is not completed, the initial state is TAILING
    # If wait_for_completion == FALSE, the initial state is COMPLETE
    # (doesn't matter if the job really is complete).
    #
    # The state table:
    #
    # STATE               ACTIONS                        CONDITION             NEW STATE
    # ----------------    ----------------               -----------------     ----------------
    # TAILING             Read logs, Pause, Get status   Job complete          JOB_COMPLETE
    #                                                    Else                  TAILING
    # JOB_COMPLETE        Read logs, Pause               Any                   COMPLETE
    # COMPLETE            Read logs, Exit                                      N/A
    #
    # Notes:
    # - The JOB_COMPLETE state forces us to do an extra pause and read any items that
    #   got to Cloudwatch after the job was marked complete.
    last_describe_job_call = time.monotonic()
    last_description = description

    while True:
        time.sleep(check_interval)
        sec += check_interval

        # One state-machine step: read logs and possibly refresh the job description.
        state, last_description, last_describe_job_call = self.describe_training_job_with_log(
            job_name,
            positions,
            stream_names,
            instance_count,
            state,
            last_description,
            last_describe_job_call,
        )
        if state == LogState.COMPLETE:
            break

        if max_ingestion_time and sec > max_ingestion_time:
            # ensure that the job gets killed if the max ingestion time is exceeded
            raise AirflowException(f"SageMaker job took more than {max_ingestion_time} seconds")

    if wait_for_completion:
        status = last_description["TrainingJobStatus"]
        if status in failed_states:
            reason = last_description.get("FailureReason", "(No reason provided)")
            raise AirflowException(f"Error training {job_name}: {status} Reason: {reason}")
        # NOTE(review): assumes TrainingStartTime/TrainingEndTime are present once the
        # job reached a terminal, non-failed state — TODO confirm for stopped jobs.
        billable_seconds = SageMakerHook.count_billable_seconds(
            training_start_time=last_description["TrainingStartTime"],
            training_end_time=last_description["TrainingEndTime"],
            instance_count=instance_count,
        )
        self.log.info("Billable seconds: %d", billable_seconds)
def list_training_jobs(
    self, name_contains: str | None = None, max_results: int | None = None, **kwargs
) -> list[dict]:
    """
    Call boto3's ``list_training_jobs``.

    The training job name and max results are configurable via arguments.
    Other arguments are not, and should be provided via kwargs. Note that
    boto3 expects these in CamelCase, for example:

    .. code-block:: python

        list_training_jobs(name_contains="myjob", StatusEquals="Failed")

    .. seealso::
        - :external+boto3:py:meth:`SageMaker.Client.list_training_jobs`

    :param name_contains: (optional) partial name to match
    :param max_results: (optional) maximum number of results to return. None returns infinite results
    :param kwargs: (optional) kwargs to boto3's list_training_jobs method
    :return: results of the list_training_jobs request
    """
    config, max_results = self._preprocess_list_request_args(name_contains, max_results, **kwargs)
    # Bind the filter arguments once; _list_request drives the pagination.
    request = partial(self.get_conn().list_training_jobs, **config)
    return self._list_request(request, "TrainingJobSummaries", max_results=max_results)
def list_transform_jobs(
    self, name_contains: str | None = None, max_results: int | None = None, **kwargs
) -> list[dict]:
    """
    Call boto3's ``list_transform_jobs``.

    The transform job name and max results are configurable via arguments.
    Other arguments are not, and should be provided via kwargs. Note that
    boto3 expects these in CamelCase, for example:

    .. code-block:: python

        list_transform_jobs(name_contains="myjob", StatusEquals="Failed")

    .. seealso::
        - :external+boto3:py:meth:`SageMaker.Client.list_transform_jobs`

    :param name_contains: (optional) partial name to match.
    :param max_results: (optional) maximum number of results to return.
        None returns infinite results.
    :param kwargs: (optional) kwargs to boto3's list_transform_jobs method.
    :return: results of the list_transform_jobs request.
    """
    config, max_results = self._preprocess_list_request_args(name_contains, max_results, **kwargs)
    # Bind the filter arguments once; _list_request drives the pagination.
    request = partial(self.get_conn().list_transform_jobs, **config)
    return self._list_request(request, "TransformJobSummaries", max_results=max_results)
def list_processing_jobs(self, **kwargs) -> list[dict]:
    """
    Call boto3's `list_processing_jobs`.

    All arguments should be provided via kwargs. Note that boto3 expects
    these in CamelCase, for example:

    .. code-block:: python

        list_processing_jobs(NameContains="myjob", StatusEquals="Failed")

    .. seealso::
        - :external+boto3:py:meth:`SageMaker.Client.list_processing_jobs`

    :param kwargs: (optional) kwargs to boto3's list_training_jobs method
    :return: results of the list_processing_jobs request
    """
    # MaxResults (if given) doubles as the overall cap for the paginated fetch.
    request = partial(self.get_conn().list_processing_jobs, **kwargs)
    return self._list_request(request, "ProcessingJobSummaries", max_results=kwargs.get("MaxResults"))
def _preprocess_list_request_args(
    self, name_contains: str | None = None, max_results: int | None = None, **kwargs
) -> tuple[dict[str, Any], int | None]:
    """
    Preprocess arguments for boto3's ``list_*`` methods.

    It will turn arguments name_contains and max_results as boto3 compliant
    CamelCase format. This method also makes sure that these two arguments
    are only set once.

    :param name_contains: boto3 function with arguments
    :param max_results: the result key to iterate over
    :param kwargs: (optional) kwargs to boto3's list_* method
    :return: Tuple with config dict to be passed to boto3's list_* method
        and max_results parameter
    """
    config: dict[str, Any] = {}

    if name_contains:
        # Reject duplicate specification of the same filter.
        if "NameContains" in kwargs:
            raise AirflowException("Either name_contains or NameContains can be provided, not both.")
        config["NameContains"] = name_contains

    if kwargs.get("MaxResults") is not None:
        if max_results:
            raise AirflowException("Either max_results or MaxResults can be provided, not both.")
        # Unset MaxResults, we'll use the SageMakerHook's internal method for iteratively fetching results
        max_results = kwargs.pop("MaxResults")

    config.update(kwargs)
    return config, max_results
def _list_request(
    self, partial_func: Callable, result_key: str, max_results: int | None = None
) -> list[dict]:
    """
    Process a list request to produce results.

    All AWS boto3 ``list_*`` requests return results in batches, and if the
    key "NextToken" is contained in the result, there are more results to
    fetch. The default AWS batch size is 10, and configurable up to 100.
    This function iteratively loads all results (or up to a given maximum).

    Each boto3 ``list_*`` function returns the results in a list with a
    different name. The key of this structure must be given to iterate over
    the results, e.g. "TransformJobSummaries" for ``list_transform_jobs()``.

    :param partial_func: boto3 function with arguments
    :param result_key: the result key to iterate over
    :param max_results: maximum number of results to return (None = infinite)
    :return: Results of the list_* request
    """
    sagemaker_max_results = 100  # Fixed number set by AWS

    results: list[dict] = []
    next_token = None
    while True:
        request_args: dict = {}
        if next_token is not None:
            request_args["NextToken"] = next_token
        # Request the largest batch AWS allows, capped by what's still needed.
        if max_results is None:
            request_args["MaxResults"] = sagemaker_max_results
        else:
            request_args["MaxResults"] = min(max_results - len(results), sagemaker_max_results)

        response = partial_func(**request_args)
        batch = response[result_key]
        self.log.debug("Fetched %s results.", len(batch))
        results.extend(batch)

        # Stop when there are no results left (no NextToken) or we've reached max_results.
        if "NextToken" not in response or (max_results is not None and len(results) == max_results):
            return results
        next_token = response["NextToken"]
@staticmethod
def _name_matches_pattern(
    processing_job_name: str,
    found_name: str,
    job_name_suffix: str | None = None,
) -> bool:
    # True when ``found_name`` is exactly ``processing_job_name``, optionally followed by
    # the deduplication suffix. NOTE(review): both inputs are interpolated into the regex
    # without ``re.escape`` — assumes SageMaker job names contain no regex
    # metacharacters; TODO confirm.
    return re.fullmatch(f"{processing_job_name}({job_name_suffix})?", found_name) is not None
def count_processing_jobs_by_name(
    self,
    processing_job_name: str,
    job_name_suffix: str | None = None,
    throttle_retry_delay: int = 2,
    retries: int = 3,
) -> int:
    """
    Get the number of processing jobs found with the provided name prefix.

    :param processing_job_name: The prefix to look for.
    :param job_name_suffix: The optional suffix which may be appended to deduplicate an existing job name.
    :param throttle_retry_delay: Seconds to wait if a ThrottlingException is hit.
    :param retries: The max number of times to retry.
    :returns: The number of processing jobs that start with the provided prefix.
    """
    try:
        # NameContains is a substring filter, so results are narrowed further below.
        jobs = self.get_conn().list_processing_jobs(NameContains=processing_job_name)
        # We want to make sure the job name starts with the provided name, not just contains it.
        matching_jobs = [
            job["ProcessingJobName"]
            for job in jobs["ProcessingJobSummaries"]
            if self._name_matches_pattern(processing_job_name, job["ProcessingJobName"], job_name_suffix)
        ]
        return len(matching_jobs)
    except ClientError as e:
        if e.response["Error"]["Code"] == "ResourceNotFound":
            # No jobs found with that name. This is good, return 0.
            return 0
        if e.response["Error"]["Code"] == "ThrottlingException" and retries:
            # If we hit a ThrottlingException, back off a little and try again.
            # Exponential backoff via recursion: delay doubles, retry budget shrinks.
            time.sleep(throttle_retry_delay)
            return self.count_processing_jobs_by_name(
                processing_job_name, job_name_suffix, throttle_retry_delay * 2, retries - 1
            )
        raise
def delete_model(self, model_name: str):
    """
    Delete a SageMaker model.

    .. seealso::
        - :external+boto3:py:meth:`SageMaker.Client.delete_model`

    :param model_name: name of the model
    """
    client = self.get_conn()
    try:
        client.delete_model(ModelName=model_name)
    except Exception as err:
        # Log for operator visibility, then propagate unchanged.
        self.log.error("Failed to delete model, error: %s", err)
        raise
def describe_pipeline_exec(self, pipeline_exec_arn: str, verbose: bool = False):
    """
    Get info about a SageMaker pipeline execution.

    .. seealso::
        - :external+boto3:py:meth:`SageMaker.Client.describe_pipeline_execution`
        - :external+boto3:py:meth:`SageMaker.Client.list_pipeline_execution_steps`

    :param pipeline_exec_arn: arn of the pipeline execution
    :param verbose: Whether to log details about the steps status in the pipeline execution
    """
    if verbose:
        steps_res = self.conn.list_pipeline_execution_steps(PipelineExecutionArn=pipeline_exec_arn)
        steps = steps_res["PipelineExecutionSteps"]
        state_counts = Counter(step["StepStatus"] for step in steps)
        in_progress = [step["StepName"] for step in steps if step["StepStatus"] == "Executing"]
        self.log.info("state of the pipeline steps: %s", state_counts)
        self.log.info("steps currently in progress: %s", in_progress)
    return self.conn.describe_pipeline_execution(PipelineExecutionArn=pipeline_exec_arn)
def start_pipeline(
    self,
    pipeline_name: str,
    display_name: str = "airflow-triggered-execution",
    pipeline_params: dict | None = None,
) -> str:
    """
    Start a new execution for a SageMaker pipeline.

    .. seealso::
        - :external+boto3:py:meth:`SageMaker.Client.start_pipeline_execution`

    :param pipeline_name: Name of the pipeline to start (this is _not_ the ARN).
    :param display_name: The name this pipeline execution will have in the UI. Doesn't need to be unique.
    :param pipeline_params: Optional parameters for the pipeline.
        All parameters supplied need to already be present in the pipeline definition.

    :return: the ARN of the pipeline execution launched.
    """
    # boto3 wants [{"Name": ..., "Value": ...}] entries rather than a plain dict.
    formatted_params = format_tags(pipeline_params, key_label="Name")

    try:
        response = self.conn.start_pipeline_execution(
            PipelineName=pipeline_name,
            PipelineExecutionDisplayName=display_name,
            PipelineParameters=formatted_params,
        )
    except ClientError as ce:
        self.log.error("Failed to start pipeline execution, error: %s", ce)
        raise

    return response["PipelineExecutionArn"]
def stop_pipeline(
    self,
    pipeline_exec_arn: str,
    fail_if_not_running: bool = False,
) -> str:
    """
    Stop SageMaker pipeline execution.

    .. seealso::
        - :external+boto3:py:meth:`SageMaker.Client.stop_pipeline_execution`

    :param pipeline_exec_arn: Amazon Resource Name (ARN) of the pipeline execution.
        It's the ARN of the pipeline itself followed by "/execution/" and an id.
    :param fail_if_not_running: This method will raise an exception if the pipeline we're trying to stop
        is not in an "Executing" state when the call is sent (which would mean that the pipeline is
        already either stopping or stopped).
        Note that setting this to True will raise an error if the pipeline finished successfully before it
        was stopped.
    :return: Status of the pipeline execution after the operation.
        One of 'Executing'|'Stopping'|'Stopped'|'Failed'|'Succeeded'.
    """
    # ``retries`` counts down 4, 3, 2, 1, 0 — it doubles as "attempts remaining".
    for retries in reversed(range(5)):
        try:
            self.conn.stop_pipeline_execution(PipelineExecutionArn=pipeline_exec_arn)
        except ClientError as ce:
            # this can happen if the pipeline was transitioning between steps at that moment
            if ce.response["Error"]["Code"] == "ConflictException" and retries:
                self.log.warning(
                    "Got a conflict exception when trying to stop the pipeline, "
                    "retrying %s more times. Error was: %s",
                    retries,
                    ce,
                )
                time.sleep(0.3)  # error is due to a race condition, so it should be very transient
            else:
                # we have to rely on the message to catch the right error here, because its type
                # (ValidationException) is shared with other kinds of errors (e.g. badly formatted ARN)
                if (
                    not fail_if_not_running
                    and "Only pipelines with 'Executing' status can be stopped"
                    in ce.response["Error"]["Message"]
                ):
                    self.log.warning("Cannot stop pipeline execution, as it was not running: %s", ce)
                    break
                self.log.error(ce)
                raise
        else:
            # Stop call succeeded — no need to retry further.
            break

    # Report the post-operation status rather than assuming "Stopping".
    res = self.describe_pipeline_exec(pipeline_exec_arn)
    return res["PipelineExecutionStatus"]
def create_model_package_group(self, package_group_name: str, package_group_desc: str = "") -> bool:
    """
    Create a Model Package Group if it does not already exist.

    .. seealso::
        - :external+boto3:py:meth:`SageMaker.Client.create_model_package_group`

    :param package_group_name: Name of the model package group to create if not already present.
    :param package_group_desc: Description of the model package group, if it was to be created (optional).

    :return: True if the model package group was created, False if it already existed.
    """
    try:
        res = self.conn.create_model_package_group(
            ModelPackageGroupName=package_group_name,
            ModelPackageGroupDescription=package_group_desc,
        )
        self.log.info(
            "Created new Model Package Group with name %s (ARN: %s)",
            package_group_name,
            res["ModelPackageGroupArn"],
        )
        return True
    except ClientError as e:
        # ValidationException can also happen if the package group name contains invalid char,
        # so we have to look at the error message too
        if e.response["Error"]["Code"] == "ValidationException" and e.response["Error"][
            "Message"
        ].startswith("Model Package Group already exists"):
            # log msg only so it doesn't look like an error
            self.log.info("%s", e.response["Error"]["Message"])
            return False
        # Any other validation/client error is unexpected — surface it.
        self.log.error("Error when trying to create Model Package Group: %s", e)
        raise
def _describe_auto_ml_job(self, job_name: str):
    """Describe an AutoML job and log its current secondary status."""
    response = self.conn.describe_auto_ml_job(AutoMLJobName=job_name)
    self.log.info("%s's current step: %s", job_name, response["AutoMLJobSecondaryStatus"])
    return response
def create_auto_ml_job(
    self,
    job_name: str,
    s3_input: str,
    target_attribute: str,
    s3_output: str,
    role_arn: str,
    compressed_input: bool = False,
    time_limit: int | None = None,
    autodeploy_endpoint_name: str | None = None,
    extras: dict | None = None,
    wait_for_completion: bool = True,
    check_interval: int = 30,
) -> dict | None:
    """
    Create an auto ML job to predict the given column.

    The learning input is based on data provided through S3 , and the output
    is written to the specified S3 location.

    .. seealso::
        - :external+boto3:py:meth:`SageMaker.Client.create_auto_ml_job`

    :param job_name: Name of the job to create, needs to be unique within the account.
    :param s3_input: The S3 location (folder or file) where to fetch the data.
        By default, it expects csv with headers.
    :param target_attribute: The name of the column containing the values to predict.
    :param s3_output: The S3 folder where to write the model artifacts. Must be 128 characters or fewer.
    :param role_arn: The ARN or the IAM role to use when interacting with S3.
        Must have read access to the input, and write access to the output folder.
    :param compressed_input: Set to True if the input is gzipped.
    :param time_limit: The maximum amount of time in seconds to spend training the model(s).
    :param autodeploy_endpoint_name: If specified, the best model will be deployed to an endpoint with
        that name. No deployment made otherwise.
    :param extras: Use this dictionary to set any variable input variable for job creation that is not
        offered through the parameters of this function. The format is described in:
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.create_auto_ml_job
    :param wait_for_completion: Whether to wait for the job to finish before returning. Defaults to True.
    :param check_interval: Interval in seconds between 2 status checks when waiting for completion.

    :returns: Only if waiting for completion, a dictionary detailing the best model. The structure is that
        of the "BestCandidate" key in:
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.describe_auto_ml_job
    """
    input_data = [
        {
            "DataSource": {"S3DataSource": {"S3DataType": "S3Prefix", "S3Uri": s3_input}},
            "TargetAttributeName": target_attribute,
        },
    ]
    params_dict = {
        "AutoMLJobName": job_name,
        "InputDataConfig": input_data,
        "OutputDataConfig": {"S3OutputPath": s3_output},
        "RoleArn": role_arn,
    }
    # Optional pieces of the request are layered on top of the base params.
    if compressed_input:
        input_data[0]["CompressionType"] = "Gzip"
    if time_limit:
        params_dict.update(
            {"AutoMLJobConfig": {"CompletionCriteria": {"MaxAutoMLJobRuntimeInSeconds": time_limit}}}
        )
    if autodeploy_endpoint_name:
        params_dict.update({"ModelDeployConfig": {"EndpointName": autodeploy_endpoint_name}})
    if extras:
        # ``extras`` wins over any key set above — caller-supplied overrides.
        params_dict.update(extras)

    # returns the job ARN, but we don't need it because we access it by its name
    self.conn.create_auto_ml_job(**params_dict)

    if wait_for_completion:
        res = self.check_status(
            job_name,
            "AutoMLJobStatus",
            # cannot pass the function directly because the parameter needs to be named
            self._describe_auto_ml_job,
            check_interval,
        )
        if "BestCandidate" in res:
            return res["BestCandidate"]
    return None
@staticmethod
def count_billable_seconds(
training_start_time: datetime, training_end_time: datetime, instance_count: int
) -> int:
billable_time = (training_end_time - training_start_time) * instance_count
return int(billable_time.total_seconds()) + 1
async def describe_training_job_async(self, job_name: str) -> dict[str, Any]:
    """
    Return the training job info associated with the name.

    :param job_name: the name of the training job
    """
    # Async client is acquired per call and released by the context manager.
    async with await self.get_async_conn() as client:
        result: dict[str, Any] = await client.describe_training_job(TrainingJobName=job_name)
        return result
async def describe_training_job_with_log_async(
    self,
    job_name: str,
    positions: dict[str, Any],
    stream_names: list[str],
    instance_count: int,
    state: int,
    last_description: dict[str, Any],
    last_describe_job_call: float,
) -> tuple[int, dict[str, Any], float]:
    """
    Return the training job info associated with job_name and print CloudWatch logs.

    Async counterpart of ``describe_training_job_with_log``; one polling step
    of the log-tailing state machine.

    :param job_name: name of the job to check status
    :param positions: A list of pairs of (timestamp, skip) which represents the last record
        read from each stream.
    :param stream_names: A list of the log stream names. The position of the stream in this list is
        the stream number.
    :param instance_count: Count of the instance created for the job initially
    :param state: log state
    :param last_description: Latest description of the training job
    :param last_describe_job_call: previous job called time
    """
    # Fixed CloudWatch log group that SageMaker writes training logs to.
    log_group = "/aws/sagemaker/TrainingJobs"

    if len(stream_names) < instance_count:
        # Streams appear lazily (one per container), so re-discover until we have them all.
        logs_hook = AwsLogsHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
        streams = await logs_hook.describe_log_streams_async(
            log_group=log_group,
            stream_prefix=job_name + "/",
            order_by="LogStreamName",
            count=instance_count,
        )

        stream_names = [s["logStreamName"] for s in streams["logStreams"]] if streams else []
        # New streams start at the beginning (timestamp 0, nothing skipped).
        positions.update([(s, Position(timestamp=0, skip=0)) for s in stream_names if s not in positions])

    if len(stream_names) > 0:
        # Print interleaved events; ``skip`` counts events already emitted at the same timestamp.
        async for idx, event in self.get_multi_stream(log_group, stream_names, positions):
            self.log.info(event["message"])
            ts, count = positions[stream_names[idx]]
            if event["timestamp"] == ts:
                positions[stream_names[idx]] = Position(timestamp=ts, skip=count + 1)
            else:
                positions[stream_names[idx]] = Position(timestamp=event["timestamp"], skip=1)

    if state == LogState.COMPLETE:
        return state, last_description, last_describe_job_call

    if state == LogState.JOB_COMPLETE:
        # One extra read already happened above; now stop tailing.
        state = LogState.COMPLETE
    elif time.time() - last_describe_job_call >= 30:
        # Throttle describe calls to at most one every 30 seconds.
        description = await self.describe_training_job_async(job_name)
        last_describe_job_call = time.time()

        if await sync_to_async(secondary_training_status_changed)(description, last_description):
            self.log.info(
                await sync_to_async(secondary_training_status_message)(description, last_description)
            )
            last_description = description

        status = description["TrainingJobStatus"]

        if status not in self.non_terminal_states:
            # Job reached a terminal state; do one more log pass before COMPLETE.
            state = LogState.JOB_COMPLETE
    return state, last_description, last_describe_job_call
async def get_multi_stream(
    self, log_group: str, streams: list[str], positions: dict[str, Any]
) -> AsyncGenerator[Any, tuple[int, Any | None]]:
    """
    Iterate over the available events coming and interleaving the events from each stream so they're yielded in timestamp order.

    :param log_group: The name of the log group.
    :param streams: A list of the log stream names. The position of the stream in this list is
        the stream number.
    :param positions: A list of pairs of (timestamp, skip) which represents the last record
        read from each stream.
    """
    # Default every stream to "start from the beginning" when no positions given.
    positions = positions or {s: Position(timestamp=0, skip=0) for s in streams}
    events: list[Any | None] = []

    logs_hook = AwsLogsHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
    # One async event iterator per stream, resumed from its recorded position.
    event_iters = [
        logs_hook.get_log_events_async(log_group, s, positions[s].timestamp, positions[s].skip)
        for s in streams
    ]
    # Prime ``events`` with the head element of each iterator (None = exhausted/empty).
    for event_stream in event_iters:
        if not event_stream:
            events.append(None)
            continue
        try:
            events.append(await event_stream.__anext__())
        except StopAsyncIteration:
            events.append(None)

    # K-way merge: repeatedly yield the event with the smallest timestamp, then
    # refill that slot from its iterator. Exhausted streams (None) sort last via
    # the large sentinel timestamp.
    while any(events):
        # NOTE(review): ``or 0`` presumably normalizes a falsy argmin result (None or
        # index 0) to 0 — harmless for index 0, but confirm argmin's empty-input
        # behavior.
        i = argmin(events, lambda x: x["timestamp"] if x else 9999999999) or 0
        yield i, events[i]

        try:
            events[i] = await event_iters[i].__anext__()
        except StopAsyncIteration:
            events[i] = None
| SageMakerHook |
python | allegroai__clearml | clearml/backend_api/services/v2_23/queues.py | {
"start": 87172,
"end": 88061
class PeekTaskResponse(Response):
    """
    Response of queues.peek_task endpoint.

    :param task: Task ID
    :type task: str
    """

    _service = "queues"
    _action = "peek_task"
    _version = "2.23"
    _schema = {
        "definitions": {},
        "properties": {"task": {"description": "Task ID", "type": ["string", "null"]}},
        "type": "object",
    }

    def __init__(self, task: Optional[str] = None, **kwargs: Any) -> None:
        # The class body referenced ``PeekTaskResponse`` while the class itself was
        # named ``____`` (a masked placeholder) — restore the real name so the
        # ``super()`` call resolves.
        super(PeekTaskResponse, self).__init__(**kwargs)
        self.task = task

    @schema_property("task")
    def task(self) -> Optional[str]:
        # Backing attribute is maintained by the setter below.
        return self._property_task

    @task.setter
    def task(self, value: Optional[str]) -> None:
        if value is None:
            self._property_task = None
            return

        # Only plain strings are accepted as task IDs (six covers py2/py3 str types).
        self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value
| PeekTaskResponse |
python | getsentry__sentry | src/sentry/models/releaseactivity.py | {
"start": 307,
"end": 716
} | class ____(Model):
__relocation_scope__ = RelocationScope.Excluded
release = FlexibleForeignKey("sentry.Release", db_index=True)
type = BoundedPositiveIntegerField(null=False, choices=CHOICES)
data = models.JSONField(default=dict)
date_added = models.DateTimeField(default=timezone.now)
class Meta:
app_label = "sentry"
db_table = "sentry_releaseactivity"
| ReleaseActivity |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 548443,
"end": 549257
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for CreatedPullRequestContribution."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("CreatedPullRequestContributionEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("CreatedPullRequestContribution"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| CreatedPullRequestContributionConnection |
python | django__django | tests/fixtures_regress/models.py | {
"start": 7930,
"end": 8102
} | class ____(BaseNKModel):
b = models.ForeignKey(M2MComplexCircular1B, models.CASCADE)
c = models.ForeignKey(M2MComplexCircular1C, models.CASCADE)
| M2MCircular1ThroughBC |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_compiler.py | {
"start": 4590,
"end": 102646
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = postgresql.dialect()
def test_plain_stringify_returning(self):
t = Table(
"t",
MetaData(),
Column("myid", Integer, primary_key=True),
Column("name", String, server_default="some str"),
Column("description", String, default=func.lower("hi")),
)
stmt = t.insert().values().return_defaults()
eq_ignore_whitespace(
str(stmt.compile(dialect=postgresql.dialect())),
"INSERT INTO t (description) VALUES (lower(%(lower_1)s)) "
"RETURNING t.myid, t.name, t.description",
)
def test_update_returning(self):
dialect = postgresql.dialect()
table1 = table(
"mytable",
column("myid", Integer),
column("name", String(128)),
column("description", String(128)),
)
u = (
update(table1)
.values(dict(name="foo"))
.returning(table1.c.myid, table1.c.name)
)
self.assert_compile(
u,
"UPDATE mytable SET name=%(name)s "
"RETURNING mytable.myid, mytable.name",
dialect=dialect,
)
u = update(table1).values(dict(name="foo")).returning(table1)
self.assert_compile(
u,
"UPDATE mytable SET name=%(name)s "
"RETURNING mytable.myid, mytable.name, "
"mytable.description",
dialect=dialect,
)
u = (
update(table1)
.values(dict(name="foo"))
.returning(func.length(table1.c.name))
)
self.assert_compile(
u,
"UPDATE mytable SET name=%(name)s "
"RETURNING length(mytable.name) AS length_1",
dialect=dialect,
)
def test_insert_returning(self):
dialect = postgresql.dialect()
table1 = table(
"mytable",
column("myid", Integer),
column("name", String(128)),
column("description", String(128)),
)
i = (
insert(table1)
.values(dict(name="foo"))
.returning(table1.c.myid, table1.c.name)
)
self.assert_compile(
i,
"INSERT INTO mytable (name) VALUES "
"(%(name)s) RETURNING mytable.myid, "
"mytable.name",
dialect=dialect,
)
i = insert(table1).values(dict(name="foo")).returning(table1)
self.assert_compile(
i,
"INSERT INTO mytable (name) VALUES "
"(%(name)s) RETURNING mytable.myid, "
"mytable.name, mytable.description",
dialect=dialect,
)
i = (
insert(table1)
.values(dict(name="foo"))
.returning(func.length(table1.c.name))
)
self.assert_compile(
i,
"INSERT INTO mytable (name) VALUES "
"(%(name)s) RETURNING length(mytable.name) "
"AS length_1",
dialect=dialect,
)
@testing.fixture
def column_expression_fixture(self):
class MyString(TypeEngine):
def column_expression(self, column):
return func.lower(column)
return table(
"some_table", column("name", String), column("value", MyString)
)
@testing.combinations("columns", "table", argnames="use_columns")
def test_plain_returning_column_expression(
self, column_expression_fixture, use_columns
):
"""test #8770"""
table1 = column_expression_fixture
if use_columns == "columns":
stmt = insert(table1).returning(table1)
else:
stmt = insert(table1).returning(table1.c.name, table1.c.value)
self.assert_compile(
stmt,
"INSERT INTO some_table (name, value) "
"VALUES (%(name)s, %(value)s) RETURNING some_table.name, "
"lower(some_table.value) AS value",
)
def test_create_drop_enum(self):
# test escaping and unicode within CREATE TYPE for ENUM
typ = postgresql.ENUM("val1", "val2", "val's 3", "méil", name="myname")
self.assert_compile(
postgresql.CreateEnumType(typ),
"CREATE TYPE myname AS ENUM ('val1', 'val2', 'val''s 3', 'méil')",
)
typ = postgresql.ENUM("val1", "val2", "val's 3", name="PleaseQuoteMe")
self.assert_compile(
postgresql.CreateEnumType(typ),
'CREATE TYPE "PleaseQuoteMe" AS ENUM '
"('val1', 'val2', 'val''s 3')",
)
def test_generic_enum(self):
e1 = Enum("x", "y", "z", name="somename")
e2 = Enum("x", "y", "z", name="somename", schema="someschema")
self.assert_compile(
postgresql.CreateEnumType(e1),
"CREATE TYPE somename AS ENUM ('x', 'y', 'z')",
)
self.assert_compile(
postgresql.CreateEnumType(e2),
"CREATE TYPE someschema.somename AS ENUM ('x', 'y', 'z')",
)
self.assert_compile(postgresql.DropEnumType(e1), "DROP TYPE somename")
self.assert_compile(
postgresql.DropEnumType(e2), "DROP TYPE someschema.somename"
)
t1 = Table("sometable", MetaData(), Column("somecolumn", e1))
self.assert_compile(
schema.CreateTable(t1),
"CREATE TABLE sometable (somecolumn somename)",
)
t1 = Table(
"sometable",
MetaData(),
Column(
"somecolumn",
Enum("x", "y", "z", native_enum=False, create_constraint=True),
),
)
self.assert_compile(
schema.CreateTable(t1),
"CREATE TABLE sometable (somecolumn "
"VARCHAR(1), CHECK (somecolumn IN ('x', "
"'y', 'z')))",
)
def test_cast_enum_schema(self):
"""test #6739"""
e1 = Enum("x", "y", "z", name="somename")
e2 = Enum("x", "y", "z", name="somename", schema="someschema")
stmt = select(cast(column("foo"), e1), cast(column("bar"), e2))
self.assert_compile(
stmt,
"SELECT CAST(foo AS somename) AS foo, "
"CAST(bar AS someschema.somename) AS bar",
)
def test_cast_double_pg_double(self):
"""test #5465:
test sqlalchemy Double/DOUBLE to PostgreSQL DOUBLE PRECISION
"""
d1 = sqltypes.Double
stmt = select(cast(column("foo"), d1))
self.assert_compile(
stmt, "SELECT CAST(foo AS DOUBLE PRECISION) AS foo"
)
def test_cast_enum_schema_translate(self):
"""test #6739"""
e1 = Enum("x", "y", "z", name="somename")
e2 = Enum("x", "y", "z", name="somename", schema="someschema")
schema_translate_map = {None: "bat", "someschema": "hoho"}
stmt = select(cast(column("foo"), e1), cast(column("bar"), e2))
self.assert_compile(
stmt,
"SELECT CAST(foo AS bat.somename) AS foo, "
"CAST(bar AS hoho.somename) AS bar",
schema_translate_map=schema_translate_map,
render_schema_translate=True,
)
def test_create_enum_schema_translate(self):
e1 = Enum("x", "y", "z", name="somename")
e2 = Enum("x", "y", "z", name="somename", schema="someschema")
schema_translate_map = {None: "foo", "someschema": "bar"}
self.assert_compile(
postgresql.CreateEnumType(e1),
"CREATE TYPE foo.somename AS ENUM ('x', 'y', 'z')",
schema_translate_map=schema_translate_map,
render_schema_translate=True,
)
self.assert_compile(
postgresql.CreateEnumType(e2),
"CREATE TYPE bar.somename AS ENUM ('x', 'y', 'z')",
schema_translate_map=schema_translate_map,
render_schema_translate=True,
)
def test_domain(self):
self.assert_compile(
postgresql.CreateDomainType(
DOMAIN(
"x",
Integer,
default=text("11"),
not_null=True,
check="VALUE < 0",
)
),
"CREATE DOMAIN x AS INTEGER DEFAULT 11 NOT NULL CHECK (VALUE < 0)",
)
self.assert_compile(
postgresql.CreateDomainType(
DOMAIN(
"sOmEnAmE",
Text,
collation="utf8",
constraint_name="a constraint",
not_null=True,
)
),
'CREATE DOMAIN "sOmEnAmE" AS TEXT COLLATE utf8 CONSTRAINT '
'"a constraint" NOT NULL',
)
self.assert_compile(
postgresql.CreateDomainType(
DOMAIN(
"foo",
Text,
collation="utf8",
default="foobar",
constraint_name="no_bar",
not_null=True,
check="VALUE != 'bar'",
)
),
"CREATE DOMAIN foo AS TEXT COLLATE utf8 DEFAULT 'foobar' "
"CONSTRAINT no_bar NOT NULL CHECK (VALUE != 'bar')",
)
def test_cast_domain_schema(self):
"""test #6739"""
d1 = DOMAIN("somename", Integer)
d2 = DOMAIN("somename", Integer, schema="someschema")
stmt = select(cast(column("foo"), d1), cast(column("bar"), d2))
self.assert_compile(
stmt,
"SELECT CAST(foo AS somename) AS foo, "
"CAST(bar AS someschema.somename) AS bar",
)
def test_create_domain_schema_translate(self):
d1 = DOMAIN("somename", Integer)
d2 = DOMAIN("somename", Integer, schema="someschema")
schema_translate_map = {None: "foo", "someschema": "bar"}
self.assert_compile(
postgresql.CreateDomainType(d1),
"CREATE DOMAIN foo.somename AS INTEGER ",
schema_translate_map=schema_translate_map,
render_schema_translate=True,
)
self.assert_compile(
postgresql.CreateDomainType(d2),
"CREATE DOMAIN bar.somename AS INTEGER ",
schema_translate_map=schema_translate_map,
render_schema_translate=True,
)
def test_create_table_with_schema_type_schema_translate(self):
e1 = Enum("x", "y", "z", name="somename")
e2 = Enum("x", "y", "z", name="somename", schema="someschema")
schema_translate_map = {None: "foo", "someschema": "bar"}
table = Table(
"some_table", MetaData(), Column("q", e1), Column("p", e2)
)
from sqlalchemy.schema import CreateTable
self.assert_compile(
CreateTable(table),
"CREATE TABLE foo.some_table (q foo.somename, p bar.somename)",
schema_translate_map=schema_translate_map,
render_schema_translate=True,
)
def test_create_table_array_embedded_schema_type_schema_translate(self):
"""test #6739"""
e1 = Enum("x", "y", "z", name="somename")
e2 = Enum("x", "y", "z", name="somename", schema="someschema")
schema_translate_map = {None: "foo", "someschema": "bar"}
table = Table(
"some_table",
MetaData(),
Column("q", PG_ARRAY(e1)),
Column("p", PG_ARRAY(e2)),
)
from sqlalchemy.schema import CreateTable
self.assert_compile(
CreateTable(table),
"CREATE TABLE foo.some_table (q foo.somename[], p bar.somename[])",
schema_translate_map=schema_translate_map,
render_schema_translate=True,
)
def test_create_table_with_tablespace(self):
m = MetaData()
tbl = Table(
"atable",
m,
Column("id", Integer),
postgresql_tablespace="sometablespace",
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable (id INTEGER) TABLESPACE sometablespace",
)
def test_create_table_with_tablespace_quoted(self):
# testing quoting of tablespace name
m = MetaData()
tbl = Table(
"anothertable",
m,
Column("id", Integer),
postgresql_tablespace="table",
)
self.assert_compile(
schema.CreateTable(tbl),
'CREATE TABLE anothertable (id INTEGER) TABLESPACE "table"',
)
def test_create_table_inherits(self):
m = MetaData()
tbl = Table(
"atable", m, Column("id", Integer), postgresql_inherits="i1"
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable (id INTEGER) INHERITS ( i1 )",
)
def test_create_table_inherits_tuple(self):
m = MetaData()
tbl = Table(
"atable",
m,
Column("id", Integer),
postgresql_inherits=("i1", "i2"),
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable (id INTEGER) INHERITS ( i1, i2 )",
)
def test_create_table_inherits_quoting(self):
m = MetaData()
tbl = Table(
"atable",
m,
Column("id", Integer),
postgresql_inherits=("Quote Me", "quote Me Too"),
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable (id INTEGER) INHERITS "
'( "Quote Me", "quote Me Too" )',
)
def test_create_table_partition_by_list(self):
m = MetaData()
tbl = Table(
"atable",
m,
Column("id", Integer),
Column("part_column", Integer),
postgresql_partition_by="LIST (part_column)",
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable (id INTEGER, part_column INTEGER) "
"PARTITION BY LIST (part_column)",
)
def test_create_table_partition_by_range(self):
m = MetaData()
tbl = Table(
"atable",
m,
Column("id", Integer),
Column("part_column", Integer),
postgresql_partition_by="RANGE (part_column)",
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable (id INTEGER, part_column INTEGER) "
"PARTITION BY RANGE (part_column)",
)
def test_create_table_with_oids(self):
m = MetaData()
tbl = Table(
"atable", m, Column("id", Integer), postgresql_with_oids=True
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable (id INTEGER) WITH OIDS",
)
tbl2 = Table(
"anothertable",
m,
Column("id", Integer),
postgresql_with_oids=False,
)
self.assert_compile(
schema.CreateTable(tbl2),
"CREATE TABLE anothertable (id INTEGER) WITHOUT OIDS",
)
def test_create_table_with_storage_parameters(self):
m = MetaData()
tbl = Table("atable1", m, postgresql_with={"fillfactor": 100})
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable1 () WITH (fillfactor = 100)",
)
tbl2 = Table(
"atable2",
m,
postgresql_with={"toast.autovacuum_insert_scale_factor": 1.25},
)
self.assert_compile(
schema.CreateTable(tbl2),
"CREATE TABLE atable2 () "
"WITH (toast.autovacuum_insert_scale_factor = 1.25)",
)
tbl3 = Table(
"atable3",
m,
postgresql_with={
"user_catalog_table": False,
"parallel_workers": 15,
},
)
self.assert_compile(
schema.CreateTable(tbl3),
"CREATE TABLE atable3 () "
"WITH (user_catalog_table = False, parallel_workers = 15)",
)
def test_create_table_with_oncommit_option(self):
m = MetaData()
tbl = Table(
"atable", m, Column("id", Integer), postgresql_on_commit="drop"
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable (id INTEGER) ON COMMIT DROP",
)
def test_create_table_with_using_option(self):
m = MetaData()
tbl = Table(
"atable",
m,
Column("id", Integer),
postgresql_using="heap",
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable (id INTEGER) USING heap",
)
def test_create_table_with_multiple_options(self):
m = MetaData()
tbl = Table(
"atable",
m,
Column("id", Integer),
postgresql_tablespace="sometablespace",
postgresql_with_oids=False,
postgresql_on_commit="preserve_rows",
postgresql_using="heap",
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE atable (id INTEGER) USING heap WITHOUT OIDS "
"ON COMMIT PRESERVE ROWS TABLESPACE sometablespace",
)
def test_create_partial_index(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", Integer))
idx = Index(
"test_idx1",
tbl.c.data,
postgresql_where=and_(tbl.c.data > 5, tbl.c.data < 10),
)
idx = Index(
"test_idx1",
tbl.c.data,
postgresql_where=and_(tbl.c.data > 5, tbl.c.data < 10),
)
# test quoting and all that
idx2 = Index(
"test_idx2",
tbl.c.data,
postgresql_where=and_(tbl.c.data > "a", tbl.c.data < "b's"),
)
self.assert_compile(
schema.CreateIndex(idx),
"CREATE INDEX test_idx1 ON testtbl (data) "
"WHERE data > 5 AND data < 10",
dialect=postgresql.dialect(),
)
self.assert_compile(
schema.CreateIndex(idx2),
"CREATE INDEX test_idx2 ON testtbl (data) "
"WHERE data > 'a' AND data < 'b''s'",
dialect=postgresql.dialect(),
)
idx3 = Index(
"test_idx2",
tbl.c.data,
postgresql_where=text("data > 'a' AND data < 'b''s'"),
)
self.assert_compile(
schema.CreateIndex(idx3),
"CREATE INDEX test_idx2 ON testtbl (data) "
"WHERE data > 'a' AND data < 'b''s'",
dialect=postgresql.dialect(),
)
def test_create_index_with_ops(self):
m = MetaData()
tbl = Table(
"testtbl",
m,
Column("data", String),
Column("data2", Integer, key="d2"),
)
idx = Index(
"test_idx1",
tbl.c.data,
postgresql_ops={"data": "text_pattern_ops"},
)
idx2 = Index(
"test_idx2",
tbl.c.data,
tbl.c.d2,
postgresql_ops={"data": "text_pattern_ops", "d2": "int4_ops"},
)
self.assert_compile(
schema.CreateIndex(idx),
"CREATE INDEX test_idx1 ON testtbl (data text_pattern_ops)",
dialect=postgresql.dialect(),
)
self.assert_compile(
schema.CreateIndex(idx2),
"CREATE INDEX test_idx2 ON testtbl "
"(data text_pattern_ops, data2 int4_ops)",
dialect=postgresql.dialect(),
)
@testing.combinations(
(
lambda tbl: schema.CreateIndex(
Index(
"test_idx1",
tbl.c.data,
unique=True,
postgresql_nulls_not_distinct=True,
)
),
"CREATE UNIQUE INDEX test_idx1 ON test_tbl "
"(data) NULLS NOT DISTINCT",
),
(
lambda tbl: schema.CreateIndex(
Index(
"test_idx2",
tbl.c.data2,
unique=True,
postgresql_nulls_not_distinct=False,
)
),
"CREATE UNIQUE INDEX test_idx2 ON test_tbl "
"(data2) NULLS DISTINCT",
),
(
lambda tbl: schema.CreateIndex(
Index(
"test_idx3",
tbl.c.data3,
unique=True,
)
),
"CREATE UNIQUE INDEX test_idx3 ON test_tbl (data3)",
),
(
lambda tbl: schema.CreateIndex(
Index(
"test_idx3_complex",
tbl.c.data3,
postgresql_nulls_not_distinct=True,
postgresql_include=["data2"],
postgresql_where=and_(tbl.c.data3 > 5),
postgresql_with={"fillfactor": 50},
)
),
"CREATE INDEX test_idx3_complex ON test_tbl "
"(data3) INCLUDE (data2) NULLS NOT DISTINCT WITH "
"(fillfactor = 50) WHERE data3 > 5",
),
(
lambda tbl: schema.AddConstraint(
schema.UniqueConstraint(
tbl.c.data,
name="uq_data1",
postgresql_nulls_not_distinct=True,
)
),
"ALTER TABLE test_tbl ADD CONSTRAINT uq_data1 UNIQUE "
"NULLS NOT DISTINCT (data)",
),
(
lambda tbl: schema.AddConstraint(
schema.UniqueConstraint(
tbl.c.data2,
name="uq_data2",
postgresql_nulls_not_distinct=False,
)
),
"ALTER TABLE test_tbl ADD CONSTRAINT uq_data2 UNIQUE "
"NULLS DISTINCT (data2)",
),
(
lambda tbl: schema.AddConstraint(
schema.UniqueConstraint(
tbl.c.data3,
name="uq_data3",
)
),
"ALTER TABLE test_tbl ADD CONSTRAINT uq_data3 UNIQUE (data3)",
),
)
def test_nulls_not_distinct(self, expr_fn, expected):
dd = PGDialect()
m = MetaData()
tbl = Table(
"test_tbl",
m,
Column("data", String),
Column("data2", Integer),
Column("data3", Integer),
)
expr = testing.resolve_lambda(expr_fn, tbl=tbl)
self.assert_compile(expr, expected, dialect=dd)
@testing.combinations(
(
lambda tbl: schema.AddConstraint(
UniqueConstraint(tbl.c.id, postgresql_include=[tbl.c.value])
),
"ALTER TABLE foo ADD UNIQUE (id) INCLUDE (value)",
),
(
lambda tbl: schema.AddConstraint(
PrimaryKeyConstraint(
tbl.c.id, postgresql_include=[tbl.c.value, "misc"]
)
),
"ALTER TABLE foo ADD PRIMARY KEY (id) INCLUDE (value, misc)",
),
(
lambda tbl: schema.CreateIndex(
Index("idx", tbl.c.id, postgresql_include=[tbl.c.value])
),
"CREATE INDEX idx ON foo (id) INCLUDE (value)",
),
)
def test_include(self, expr_fn, expected):
m = MetaData()
tbl = Table(
"foo",
m,
Column("id", Integer, nullable=False),
Column("value", Integer, nullable=False),
Column("misc", String),
)
expr = testing.resolve_lambda(expr_fn, tbl=tbl)
self.assert_compile(expr, expected)
def test_create_index_with_labeled_ops(self):
m = MetaData()
tbl = Table(
"testtbl",
m,
Column("data", String),
Column("data2", Integer, key="d2"),
)
idx = Index(
"test_idx1",
func.lower(tbl.c.data).label("data_lower"),
postgresql_ops={"data_lower": "text_pattern_ops"},
)
idx2 = Index(
"test_idx2",
(func.xyz(tbl.c.data) + tbl.c.d2).label("bar"),
tbl.c.d2.label("foo"),
postgresql_ops={"bar": "text_pattern_ops", "foo": "int4_ops"},
)
self.assert_compile(
schema.CreateIndex(idx),
"CREATE INDEX test_idx1 ON testtbl "
"(lower(data) text_pattern_ops)",
dialect=postgresql.dialect(),
)
self.assert_compile(
schema.CreateIndex(idx2),
"CREATE INDEX test_idx2 ON testtbl "
"((xyz(data) + data2) text_pattern_ops, "
"data2 int4_ops)",
dialect=postgresql.dialect(),
)
def test_create_index_with_text_or_composite(self):
m = MetaData()
tbl = Table("testtbl", m, Column("d1", String), Column("d2", Integer))
idx = Index("test_idx1", text("x"))
tbl.append_constraint(idx)
idx2 = Index("test_idx2", text("y"), tbl.c.d2)
idx3 = Index(
"test_idx2",
tbl.c.d1,
text("y"),
tbl.c.d2,
postgresql_ops={"d1": "x1", "d2": "x2"},
)
idx4 = Index(
"test_idx2",
tbl.c.d1,
tbl.c.d2 > 5,
text("q"),
postgresql_ops={"d1": "x1", "d2": "x2"},
)
idx5 = Index(
"test_idx2",
tbl.c.d1,
(tbl.c.d2 > 5).label("g"),
text("q"),
postgresql_ops={"d1": "x1", "g": "x2"},
)
self.assert_compile(
schema.CreateIndex(idx), "CREATE INDEX test_idx1 ON testtbl (x)"
)
self.assert_compile(
schema.CreateIndex(idx2),
"CREATE INDEX test_idx2 ON testtbl (y, d2)",
)
self.assert_compile(
schema.CreateIndex(idx3),
"CREATE INDEX test_idx2 ON testtbl (d1 x1, y, d2 x2)",
)
# note that at the moment we do not expect the 'd2' op to
# pick up on the "d2 > 5" expression
self.assert_compile(
schema.CreateIndex(idx4),
"CREATE INDEX test_idx2 ON testtbl (d1 x1, (d2 > 5), q)",
)
# however it does work if we label!
self.assert_compile(
schema.CreateIndex(idx5),
"CREATE INDEX test_idx2 ON testtbl (d1 x1, (d2 > 5) x2, q)",
)
def test_create_index_with_using(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", String))
idx1 = Index("test_idx1", tbl.c.data)
idx2 = Index("test_idx2", tbl.c.data, postgresql_using="btree")
idx3 = Index("test_idx3", tbl.c.data, postgresql_using="hash")
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX test_idx1 ON testtbl (data)",
dialect=postgresql.dialect(),
)
self.assert_compile(
schema.CreateIndex(idx2),
"CREATE INDEX test_idx2 ON testtbl USING btree (data)",
dialect=postgresql.dialect(),
)
self.assert_compile(
schema.CreateIndex(idx3),
"CREATE INDEX test_idx3 ON testtbl USING hash (data)",
dialect=postgresql.dialect(),
)
def test_create_index_with_with(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", String))
idx1 = Index("test_idx1", tbl.c.data)
idx2 = Index(
"test_idx2", tbl.c.data, postgresql_with={"fillfactor": 50}
)
idx3 = Index(
"test_idx3",
tbl.c.data,
postgresql_using="gist",
postgresql_with={"buffering": "off"},
)
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX test_idx1 ON testtbl (data)",
)
self.assert_compile(
schema.CreateIndex(idx2),
"CREATE INDEX test_idx2 ON testtbl "
"(data) "
"WITH (fillfactor = 50)",
)
self.assert_compile(
schema.CreateIndex(idx3),
"CREATE INDEX test_idx3 ON testtbl "
"USING gist (data) "
"WITH (buffering = off)",
)
def test_create_index_with_using_unusual_conditions(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", String))
self.assert_compile(
schema.CreateIndex(
Index("test_idx1", tbl.c.data, postgresql_using="GIST")
),
"CREATE INDEX test_idx1 ON testtbl USING gist (data)",
)
self.assert_compile(
schema.CreateIndex(
Index(
"test_idx1",
tbl.c.data,
postgresql_using="some_custom_method",
)
),
"CREATE INDEX test_idx1 ON testtbl "
"USING some_custom_method (data)",
)
assert_raises_message(
exc.CompileError,
"Unexpected SQL phrase: 'gin invalid sql'",
schema.CreateIndex(
Index(
"test_idx2", tbl.c.data, postgresql_using="gin invalid sql"
)
).compile,
dialect=postgresql.dialect(),
)
def test_create_index_with_tablespace(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", String))
idx1 = Index("test_idx1", tbl.c.data)
idx2 = Index(
"test_idx2", tbl.c.data, postgresql_tablespace="sometablespace"
)
idx3 = Index(
"test_idx3",
tbl.c.data,
postgresql_tablespace="another table space",
)
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX test_idx1 ON testtbl (data)",
dialect=postgresql.dialect(),
)
self.assert_compile(
schema.CreateIndex(idx2),
"CREATE INDEX test_idx2 ON testtbl "
"(data) "
"TABLESPACE sometablespace",
dialect=postgresql.dialect(),
)
self.assert_compile(
schema.CreateIndex(idx3),
"CREATE INDEX test_idx3 ON testtbl "
"(data) "
'TABLESPACE "another table space"',
dialect=postgresql.dialect(),
)
def test_create_index_with_multiple_options(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", String))
idx1 = Index(
"test_idx1",
tbl.c.data,
postgresql_using="btree",
postgresql_tablespace="atablespace",
postgresql_with={"fillfactor": 60},
postgresql_where=and_(tbl.c.data > 5, tbl.c.data < 10),
)
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX test_idx1 ON testtbl "
"USING btree (data) "
"WITH (fillfactor = 60) "
"TABLESPACE atablespace "
"WHERE data > 5 AND data < 10",
dialect=postgresql.dialect(),
)
def test_create_index_expr_gets_parens(self):
m = MetaData()
tbl = Table("testtbl", m, Column("x", Integer), Column("y", Integer))
idx1 = Index("test_idx1", 5 // (tbl.c.x + tbl.c.y))
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX test_idx1 ON testtbl ((5 / (x + y)))",
)
def test_create_index_literals(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", Integer))
idx1 = Index("test_idx1", tbl.c.data + 5)
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX test_idx1 ON testtbl ((data + 5))",
)
def test_create_index_concurrently(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", Integer))
idx1 = Index("test_idx1", tbl.c.data, postgresql_concurrently=True)
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX CONCURRENTLY test_idx1 ON testtbl (data)",
)
dialect_8_1 = postgresql.dialect()
dialect_8_1._supports_create_index_concurrently = False
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX test_idx1 ON testtbl (data)",
dialect=dialect_8_1,
)
def test_drop_index_concurrently(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", Integer))
idx1 = Index("test_idx1", tbl.c.data, postgresql_concurrently=True)
self.assert_compile(
schema.DropIndex(idx1), "DROP INDEX CONCURRENTLY test_idx1"
)
dialect_9_1 = postgresql.dialect()
dialect_9_1._supports_drop_index_concurrently = False
self.assert_compile(
schema.DropIndex(idx1), "DROP INDEX test_idx1", dialect=dialect_9_1
)
def test_create_check_constraint_not_valid(self):
m = MetaData()
tbl = Table(
"testtbl",
m,
Column("data", Integer),
CheckConstraint("data = 0", postgresql_not_valid=True),
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE testtbl (data INTEGER, CHECK (data = 0) NOT VALID)",
)
def test_create_foreign_key_constraint_not_valid(self):
m = MetaData()
tbl = Table(
"testtbl",
m,
Column("a", Integer),
Column("b", Integer),
ForeignKeyConstraint(
"b", ["testtbl.a"], postgresql_not_valid=True
),
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE testtbl ("
"a INTEGER, "
"b INTEGER, "
"FOREIGN KEY(b) REFERENCES testtbl (a) NOT VALID"
")",
)
def test_create_foreign_key_column_not_valid(self):
m = MetaData()
tbl = Table(
"testtbl",
m,
Column("a", Integer),
Column("b", ForeignKey("testtbl.a", postgresql_not_valid=True)),
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE testtbl ("
"a INTEGER, "
"b INTEGER, "
"FOREIGN KEY(b) REFERENCES testtbl (a) NOT VALID"
")",
)
def test_create_foreign_key_constraint_ondelete_column_list(self):
m = MetaData()
pktable = Table(
"pktable",
m,
Column("tid", Integer, primary_key=True),
Column("id", Integer, primary_key=True),
)
fktable = Table(
"fktable",
m,
Column("tid", Integer),
Column("id", Integer),
Column("fk_id_del_set_null", Integer),
Column("fk_id_del_set_default", Integer, server_default=text("0")),
ForeignKeyConstraint(
columns=["tid", "fk_id_del_set_null"],
refcolumns=[pktable.c.tid, pktable.c.id],
ondelete="SET NULL (fk_id_del_set_null)",
),
ForeignKeyConstraint(
columns=["tid", "fk_id_del_set_default"],
refcolumns=[pktable.c.tid, pktable.c.id],
ondelete="SET DEFAULT(fk_id_del_set_default)",
),
)
self.assert_compile(
schema.CreateTable(fktable),
"CREATE TABLE fktable ("
"tid INTEGER, id INTEGER, "
"fk_id_del_set_null INTEGER, "
"fk_id_del_set_default INTEGER DEFAULT 0, "
"FOREIGN KEY(tid, fk_id_del_set_null)"
" REFERENCES pktable (tid, id)"
" ON DELETE SET NULL (fk_id_del_set_null), "
"FOREIGN KEY(tid, fk_id_del_set_default)"
" REFERENCES pktable (tid, id)"
" ON DELETE SET DEFAULT(fk_id_del_set_default)"
")",
)
def test_exclude_constraint_min(self):
m = MetaData()
tbl = Table("testtbl", m, Column("room", Integer, primary_key=True))
cons = ExcludeConstraint(("room", "="))
tbl.append_constraint(cons)
self.assert_compile(
schema.AddConstraint(cons),
"ALTER TABLE testtbl ADD EXCLUDE USING gist (room WITH =)",
dialect=postgresql.dialect(),
)
@testing.combinations(
(True, "deferred"),
(False, "immediate"),
argnames="deferrable_value, initially_value",
)
def test_copy_exclude_constraint_adhoc_columns(
self, deferrable_value, initially_value
):
meta = MetaData()
table = Table(
"mytable",
meta,
Column("myid", Integer, Sequence("foo_id_seq"), primary_key=True),
Column("valid_from_date", Date(), nullable=True),
Column("valid_thru_date", Date(), nullable=True),
)
sql_text = "daterange(valid_from_date, valid_thru_date, '[]')"
cons = ExcludeConstraint(
(literal_column(sql_text), "&&"),
where=column("valid_from_date") <= column("valid_thru_date"),
name="ex_mytable_valid_date_range",
deferrable=deferrable_value,
initially=initially_value,
)
table.append_constraint(cons)
eq_(cons.columns.keys(), [sql_text])
expected = (
"ALTER TABLE mytable ADD CONSTRAINT ex_mytable_valid_date_range "
"EXCLUDE USING gist "
"(daterange(valid_from_date, valid_thru_date, '[]') WITH &&) "
"WHERE (valid_from_date <= valid_thru_date) "
"%s %s"
% (
"NOT DEFERRABLE" if not deferrable_value else "DEFERRABLE",
"INITIALLY %s" % initially_value,
)
)
self.assert_compile(
schema.AddConstraint(cons),
expected,
dialect=postgresql.dialect(),
)
meta2 = MetaData()
table2 = table.to_metadata(meta2)
cons2 = [
c for c in table2.constraints if isinstance(c, ExcludeConstraint)
][0]
self.assert_compile(
schema.AddConstraint(cons2),
expected,
dialect=postgresql.dialect(),
)
def test_exclude_constraint_full(self):
m = MetaData()
room = Column("room", Integer, primary_key=True)
tbl = Table("testtbl", m, room, Column("during", TSRANGE))
room = Column("room", Integer, primary_key=True)
cons = ExcludeConstraint(
(room, "="),
("during", "&&"),
name="my_name",
using="gist",
where="room > 100",
deferrable=True,
initially="immediate",
ops={"room": "my_opclass"},
)
tbl.append_constraint(cons)
self.assert_compile(
schema.AddConstraint(cons),
"ALTER TABLE testtbl ADD CONSTRAINT my_name "
"EXCLUDE USING gist "
"(room my_opclass WITH =, during WITH "
"&&) WHERE "
"(room > 100) DEFERRABLE INITIALLY immediate",
dialect=postgresql.dialect(),
)
def test_exclude_constraint_copy(self):
m = MetaData()
cons = ExcludeConstraint(("room", "="))
tbl = Table(
"testtbl", m, Column("room", Integer, primary_key=True), cons
)
# apparently you can't copy a ColumnCollectionConstraint until
# after it has been bound to a table...
cons_copy = cons._copy()
tbl.append_constraint(cons_copy)
self.assert_compile(
schema.AddConstraint(cons_copy),
"ALTER TABLE testtbl ADD EXCLUDE USING gist (room WITH =)",
)
def test_exclude_constraint_copy_complex(self):
m = MetaData()
tbl = Table("foo", m, Column("x", Integer), Column("y", Integer))
cons = ExcludeConstraint(
(tbl.c.x, "*"),
(text("x-y"), "%"),
(literal_column("x+y"), "$"),
(tbl.c.x // tbl.c.y, "??"),
(func.power(tbl.c.x, 42), "="),
(func.int8range(column("x"), column("y")), "&&"),
("y", "^"),
)
tbl.append_constraint(cons)
expected = (
"ALTER TABLE {name} ADD EXCLUDE USING gist "
"(x WITH *, x-y WITH %, x+y WITH $, x / y WITH ??, "
"power(x, 42) WITH =, int8range(x, y) WITH &&, y WITH ^)"
)
self.assert_compile(
schema.AddConstraint(cons),
expected.format(name="foo"),
dialect=postgresql.dialect(),
)
m2 = MetaData()
tbl2 = tbl.to_metadata(m2, name="bar")
(cons2,) = [
c for c in tbl2.constraints if isinstance(c, ExcludeConstraint)
]
self.assert_compile(
schema.AddConstraint(cons2),
expected.format(name="bar"),
dialect=postgresql.dialect(),
)
def test_exclude_constraint_copy_where_using(self):
m = MetaData()
tbl = Table("testtbl", m, Column("room", Integer, primary_key=True))
cons = ExcludeConstraint(
(tbl.c.room, "="), where=tbl.c.room > 5, using="foobar"
)
tbl.append_constraint(cons)
self.assert_compile(
schema.AddConstraint(cons),
"ALTER TABLE testtbl ADD EXCLUDE USING foobar "
"(room WITH =) WHERE (testtbl.room > 5)",
)
m2 = MetaData()
tbl2 = tbl.to_metadata(m2)
self.assert_compile(
schema.CreateTable(tbl2),
"CREATE TABLE testtbl (room SERIAL NOT NULL, "
"PRIMARY KEY (room), "
"EXCLUDE USING foobar "
"(room WITH =) WHERE (testtbl.room > 5))",
)
def test_exclude_constraint_text(self):
m = MetaData()
cons = ExcludeConstraint((text("room::TEXT"), "="))
Table("testtbl", m, Column("room", String), cons)
eq_(list(cons.columns), [])
self.assert_compile(
schema.AddConstraint(cons),
"ALTER TABLE testtbl ADD EXCLUDE USING gist "
"(room::TEXT WITH =)",
)
    def test_exclude_constraint_colname_needs_quoting(self):
        """A column name containing spaces is quoted when rendered in an
        EXCLUDE constraint."""
        m = MetaData()
        cons = ExcludeConstraint(("Some Column Name", "="))
        Table("testtbl", m, Column("Some Column Name", String), cons)
        self.assert_compile(
            schema.AddConstraint(cons),
            "ALTER TABLE testtbl ADD EXCLUDE USING gist "
            '("Some Column Name" WITH =)',
        )
    def test_exclude_constraint_with_using_unusual_conditions(self):
        """A using= value that is not a valid SQL phrase raises
        CompileError rather than being emitted verbatim."""
        m = MetaData()
        cons = ExcludeConstraint(("q", "="), using="not a keyword")
        Table("testtbl", m, Column("q", String), cons)
        assert_raises_message(
            exc.CompileError,
            "Unexpected SQL phrase: 'not a keyword'",
            schema.AddConstraint(cons).compile,
            dialect=postgresql.dialect(),
        )
    def test_exclude_constraint_cast(self):
        """A cast() expression renders as CAST(...) inside an EXCLUDE
        constraint."""
        m = MetaData()
        tbl = Table("testtbl", m, Column("room", String))
        cons = ExcludeConstraint((cast(tbl.c.room, Text), "="))
        tbl.append_constraint(cons)
        self.assert_compile(
            schema.AddConstraint(cons),
            "ALTER TABLE testtbl ADD EXCLUDE USING gist "
            "(CAST(room AS TEXT) WITH =)",
        )
    def test_exclude_constraint_cast_quote(self):
        """A cast() of a mixed-case column keeps the quoted column name
        inside the CAST expression."""
        m = MetaData()
        tbl = Table("testtbl", m, Column("Room", String))
        cons = ExcludeConstraint((cast(tbl.c.Room, Text), "="))
        tbl.append_constraint(cons)
        self.assert_compile(
            schema.AddConstraint(cons),
            "ALTER TABLE testtbl ADD EXCLUDE USING gist "
            '(CAST("Room" AS TEXT) WITH =)',
        )
    def test_exclude_constraint_when(self):
        """A where= criterion using IN renders as a parenthesized WHERE
        clause on the EXCLUDE constraint."""
        m = MetaData()
        tbl = Table("testtbl", m, Column("room", String))
        cons = ExcludeConstraint(("room", "="), where=tbl.c.room.in_(["12"]))
        tbl.append_constraint(cons)
        self.assert_compile(
            schema.AddConstraint(cons),
            "ALTER TABLE testtbl ADD EXCLUDE USING gist "
            "(room WITH =) WHERE (testtbl.room IN ('12'))",
            dialect=postgresql.dialect(),
        )
    def test_exclude_constraint_ops_many(self):
        """The ops= dict assigns a per-column operator class, rendered
        between the column name and WITH."""
        m = MetaData()
        tbl = Table(
            "testtbl", m, Column("room", String), Column("during", TSRANGE)
        )
        cons = ExcludeConstraint(
            ("room", "="),
            ("during", "&&"),
            ops={"room": "first_opsclass", "during": "second_opclass"},
        )
        tbl.append_constraint(cons)
        self.assert_compile(
            schema.AddConstraint(cons),
            "ALTER TABLE testtbl ADD EXCLUDE USING gist "
            "(room first_opsclass WITH =, during second_opclass WITH &&)",
            dialect=postgresql.dialect(),
        )
    def test_exclude_constraint_expression(self):
        """A function expression element contributes only its first column
        to the constraint's column collection (issue #9233)."""
        m = MetaData()
        tbl = Table("foo", m, Column("x", Integer), Column("y", Integer))
        cons = ExcludeConstraint((func.int8range(column("x"), tbl.c.y), "&&"))
        tbl.append_constraint(cons)
        # only the first col is considered. see #9233
        eq_(cons.columns.keys(), ["x"])
        self.assert_compile(
            schema.AddConstraint(cons),
            "ALTER TABLE foo ADD EXCLUDE USING gist "
            "(int8range(x, y) WITH &&)",
            dialect=postgresql.dialect(),
        )
    def test_exclude_constraint_literal_binds(self):
        """Bind parameters inside EXCLUDE constraint expressions are
        rendered as inline literals (e.g. 'y')."""
        m = MetaData()
        tbl = Table("foo", m, Column("x", Integer), Column("y", Integer))
        cons = ExcludeConstraint(
            (func.power(tbl.c.x, 42), "="),
            (func.int8range(column("x"), "y"), "&&"),
        )
        tbl.append_constraint(cons)
        self.assert_compile(
            schema.AddConstraint(cons),
            "ALTER TABLE foo ADD EXCLUDE USING gist "
            "(power(x, 42) WITH =, int8range(x, 'y') WITH &&)",
            dialect=postgresql.dialect(),
        )
    def test_substring(self):
        """func.substring renders PG's FROM ... FOR ... syntax, with FOR
        omitted when no length argument is given."""
        self.assert_compile(
            func.substring("abc", 1, 2),
            "SUBSTRING(%(substring_1)s FROM %(substring_2)s "
            "FOR %(substring_3)s)",
        )
        self.assert_compile(
            func.substring("abc", 1),
            "SUBSTRING(%(substring_1)s FROM %(substring_2)s)",
        )
    def test_for_update(self):
        """Exhaustive rendering matrix for with_for_update() options:

        * default -> FOR UPDATE; read=True -> FOR SHARE;
          key_share=True -> FOR NO KEY UPDATE;
          read+key_share -> FOR KEY SHARE
        * nowait / skip_locked append NOWAIT / SKIP LOCKED
        * of= accepts columns, tables, aliases, joins, text() and
          literal_column(), and renders the owning table name(s)
        """
        table1 = table(
            "mytable", column("myid"), column("name"), column("description")
        )
        self.assert_compile(
            table1.select().where(table1.c.myid == 7).with_for_update(),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s FOR UPDATE",
        )
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(nowait=True),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s FOR UPDATE NOWAIT",
        )
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(skip_locked=True),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s "
            "FOR UPDATE SKIP LOCKED",
        )
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(read=True),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s FOR SHARE",
        )
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(read=True, nowait=True),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s FOR SHARE NOWAIT",
        )
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(key_share=True, nowait=True),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s "
            "FOR NO KEY UPDATE NOWAIT",
        )
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(key_share=True, read=True, nowait=True),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s "
            "FOR KEY SHARE NOWAIT",
        )
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(read=True, skip_locked=True),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s "
            "FOR SHARE SKIP LOCKED",
        )
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(of=table1.c.myid),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s "
            "FOR UPDATE OF mytable",
        )
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(read=True, nowait=True, of=table1),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s "
            "FOR SHARE OF mytable NOWAIT",
        )
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(
                key_share=True, read=True, nowait=True, of=table1
            ),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s "
            "FOR KEY SHARE OF mytable NOWAIT",
        )
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(read=True, nowait=True, of=table1.c.myid),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s "
            "FOR SHARE OF mytable NOWAIT",
        )
        # multiple columns of the same table collapse to one OF entry
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(
                read=True, nowait=True, of=[table1.c.myid, table1.c.name]
            ),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s "
            "FOR SHARE OF mytable NOWAIT",
        )
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(
                read=True,
                skip_locked=True,
                of=[table1.c.myid, table1.c.name],
                key_share=True,
            ),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s "
            "FOR KEY SHARE OF mytable SKIP LOCKED",
        )
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(
                skip_locked=True, of=[table1.c.myid, table1.c.name]
            ),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s "
            "FOR UPDATE OF mytable SKIP LOCKED",
        )
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(
                read=True, skip_locked=True, of=[table1.c.myid, table1.c.name]
            ),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s "
            "FOR SHARE OF mytable SKIP LOCKED",
        )
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(
                key_share=True, nowait=True, of=[table1.c.myid, table1.c.name]
            ),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s "
            "FOR NO KEY UPDATE OF mytable NOWAIT",
        )
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(
                key_share=True,
                skip_locked=True,
                of=[table1.c.myid, table1.c.name],
            ),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s "
            "FOR NO KEY UPDATE OF mytable SKIP LOCKED",
        )
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(
                key_share=True, of=[table1.c.myid, table1.c.name]
            ),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s "
            "FOR NO KEY UPDATE OF mytable",
        )
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(key_share=True),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s "
            "FOR NO KEY UPDATE",
        )
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(read=True, key_share=True),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s "
            "FOR KEY SHARE",
        )
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(read=True, key_share=True, of=table1),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s "
            "FOR KEY SHARE OF mytable",
        )
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(read=True, of=table1),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s "
            "FOR SHARE OF mytable",
        )
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(read=True, key_share=True, skip_locked=True),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s "
            "FOR KEY SHARE SKIP LOCKED",
        )
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(key_share=True, skip_locked=True),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s "
            "FOR NO KEY UPDATE SKIP LOCKED",
        )
        # of= against an alias renders the alias name
        ta = table1.alias()
        self.assert_compile(
            ta.select()
            .where(ta.c.myid == 7)
            .with_for_update(of=[ta.c.myid, ta.c.name]),
            "SELECT mytable_1.myid, mytable_1.name, mytable_1.description "
            "FROM mytable AS mytable_1 "
            "WHERE mytable_1.myid = %(myid_1)s FOR UPDATE OF mytable_1",
        )
        # of=join renders each table participating in the join
        table2 = table("table2", column("mytable_id"))
        join = table2.join(table1, table2.c.mytable_id == table1.c.myid)
        self.assert_compile(
            join.select()
            .where(table2.c.mytable_id == 7)
            .with_for_update(of=[join]),
            "SELECT table2.mytable_id, "
            "mytable.myid, mytable.name, mytable.description "
            "FROM table2 "
            "JOIN mytable ON table2.mytable_id = mytable.myid "
            "WHERE table2.mytable_id = %(mytable_id_1)s "
            "FOR UPDATE OF mytable, table2",
        )
        join = table2.join(ta, table2.c.mytable_id == ta.c.myid)
        self.assert_compile(
            join.select()
            .where(table2.c.mytable_id == 7)
            .with_for_update(of=[join]),
            "SELECT table2.mytable_id, "
            "mytable_1.myid, mytable_1.name, mytable_1.description "
            "FROM table2 "
            "JOIN mytable AS mytable_1 "
            "ON table2.mytable_id = mytable_1.myid "
            "WHERE table2.mytable_id = %(mytable_id_1)s "
            "FOR UPDATE OF mytable_1, table2",
        )
        # ensure of=text() for of works
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(of=text("table1")),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s "
            "FOR UPDATE OF table1",
        )
        # ensure literal_column of works
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(of=literal_column("table1")),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %(myid_1)s "
            "FOR UPDATE OF table1",
        )
        # test issue #12417
        subquery = select(table1.c.myid).with_for_update(of=table1).lateral()
        statement = select(subquery.c.myid)
        self.assert_compile(
            statement,
            "SELECT anon_1.myid FROM LATERAL (SELECT mytable.myid AS myid "
            "FROM mytable FOR UPDATE OF mytable) AS anon_1",
        )
    def test_for_update_with_schema(self):
        """FOR UPDATE OF renders the bare table name even when the table is
        schema-qualified elsewhere in the statement."""
        m = MetaData()
        table1 = Table(
            "mytable", m, Column("myid"), Column("name"), schema="testschema"
        )
        self.assert_compile(
            table1.select()
            .where(table1.c.myid == 7)
            .with_for_update(of=table1),
            "SELECT testschema.mytable.myid, testschema.mytable.name "
            "FROM testschema.mytable "
            "WHERE testschema.mytable.myid = %(myid_1)s "
            "FOR UPDATE OF mytable",
        )
    def test_reserved_words(self):
        """A PG reserved word used as a column name ("variadic") is quoted
        in the rendered SELECT."""
        table = Table(
            "pg_table",
            MetaData(),
            Column("col1", Integer),
            Column("variadic", Integer),
        )
        x = select(table.c.col1, table.c.variadic)
        self.assert_compile(
            x, """SELECT pg_table.col1, pg_table."variadic" FROM pg_table"""
        )
    def _array_any_deprecation(self):
        """Return an expect_deprecated() context matching the ARRAY
        any()/all() deprecation warning; shared by the deprecated-API tests
        below."""
        return testing.expect_deprecated(
            r"The ARRAY.Comparator.any\(\) and "
            r"ARRAY.Comparator.all\(\) methods "
            r"for arrays are deprecated for removal, along with the "
            r"PG-specific Any\(\) "
            r"and All\(\) functions. See any_\(\) and all_\(\) functions for "
            "modern use. "
        )
    def test_array(self):
        """ARRAY column rendering: CAST syntax, indexed and sliced access,
        and the containment operators @> / <@ / && — with the psycopg2
        dialect adding an ::INTEGER[] cast on the bound parameter that the
        default PG dialect omits."""
        c = Column("x", postgresql.ARRAY(Integer))
        self.assert_compile(
            cast(c, postgresql.ARRAY(Integer)), "CAST(x AS INTEGER[])"
        )
        self.assert_compile(c[5], "x[%(x_1)s]", checkparams={"x_1": 5})
        self.assert_compile(
            c[5:7], "x[%(x_1)s:%(x_2)s]", checkparams={"x_2": 7, "x_1": 5}
        )
        self.assert_compile(
            c[5:7][2:3],
            "x[%(x_1)s:%(x_2)s][%(param_1)s:%(param_2)s]",
            checkparams={"x_2": 7, "x_1": 5, "param_1": 2, "param_2": 3},
        )
        self.assert_compile(
            c[5:7][3],
            "x[%(x_1)s:%(x_2)s][%(param_1)s]",
            checkparams={"x_2": 7, "x_1": 5, "param_1": 3},
        )
        self.assert_compile(
            c.contains([1]),
            "x @> %(x_1)s::INTEGER[]",
            checkparams={"x_1": [1]},
            dialect=PGDialect_psycopg2(),
        )
        self.assert_compile(
            c.contained_by([2]),
            "x <@ %(x_1)s::INTEGER[]",
            checkparams={"x_1": [2]},
            dialect=PGDialect_psycopg2(),
        )
        self.assert_compile(
            c.contained_by([2]),
            "x <@ %(x_1)s",
            checkparams={"x_1": [2]},
            dialect=PGDialect(),
        )
        self.assert_compile(
            c.overlap([3]),
            "x && %(x_1)s::INTEGER[]",
            checkparams={"x_1": [3]},
            dialect=PGDialect_psycopg2(),
        )
    def test_array_modern_any_all(self):
        """The modern any_()/all_() API (method and free-function forms)
        renders ANY (x) / ALL (x), including negation and non-equality
        operators."""
        c = Column("x", postgresql.ARRAY(Integer))
        self.assert_compile(
            4 == c.any_(),
            "%(param_1)s = ANY (x)",
            checkparams={"param_1": 4},
        )
        self.assert_compile(
            5 == any_(c),
            "%(param_1)s = ANY (x)",
            checkparams={"param_1": 5},
        )
        self.assert_compile(
            ~(c.any_() == 5),
            "NOT (%(param_1)s = ANY (x))",
            checkparams={"param_1": 5},
        )
        self.assert_compile(
            ~(5 == c.any_()),
            "NOT (%(param_1)s = ANY (x))",
            checkparams={"param_1": 5},
        )
        self.assert_compile(
            5 != any_(c),
            "%(param_1)s != ANY (x)",
            checkparams={"param_1": 5},
        )
        self.assert_compile(
            6 > all_(c),
            "%(param_1)s > ALL (x)",
            checkparams={"param_1": 6},
        )
        self.assert_compile(
            7 < all_(c),
            "%(param_1)s < ALL (x)",
            checkparams={"param_1": 7},
        )
        self.assert_compile(
            c.all_() == 5,
            "%(param_1)s = ALL (x)",
            checkparams={"param_1": 5},
        )
        self.assert_compile(
            5 == c.all_(),
            "%(param_1)s = ALL (x)",
            checkparams={"param_1": 5},
        )
        self.assert_compile(
            ~(5 == all_(c)),
            "NOT (%(param_1)s = ALL (x))",
            checkparams={"param_1": 5},
        )
        self.assert_compile(
            ~(all_(c) == 5),
            "NOT (%(param_1)s = ALL (x))",
            checkparams={"param_1": 5},
        )
    def test_array_deprecated_any_all(self):
        """The legacy Any()/All() functions and .any()/.all() comparator
        methods still compile, each under a deprecation warning (see
        _array_any_deprecation)."""
        c = Column("x", postgresql.ARRAY(Integer))
        with self._array_any_deprecation():
            self.assert_compile(
                postgresql.Any(4, c),
                "%(x_1)s = ANY (x)",
                checkparams={"x_1": 4},
            )
        with self._array_any_deprecation():
            self.assert_compile(
                c.any(5),
                "%(x_1)s = ANY (x)",
                checkparams={"x_1": 5},
            )
        with self._array_any_deprecation():
            self.assert_compile(
                ~c.any(5),
                "NOT (%(x_1)s = ANY (x))",
                checkparams={"x_1": 5},
            )
        with self._array_any_deprecation():
            self.assert_compile(
                c.any(5, operator=operators.ne),
                "%(x_1)s != ANY (x)",
                checkparams={"x_1": 5},
            )
        with self._array_any_deprecation():
            self.assert_compile(
                postgresql.All(6, c, operator=operators.gt),
                "%(x_1)s > ALL (x)",
                checkparams={"x_1": 6},
            )
        with self._array_any_deprecation():
            self.assert_compile(
                c.all(7, operator=operators.lt),
                "%(x_1)s < ALL (x)",
                checkparams={"x_1": 7},
            )
        with self._array_any_deprecation():
            self.assert_compile(
                c.all(5),
                "%(x_1)s = ALL (x)",
                checkparams={"x_1": 5},
            )
        with self._array_any_deprecation():
            self.assert_compile(
                ~c.all(5),
                "NOT (%(x_1)s = ALL (x))",
                checkparams={"x_1": 5},
            )
    @testing.combinations(
        (lambda c: c.overlap, "&&"),
        (lambda c: c.contains, "@>"),
        (lambda c: c.contained_by, "<@"),
    )
    def test_overlap_no_cartesian(self, op_fn, expected_op):
        """test #6886"""
        # the array operators must be recognized as linking the aliased
        # tables to t1, so the from-linting pass (from_linting=True) does
        # not flag a cartesian product
        t1 = table(
            "t1",
            column("id", Integer),
            column("ancestor_ids", postgresql.ARRAY(Integer)),
        )
        t1a = t1.alias()
        t1b = t1.alias()
        stmt = (
            select(t1, t1a, t1b)
            .where(op_fn(t1a.c.ancestor_ids)(postgresql.array((t1.c.id,))))
            .where(op_fn(t1b.c.ancestor_ids)(postgresql.array((t1.c.id,))))
        )
        self.assert_compile(
            stmt,
            "SELECT t1.id, t1.ancestor_ids, t1_1.id AS id_1, "
            "t1_1.ancestor_ids AS ancestor_ids_1, t1_2.id AS id_2, "
            "t1_2.ancestor_ids AS ancestor_ids_2 "
            "FROM t1, t1 AS t1_1, t1 AS t1_2 "
            "WHERE t1_1.ancestor_ids %(op)s ARRAY[t1.id] "
            "AND t1_2.ancestor_ids %(op)s ARRAY[t1.id]" % {"op": expected_op},
            from_linting=True,
        )
    @testing.combinations((True,), (False,))
    def test_array_zero_indexes(self, zero_indexes):
        """With zero_indexes=True, Python-side index/slice values are
        shifted by +1 before being bound, matching PG's one-based arrays."""
        c = Column("x", postgresql.ARRAY(Integer, zero_indexes=zero_indexes))
        # expected bound values shift by one only in the zero_indexes case
        add_one = 1 if zero_indexes else 0
        self.assert_compile(
            cast(c, postgresql.ARRAY(Integer, zero_indexes=zero_indexes)),
            "CAST(x AS INTEGER[])",
        )
        self.assert_compile(
            c[5], "x[%(x_1)s]", checkparams={"x_1": 5 + add_one}
        )
        self.assert_compile(
            c[5:7],
            "x[%(x_1)s:%(x_2)s]",
            checkparams={"x_2": 7 + add_one, "x_1": 5 + add_one},
        )
        self.assert_compile(
            c[5:7][2:3],
            "x[%(x_1)s:%(x_2)s][%(param_1)s:%(param_2)s]",
            checkparams={
                "x_2": 7 + add_one,
                "x_1": 5 + add_one,
                "param_1": 2 + add_one,
                "param_2": 3 + add_one,
            },
        )
        self.assert_compile(
            c[5:7][3],
            "x[%(x_1)s:%(x_2)s][%(param_1)s]",
            checkparams={
                "x_2": 7 + add_one,
                "x_1": 5 + add_one,
                "param_1": 3 + add_one,
            },
        )
def test_array_literal_type(self):
isinstance(postgresql.array([1, 2]).type, postgresql.ARRAY)
is_(postgresql.array([1, 2]).type.item_type._type_affinity, Integer)
is_(
postgresql.array(
[1, 2], type_=String
).type.item_type._type_affinity,
String,
)
    @testing.combinations(
        ("with type_", Date, "ARRAY[]::DATE[]"),
        ("no type_", None, "ARRAY[]"),
        id_="iaa",
    )
    def test_array_literal_empty(self, type_, expected):
        """An empty array() literal renders ARRAY[], with a ::TYPE[] cast
        appended when type_= is given."""
        self.assert_compile(postgresql.array([], type_=type_), expected)
    def test_array_literal(self):
        """array() literals render as ARRAY[...] with bound parameters and
        support the || concatenation operator."""
        self.assert_compile(
            func.array_dims(
                postgresql.array([1, 2]) + postgresql.array([3, 4, 5])
            ),
            "array_dims(ARRAY[%(param_1)s, %(param_2)s] || "
            "ARRAY[%(param_3)s, %(param_4)s, %(param_5)s])",
            checkparams={
                "param_5": 5,
                "param_4": 4,
                "param_1": 1,
                "param_3": 3,
                "param_2": 2,
            },
        )
    def test_array_literal_compare(self):
        """Comparing an array() literal against a plain Python list coerces
        the list to an ARRAY[...] literal on the right side."""
        self.assert_compile(
            postgresql.array([1, 2]) == [3, 4, 5],
            "ARRAY[%(param_1)s, %(param_2)s] = "
            "ARRAY[%(param_3)s, %(param_4)s, %(param_5)s]",
            checkparams={
                "param_5": 5,
                "param_4": 4,
                "param_1": 1,
                "param_3": 3,
                "param_2": 2,
            },
        )
    def test_array_literal_contains(self):
        """array().contains() renders @> and coerces the right-hand list,
        including falsy elements ("" and 0) and the empty list."""
        self.assert_compile(
            postgresql.array([1, 2]).contains([3, 4, 5]),
            "ARRAY[%(param_1)s, %(param_2)s] @> ARRAY[%(param_3)s, "
            "%(param_4)s, %(param_5)s]",
            checkparams={
                "param_1": 1,
                "param_2": 2,
                "param_3": 3,
                "param_4": 4,
                "param_5": 5,
            },
        )
        self.assert_compile(
            postgresql.array(["a", "b"]).contains([""]),
            "ARRAY[%(param_1)s, %(param_2)s] @> ARRAY[%(param_3)s]",
            checkparams={"param_1": "a", "param_2": "b", "param_3": ""},
        )
        self.assert_compile(
            postgresql.array(["a", "b"]).contains([]),
            "ARRAY[%(param_1)s, %(param_2)s] @> ARRAY[]",
            checkparams={"param_1": "a", "param_2": "b"},
        )
        self.assert_compile(
            postgresql.array(["a", "b"]).contains([0]),
            "ARRAY[%(param_1)s, %(param_2)s] @> ARRAY[%(param_3)s]",
            checkparams={"param_1": "a", "param_2": "b", "param_3": 0},
        )
    def test_array_literal_contained_by(self):
        """array().contained_by() renders <@ and coerces the right-hand
        list, including falsy elements ("" and 0) and the empty list."""
        self.assert_compile(
            postgresql.array(["a", "b"]).contained_by(["a", "b", "c"]),
            "ARRAY[%(param_1)s, %(param_2)s] <@ ARRAY[%(param_3)s, "
            "%(param_4)s, %(param_5)s]",
            checkparams={
                "param_1": "a",
                "param_2": "b",
                "param_3": "a",
                "param_4": "b",
                "param_5": "c",
            },
        )
        self.assert_compile(
            postgresql.array([1, 2]).contained_by([3, 4, 5]),
            "ARRAY[%(param_1)s, %(param_2)s] <@ ARRAY[%(param_3)s, "
            "%(param_4)s, %(param_5)s]",
            checkparams={
                "param_1": 1,
                "param_2": 2,
                "param_3": 3,
                "param_4": 4,
                "param_5": 5,
            },
        )
        self.assert_compile(
            postgresql.array(["a", "b"]).contained_by([""]),
            "ARRAY[%(param_1)s, %(param_2)s] <@ ARRAY[%(param_3)s]",
            checkparams={"param_1": "a", "param_2": "b", "param_3": ""},
        )
        self.assert_compile(
            postgresql.array(["a", "b"]).contained_by([]),
            "ARRAY[%(param_1)s, %(param_2)s] <@ ARRAY[]",
            checkparams={"param_1": "a", "param_2": "b"},
        )
        self.assert_compile(
            postgresql.array(["a", "b"]).contained_by([0]),
            "ARRAY[%(param_1)s, %(param_2)s] <@ ARRAY[%(param_3)s]",
            checkparams={"param_1": "a", "param_2": "b", "param_3": 0},
        )
    def test_array_literal_insert(self):
        """An array() literal in insert().values() renders as ARRAY[...]
        inside the VALUES clause."""
        m = MetaData()
        t = Table("t", m, Column("data", postgresql.ARRAY(Integer)))
        self.assert_compile(
            t.insert().values(data=array([1, 2, 3])),
            "INSERT INTO t (data) VALUES (ARRAY[%(param_1)s, "
            "%(param_2)s, %(param_3)s])",
        )
    def test_update_array(self):
        """UPDATE of a whole ARRAY column binds the list as one parameter
        with an ::INTEGER[] cast."""
        m = MetaData()
        t = Table("t", m, Column("data", postgresql.ARRAY(Integer)))
        self.assert_compile(
            t.update().values({t.c.data: [1, 3, 4]}),
            "UPDATE t SET data=%(data)s::INTEGER[]",
            checkparams={"data": [1, 3, 4]},
        )
    def test_update_array_element(self):
        """UPDATE of a single array element renders data[i]=value with both
        index and value as bound parameters."""
        m = MetaData()
        t = Table("t", m, Column("data", postgresql.ARRAY(Integer)))
        self.assert_compile(
            t.update().values({t.c.data[5]: 1}),
            "UPDATE t SET data[%(data_1)s]=%(param_1)s",
            checkparams={"data_1": 5, "param_1": 1},
        )
    def test_update_array_slice(self):
        """UPDATE of an array slice: the psycopg2 dialect adds an
        ::INTEGER[] cast on the value; the default PG dialect does not."""
        m = MetaData()
        t = Table("t", m, Column("data", postgresql.ARRAY(Integer)))
        # psycopg2-specific, has a cast
        self.assert_compile(
            t.update().values({t.c.data[2:5]: [2, 3, 4]}),
            "UPDATE t SET data[%(data_1)s:%(data_2)s]="
            "%(param_1)s::INTEGER[]",
            checkparams={"param_1": [2, 3, 4], "data_2": 5, "data_1": 2},
            dialect=PGDialect_psycopg2(),
        )
        # default dialect does not, as DBAPIs may be doing this for us
        self.assert_compile(
            t.update().values({t.c.data[2:5]: [2, 3, 4]}),
            "UPDATE t SET data[%s:%s]=%s",
            checkparams={"param_1": [2, 3, 4], "data_2": 5, "data_1": 2},
            dialect=PGDialect(paramstyle="format"),
        )
    def test_from_only(self):
        """The "ONLY" with_hint prefixes the hinted table in SELECT,
        UPDATE, and DELETE — for plain tables, aliases, multiple tables,
        and schema-qualified tables; an unknown hint raises CompileError."""
        m = MetaData()
        tbl1 = Table("testtbl1", m, Column("id", Integer))
        tbl2 = Table("testtbl2", m, Column("id", Integer))
        stmt = tbl1.select().with_hint(tbl1, "ONLY", "postgresql")
        expected = "SELECT testtbl1.id FROM ONLY testtbl1"
        self.assert_compile(stmt, expected)
        talias1 = tbl1.alias("foo")
        stmt = talias1.select().with_hint(talias1, "ONLY", "postgresql")
        expected = "SELECT foo.id FROM ONLY testtbl1 AS foo"
        self.assert_compile(stmt, expected)
        stmt = select(tbl1, tbl2).with_hint(tbl1, "ONLY", "postgresql")
        expected = (
            "SELECT testtbl1.id, testtbl2.id AS id_1 FROM ONLY testtbl1, "
            "testtbl2"
        )
        self.assert_compile(stmt, expected)
        stmt = select(tbl1, tbl2).with_hint(tbl2, "ONLY", "postgresql")
        expected = (
            "SELECT testtbl1.id, testtbl2.id AS id_1 FROM testtbl1, ONLY "
            "testtbl2"
        )
        self.assert_compile(stmt, expected)
        stmt = select(tbl1, tbl2)
        stmt = stmt.with_hint(tbl1, "ONLY", "postgresql")
        stmt = stmt.with_hint(tbl2, "ONLY", "postgresql")
        expected = (
            "SELECT testtbl1.id, testtbl2.id AS id_1 FROM ONLY testtbl1, "
            "ONLY testtbl2"
        )
        self.assert_compile(stmt, expected)
        stmt = update(tbl1).values(dict(id=1))
        stmt = stmt.with_hint("ONLY", dialect_name="postgresql")
        expected = "UPDATE ONLY testtbl1 SET id=%(id)s"
        self.assert_compile(stmt, expected)
        stmt = delete(tbl1).with_hint(
            "ONLY", selectable=tbl1, dialect_name="postgresql"
        )
        expected = "DELETE FROM ONLY testtbl1"
        self.assert_compile(stmt, expected)
        tbl3 = Table("testtbl3", m, Column("id", Integer), schema="testschema")
        stmt = tbl3.select().with_hint(tbl3, "ONLY", "postgresql")
        expected = (
            "SELECT testschema.testtbl3.id FROM ONLY testschema.testtbl3"
        )
        self.assert_compile(stmt, expected)
        # anything other than "ONLY" is rejected
        assert_raises(
            exc.CompileError,
            tbl3.select().with_hint(tbl3, "FAKE", "postgresql").compile,
            dialect=postgresql.dialect(),
        )
    def test_aggregate_order_by_one(self):
        """aggregate_order_by renders ORDER BY inside the aggregate call."""
        m = MetaData()
        table = Table("table1", m, Column("a", Integer), Column("b", Integer))
        expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
        stmt = select(expr)
        # note this tests that the object exports FROM objects
        # correctly
        self.assert_compile(
            stmt,
            "SELECT array_agg(table1.a ORDER BY table1.b DESC) "
            "AS array_agg_1 FROM table1",
        )
    def test_aggregate_order_by_two(self):
        """aggregate_order_by as the second argument to string_agg renders
        the delimiter followed by ORDER BY."""
        m = MetaData()
        table = Table("table1", m, Column("a", Integer), Column("b", Integer))
        expr = func.string_agg(
            table.c.a, aggregate_order_by(literal_column("','"), table.c.a)
        )
        stmt = select(expr)
        self.assert_compile(
            stmt,
            "SELECT string_agg(table1.a, ',' ORDER BY table1.a) "
            "AS string_agg_1 FROM table1",
        )
    def test_aggregate_order_by_multi_col(self):
        """aggregate_order_by accepts multiple ORDER BY elements, rendered
        comma-separated inside the aggregate."""
        m = MetaData()
        table = Table("table1", m, Column("a", Integer), Column("b", Integer))
        expr = func.string_agg(
            table.c.a,
            aggregate_order_by(
                literal_column("','"), table.c.a, table.c.b.desc()
            ),
        )
        stmt = select(expr)
        self.assert_compile(
            stmt,
            "SELECT string_agg(table1.a, "
            "',' ORDER BY table1.a, table1.b DESC) "
            "AS string_agg_1 FROM table1",
        )
    def test_aggregate_orcer_by_no_arg(self):
        """aggregate_order_by() with no ORDER BY elements raises TypeError."""
        # NOTE(review): method name has a typo ("orcer" -> "order"); left
        # as-is since renaming would change the test id.
        assert_raises_message(
            TypeError,
            "at least one ORDER BY element is required",
            aggregate_order_by,
            literal_column("','"),
        )
def test_pg_array_agg_implicit_pg_array(self):
expr = pg_array_agg(column("data", Integer))
assert isinstance(expr.type, PG_ARRAY)
is_(expr.type.item_type._type_affinity, Integer)
    def test_pg_array_agg_uses_base_array(self):
        """pg array_agg over a generic sqltypes.ARRAY column keeps the
        generic ARRAY type rather than upgrading it to PG_ARRAY."""
        expr = pg_array_agg(column("data", sqltypes.ARRAY(Integer)))
        assert isinstance(expr.type, sqltypes.ARRAY)
        assert not isinstance(expr.type, PG_ARRAY)
        is_(expr.type.item_type._type_affinity, Integer)
def test_pg_array_agg_uses_pg_array(self):
expr = pg_array_agg(column("data", PG_ARRAY(Integer)))
assert isinstance(expr.type, PG_ARRAY)
is_(expr.type.item_type._type_affinity, Integer)
    def test_pg_array_agg_explicit_base_array(self):
        """An explicit type_=sqltypes.ARRAY(...) overrides inference and is
        used as-is (not upgraded to PG_ARRAY)."""
        expr = pg_array_agg(
            column("data", sqltypes.ARRAY(Integer)),
            type_=sqltypes.ARRAY(Integer),
        )
        assert isinstance(expr.type, sqltypes.ARRAY)
        assert not isinstance(expr.type, PG_ARRAY)
        is_(expr.type.item_type._type_affinity, Integer)
    def test_pg_array_agg_explicit_pg_array(self):
        """An explicit type_=PG_ARRAY(...) is honored even when the input
        column uses the generic ARRAY type."""
        expr = pg_array_agg(
            column("data", sqltypes.ARRAY(Integer)), type_=PG_ARRAY(Integer)
        )
        assert isinstance(expr.type, PG_ARRAY)
        is_(expr.type.item_type._type_affinity, Integer)
    def test_aggregate_order_by_adapt(self):
        """ClauseAdapter traversal rewrites columns inside an
        aggregate_order_by to refer to the alias."""
        m = MetaData()
        table = Table("table1", m, Column("a", Integer), Column("b", Integer))
        expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
        stmt = select(expr)
        a1 = table.alias("foo")
        stmt2 = sql_util.ClauseAdapter(a1).traverse(stmt)
        self.assert_compile(
            stmt2,
            "SELECT array_agg(foo.a ORDER BY foo.b DESC) AS array_agg_1 "
            "FROM table1 AS foo",
        )
    def test_array_agg_w_filter_subscript(self):
        """Subscripting an array_agg(...).filter(...) parenthesizes the
        FILTER expression before applying the index."""
        series = func.generate_series(1, 100).alias("series")
        series_col = column("series")
        query = select(
            func.array_agg(series_col).filter(series_col % 2 == 0)[3]
        ).select_from(series)
        self.assert_compile(
            query,
            "SELECT (array_agg(series) FILTER "
            "(WHERE series %% %(series_1)s = %(param_1)s))[%(param_2)s] "
            "AS anon_1 FROM "
            "generate_series(%(generate_series_1)s, %(generate_series_2)s) "
            "AS series",
        )
def test_delete_extra_froms(self):
t1 = table("t1", column("c1"))
t2 = table("t2", column("c1"))
q = delete(t1).where(t1.c.c1 == t2.c.c1)
self.assert_compile(q, "DELETE FROM t1 USING t2 WHERE t1.c1 = t2.c1")
def test_delete_extra_froms_alias(self):
a1 = table("t1", column("c1")).alias("a1")
t2 = table("t2", column("c1"))
q = delete(a1).where(a1.c.c1 == t2.c.c1)
self.assert_compile(
q, "DELETE FROM t1 AS a1 USING t2 WHERE a1.c1 = t2.c1"
)
    @testing.combinations(
        ("no_persisted", "", "ignore"),
        ("persisted_none", "", None),
        ("persisted_true", " STORED", True),
        ("persisted_false", " VIRTUAL", False),
        id_="iaa",
    )
    def test_column_computed(self, text, persisted):
        """Computed columns render GENERATED ALWAYS AS (...), with STORED /
        VIRTUAL appended according to the persisted= flag."""
        m = MetaData()
        # "ignore" means omit the persisted argument entirely
        kwargs = {"persisted": persisted} if persisted != "ignore" else {}
        t = Table(
            "t",
            m,
            Column("x", Integer),
            Column("y", Integer, Computed("x + 2", **kwargs)),
        )
        self.assert_compile(
            schema.CreateTable(t),
            "CREATE TABLE t (x INTEGER, y INTEGER GENERATED "
            "ALWAYS AS (x + 2)%s)" % text,
        )
    def test_column_computed_persisted_false_old_version(self):
        """persisted=False raises CompileError on a dialect that does not
        support virtual generated columns."""
        m = MetaData()
        t = Table(
            "t",
            m,
            Column("x", Integer),
            Column("y", Integer, Computed("x + 2", persisted=False)),
        )
        old_dialect = postgresql.dialect()
        old_dialect.supports_virtual_generated_columns = False
        # the expected message text ("PostrgreSQL") matches the library's
        # actual error string, misspelling included
        with expect_raises_message(
            exc.CompileError,
            "PostrgreSQL computed columns do not support 'virtual'",
        ):
            schema.CreateTable(t).compile(dialect=old_dialect)
    def test_column_computed_persisted_none_warning_old_version(self):
        """persisted=None on a non-virtual-supporting dialect warns and
        falls back to rendering STORED."""
        m = MetaData()
        t = Table(
            "t",
            m,
            Column("x", Integer),
            Column("y", Integer, Computed("x + 2")),
        )
        old_dialect = postgresql.dialect()
        old_dialect.supports_virtual_generated_columns = False
        with expect_warnings(
            "Computed column t.y is being created as 'STORED' since"
        ):
            self.assert_compile(
                schema.CreateTable(t),
                "CREATE TABLE t (x INTEGER, y INTEGER GENERATED "
                "ALWAYS AS (x + 2) STORED)",
                dialect=old_dialect,
            )
    @testing.combinations(True, False)
    def test_column_identity(self, pk):
        """Identity columns render GENERATED ALWAYS AS IDENTITY with the
        INCREMENT BY / START WITH options, with or without a PK."""
        # all other tests are in test_identity_column.py
        m = MetaData()
        t = Table(
            "t",
            m,
            Column(
                "y",
                Integer,
                Identity(always=True, start=4, increment=7),
                primary_key=pk,
            ),
        )
        self.assert_compile(
            schema.CreateTable(t),
            "CREATE TABLE t (y INTEGER GENERATED ALWAYS AS IDENTITY "
            "(INCREMENT BY 7 START WITH 4)%s)"
            % (", PRIMARY KEY (y)" if pk else ""),
        )
    @testing.combinations(True, False)
    def test_column_identity_no_support(self, pk):
        """On a dialect without identity-column support, an Identity PK
        column falls back to SERIAL, a non-PK one to plain INTEGER."""
        m = MetaData()
        t = Table(
            "t",
            m,
            Column(
                "y",
                Integer,
                Identity(always=True, start=4, increment=7),
                primary_key=pk,
            ),
        )
        dd = PGDialect()
        dd.supports_identity_columns = False
        self.assert_compile(
            schema.CreateTable(t),
            "CREATE TABLE t (y %s%s)"
            % (
                "SERIAL NOT NULL" if pk else "INTEGER NOT NULL",
                ", PRIMARY KEY (y)" if pk else "",
            ),
            dialect=dd,
        )
    def test_column_identity_null(self):
        """An Identity column with nullable=True renders an explicit NULL
        after the identity options."""
        # all other tests are in test_identity_column.py
        m = MetaData()
        t = Table(
            "t",
            m,
            Column(
                "y",
                Integer,
                Identity(always=True, start=4, increment=7),
                nullable=True,
            ),
        )
        self.assert_compile(
            schema.CreateTable(t),
            "CREATE TABLE t (y INTEGER GENERATED ALWAYS AS IDENTITY "
            "(INCREMENT BY 7 START WITH 4) NULL)",
        )
    def test_index_extra_include_1(self):
        """postgresql_include given as a list of column name strings
        renders an INCLUDE clause on CREATE INDEX."""
        metadata = MetaData()
        tbl = Table(
            "test",
            metadata,
            Column("x", Integer),
            Column("y", Integer),
            Column("z", Integer),
        )
        idx = Index("foo", tbl.c.x, postgresql_include=["y"])
        self.assert_compile(
            schema.CreateIndex(idx), "CREATE INDEX foo ON test (x) INCLUDE (y)"
        )
    def test_index_extra_include_2(self):
        """postgresql_include given as a list of Column objects renders the
        same INCLUDE clause as the string form."""
        metadata = MetaData()
        tbl = Table(
            "test",
            metadata,
            Column("x", Integer),
            Column("y", Integer),
            Column("z", Integer),
        )
        idx = Index("foo", tbl.c.x, postgresql_include=[tbl.c.y])
        self.assert_compile(
            schema.CreateIndex(idx), "CREATE INDEX foo ON test (x) INCLUDE (y)"
        )
    @testing.fixture
    def update_tables(self):
        """Fixture: attach the weather/accounts/salesmen/employees table
        constructs used by the test_difficult_update_* tests to self."""
        self.weather = table(
            "weather",
            column("temp_lo", Integer),
            column("temp_hi", Integer),
            column("prcp", Integer),
            column("city", String),
            column("date", Date),
        )
        self.accounts = table(
            "accounts",
            column("sales_id", Integer),
            column("sales_person", Integer),
            column("contact_first_name", String),
            column("contact_last_name", String),
            column("name", String),
        )
        self.salesmen = table(
            "salesmen",
            column("id", Integer),
            column("first_name", String),
            column("last_name", String),
        )
        self.employees = table(
            "employees",
            column("id", Integer),
            column("sales_count", String),
        )
    # from examples at https://www.postgresql.org/docs/current/sql-update.html
    def test_difficult_update_1(self, update_tables):
        """Multi-column UPDATE: SET (a, b, c) = (expr, expr, DEFAULT).

        Mirrors the first example at
        https://www.postgresql.org/docs/current/sql-update.html.
        """
        update = (
            self.weather.update()
            .where(self.weather.c.city == "San Francisco")
            .where(self.weather.c.date == "2003-07-03")
            .values(
                {
                    # A tuple_() dict key maps onto PostgreSQL's
                    # parenthesized multi-column SET target.
                    tuple_(
                        self.weather.c.temp_lo,
                        self.weather.c.temp_hi,
                        self.weather.c.prcp,
                    ): tuple_(
                        self.weather.c.temp_lo + 1,
                        self.weather.c.temp_lo + 15,
                        # literal_column renders DEFAULT verbatim,
                        # unquoted and unparameterized.
                        literal_column("DEFAULT"),
                    )
                }
            )
        )
        self.assert_compile(
            update,
            "UPDATE weather SET (temp_lo, temp_hi, prcp)=(weather.temp_lo + "
            "%(temp_lo_1)s, weather.temp_lo + %(temp_lo_2)s, DEFAULT) "
            "WHERE weather.city = %(city_1)s AND weather.date = %(date_1)s",
            {
                "city_1": "San Francisco",
                "date_1": "2003-07-03",
                "temp_lo_1": 1,
                "temp_lo_2": 15,
            },
        )
def test_difficult_update_2(self, update_tables):
update = self.accounts.update().values(
{
tuple_(
self.accounts.c.contact_first_name,
self.accounts.c.contact_last_name,
): select(
self.salesmen.c.first_name, self.salesmen.c.last_name
)
.where(self.salesmen.c.id == self.accounts.c.sales_id)
.scalar_subquery()
}
)
self.assert_compile(
update,
"UPDATE accounts SET (contact_first_name, contact_last_name)="
"(SELECT salesmen.first_name, salesmen.last_name FROM "
"salesmen WHERE salesmen.id = accounts.sales_id)",
)
def test_difficult_update_3(self, update_tables):
update = (
self.employees.update()
.values(
{
self.employees.c.sales_count: self.employees.c.sales_count
+ 1
}
)
.where(
self.employees.c.id
== select(self.accounts.c.sales_person)
.where(self.accounts.c.name == "Acme Corporation")
.scalar_subquery()
)
)
self.assert_compile(
update,
"UPDATE employees SET sales_count=(employees.sales_count "
"+ %(sales_count_1)s) WHERE employees.id = (SELECT "
"accounts.sales_person FROM accounts WHERE "
"accounts.name = %(name_1)s)",
{"sales_count_1": 1, "name_1": "Acme Corporation"},
)
    def test_difficult_update_4(self):
        """Multi-column SET fed by a single aggregate scalar subquery.

        Mirrors the aggregate example at
        https://www.postgresql.org/docs/current/sql-update.html.
        """
        summary = table(
            "summary",
            column("group_id", Integer),
            column("sum_y", Float),
            column("sum_x", Float),
            column("avg_x", Float),
            column("avg_y", Float),
        )
        data = table(
            "data",
            column("group_id", Integer),
            column("x", Float),
            column("y", Float),
        )
        update = summary.update().values(
            {
                # The four-column tuple_() target is assigned from one
                # four-column scalar subquery, correlated on group_id.
                tuple_(
                    summary.c.sum_x,
                    summary.c.sum_y,
                    summary.c.avg_x,
                    summary.c.avg_y,
                ): select(
                    func.sum(data.c.x),
                    func.sum(data.c.y),
                    func.avg(data.c.x),
                    func.avg(data.c.y),
                )
                .where(data.c.group_id == summary.c.group_id)
                .scalar_subquery()
            }
        )
        self.assert_compile(
            update,
            "UPDATE summary SET (sum_x, sum_y, avg_x, avg_y)="
            "(SELECT sum(data.x) AS sum_1, sum(data.y) AS sum_2, "
            "avg(data.x) AS avg_1, avg(data.y) AS avg_2 FROM data "
            "WHERE data.group_id = summary.group_id)",
        )
@testing.combinations(JSONB.JSONPathType, JSONPATH)
def test_json_path(self, type_):
data = table("data", column("id", Integer), column("x", JSONB))
stmt = select(
func.jsonb_path_exists(data.c.x, cast("$.data.w", type_))
)
self.assert_compile(
stmt,
"SELECT jsonb_path_exists(data.x, CAST(%(param_1)s AS JSONPATH)) "
"AS jsonb_path_exists_1 FROM data",
)
    @testing.combinations(
        (
            lambda col: col["foo"] + " ",
            "x[%(x_1)s] || %(param_1)s",
        ),
        (
            lambda col: col["foo"] + " " + col["bar"],
            "x[%(x_1)s] || %(param_1)s || x[%(x_2)s]",
        ),
        argnames="expr, expected",
    )
    def test_eager_grouping_flag(self, expr, expected):
        """test #10479"""
        col = Column("x", JSONB)
        expr = testing.resolve_lambda(expr, col=col)
        # Concatenating JSONB subscript expressions must not introduce
        # extra parenthesization around the || operands.
        self.assert_compile(expr, expected)
    @testing.variation("pgversion", ["pg14", "pg13"])
    def test_jsonb_subscripting(self, pgversion):
        """test #10927 - PostgreSQL 14+ JSONB subscripting syntax"""
        data = table("data", column("id", Integer), column("x", JSONB))
        dialect = postgresql.dialect()
        if pgversion.pg13:
            # Pre-14 servers have no subscripting; force the arrow-operator
            # rendering path.
            dialect._supports_jsonb_subscripting = False
        # Test SELECT with JSONB indexing
        stmt = select(data.c.x["key"])
        self.assert_compile(
            stmt,
            (
                "SELECT data.x[%(x_1)s] AS anon_1 FROM data"
                if pgversion.pg14
                else "SELECT data.x -> %(x_1)s AS anon_1 FROM data"
            ),
            dialect=dialect,
        )
        # Test UPDATE with JSONB indexing (the original issue case)
        stmt = update(data).values({data.c.x["new_key"]: data.c.x["old_key"]})
        self.assert_compile(
            stmt,
            (
                "UPDATE data SET x[%(x_1)s]=(data.x[%(x_2)s])"
                if pgversion.pg14
                else "UPDATE data SET x -> %(x_1)s=(data.x -> %(x_2)s)"
            ),
            dialect=dialect,
        )
def test_json_still_uses_arrow_syntax(self):
"""test #10927 - JSON type still uses arrow syntax even on PG 14+"""
data = table("data", column("id", Integer), column("x", JSON))
# Test PostgreSQL 14+ still uses arrow syntax for JSON (not JSONB)
# Test SELECT with JSON indexing
stmt = select(data.c.x["key"])
self.assert_compile(
stmt,
"SELECT data.x -> %(x_1)s AS anon_1 FROM data",
)
# Test UPDATE with JSON indexing
stmt = update(data).values({data.c.x["new_key"]: data.c.x["old_key"]})
self.assert_compile(
stmt,
"UPDATE data SET x -> %(x_1)s=(data.x -> %(x_2)s)",
)
    @testing.variation("pgversion", ["pg14", "pg13"])
    def test_hstore_subscripting(self, pgversion):
        """test #12948 - PostgreSQL 14+ HSTORE subscripting syntax"""
        data = table("data", column("id", Integer), column("h", HSTORE))
        dialect = postgresql.dialect()
        if pgversion.pg13:
            # NOTE(review): the JSONB subscripting flag also gates HSTORE
            # rendering here — confirm this is the intended shared switch.
            dialect._supports_jsonb_subscripting = False
        # Test SELECT with HSTORE indexing
        stmt = select(data.c.h["key"])
        self.assert_compile(
            stmt,
            (
                "SELECT data.h[%(h_1)s] AS anon_1 FROM data"
                if pgversion.pg14
                else "SELECT data.h -> %(h_1)s AS anon_1 FROM data"
            ),
            dialect=dialect,
        )
        # Test UPDATE with HSTORE indexing (the original issue case)
        stmt = update(data).values({data.c.h["new_key"]: data.c.h["old_key"]})
        self.assert_compile(
            stmt,
            (
                "UPDATE data SET h[%(h_1)s]=(data.h[%(h_2)s])"
                if pgversion.pg14
                else "UPDATE data SET h -> %(h_1)s=(data.h -> %(h_2)s)"
            ),
            dialect=dialect,
        )
    def test_jsonb_functions_use_parentheses_with_subscripting(self):
        """test #12778 - JSONB functions are parenthesized with [] syntax"""
        data = table("data", column("id", Integer), column("x", JSONB))
        # Test that JSONB functions are properly parenthesized with [] syntax
        # This ensures correct PostgreSQL syntax: (function_call)[index]
        # instead of the invalid: function_call[index]
        stmt = select(func.jsonb_array_elements(data.c.x, type_=JSONB)["key"])
        self.assert_compile(
            stmt,
            "SELECT "
            "(jsonb_array_elements(data.x))[%(jsonb_array_elements_1)s] "
            "AS anon_1 FROM data",
        )
        # Test with nested function calls
        # (the inner subscript data.x["items"] itself needs no parens)
        stmt = select(
            func.jsonb_array_elements(data.c.x["items"], type_=JSONB)["key"]
        )
        self.assert_compile(
            stmt,
            "SELECT (jsonb_array_elements(data.x[%(x_1)s]))"
            "[%(jsonb_array_elements_1)s] AS anon_1 FROM data",
        )
def test_range_custom_object_hook(self):
# See issue #8884
from datetime import date
usages = table(
"usages",
column("id", Integer),
column("date", Date),
column("amount", Integer),
)
period = Range(date(2022, 1, 1), (2023, 1, 1))
stmt = select(func.sum(usages.c.amount)).where(
usages.c.date.op("<@")(period)
)
self.assert_compile(
stmt,
"SELECT sum(usages.amount) AS sum_1 FROM usages "
"WHERE usages.date <@ %(date_1)s::DATERANGE",
)
def test_multirange_custom_object_hook(self):
from datetime import date
usages = table(
"usages",
column("id", Integer),
column("date", Date),
column("amount", Integer),
)
period = MultiRange(
[
Range(date(2022, 1, 1), (2023, 1, 1)),
Range(date(2024, 1, 1), (2025, 1, 1)),
]
)
stmt = select(func.sum(usages.c.amount)).where(
usages.c.date.op("<@")(period)
)
self.assert_compile(
stmt,
"SELECT sum(usages.amount) AS sum_1 FROM usages "
"WHERE usages.date <@ %(date_1)s::DATEMULTIRANGE",
)
def test_bitwise_xor(self):
c1 = column("c1", Integer)
c2 = column("c2", Integer)
self.assert_compile(
select(c1.bitwise_xor(c2)),
"SELECT c1 # c2 AS anon_1",
)
def test_ilike_escaping(self):
dialect = postgresql.dialect()
self.assert_compile(
sql.column("foo").ilike("bar", escape="\\"),
"foo ILIKE %(foo_1)s ESCAPE '\\\\'",
)
self.assert_compile(
sql.column("foo").ilike("bar", escape=""),
"foo ILIKE %(foo_1)s ESCAPE ''",
dialect=dialect,
)
self.assert_compile(
sql.column("foo").notilike("bar", escape="\\"),
"foo NOT ILIKE %(foo_1)s ESCAPE '\\\\'",
)
self.assert_compile(
sql.column("foo").notilike("bar", escape=""),
"foo NOT ILIKE %(foo_1)s ESCAPE ''",
dialect=dialect,
)
@testing.combinations(
(lambda t: t.c.a**t.c.b, "power(t.a, t.b)", {}),
(lambda t: t.c.a**3, "power(t.a, %(pow_1)s)", {"pow_1": 3}),
(lambda t: func.pow(t.c.a, 3), "power(t.a, %(pow_1)s)", {"pow_1": 3}),
(lambda t: func.power(t.c.a, t.c.b), "power(t.a, t.b)", {}),
)
def test_simple_compile(self, fn, string, params):
t = table("t", column("a", Integer), column("b", Integer))
expr = resolve_lambda(fn, t=t)
self.assert_compile(expr, string, params)
| CompileTest |
python | getsentry__sentry | tests/sentry/api/test_authentication.py | {
"start": 2152,
"end": 4097
} | class ____(TestCase):
    def setUp(self) -> None:
        """Create an org-owned sentry app whose API application is under test."""
        super().setUp()
        self.auth = ClientIdSecretAuthentication()
        self.org = self.create_organization(owner=self.user)
        # The sentry app owns the ApiApplication carrying the client
        # credentials checked by the authenticator.
        self.sentry_app = self.create_sentry_app(name="foo", organization=self.org)
        self.api_app = self.sentry_app.application
def test_authenticate(self) -> None:
request = _drf_request(
{
"client_id": self.api_app.client_id,
"client_secret": self.api_app.client_secret,
}
)
user, _ = self.auth.authenticate(request)
assert user.id == self.sentry_app.proxy_user.id
def test_without_json_body(self) -> None:
request = _drf_request()
with pytest.raises(AuthenticationFailed):
self.auth.authenticate(request)
def test_missing_client_id(self) -> None:
request = _drf_request({"client_secret": self.api_app.client_secret})
with pytest.raises(AuthenticationFailed):
self.auth.authenticate(request)
def test_missing_client_secret(self) -> None:
request = _drf_request({"client_id": self.api_app.client_id})
with pytest.raises(AuthenticationFailed):
self.auth.authenticate(request)
def test_incorrect_client_id(self) -> None:
request = _drf_request(
{
"client_id": "notit",
"client_secret": self.api_app.client_secret,
}
)
with pytest.raises(AuthenticationFailed):
self.auth.authenticate(request)
def test_incorrect_client_secret(self) -> None:
request = _drf_request(
{
"client_id": self.api_app.client_id,
"client_secret": "notit",
}
)
with pytest.raises(AuthenticationFailed):
self.auth.authenticate(request)
@control_silo_test
| TestClientIdSecretAuthentication |
python | pypa__pip | tests/unit/test_vcs.py | {
"start": 34967,
"end": 38342
} | class ____(TestCase):
    def setUp(self) -> None:
        """Mock out subprocess calls and prepare a Mercurial backend fixture."""
        patcher = mock.patch("pip._internal.vcs.versioncontrol.call_subprocess")
        self.addCleanup(patcher.stop)
        self.call_subprocess_mock = patcher.start()
        # Test Data.
        self.url = "hg+http://username:password@hg.example.com/"
        # NOTE(review): the attribute is named ``svn`` but holds a Mercurial
        # backend; renaming would touch every test in this class.
        self.svn = Mercurial()
        self.rev_options = RevOptions(Mercurial)
        self.dest = "/tmp/test"
def test_fetch_new(self) -> None:
self.svn.fetch_new(self.dest, hide_url(self.url), self.rev_options, verbosity=1)
assert self.call_subprocess_mock.call_args_list[0][0][0] == [
"hg",
"clone",
"--noupdate",
hide_url("hg+http://username:password@hg.example.com/"),
"/tmp/test",
]
assert self.call_subprocess_mock.call_args_list[1][0][0] == [
"hg",
"update",
]
def test_fetch_new_quiet(self) -> None:
self.svn.fetch_new(self.dest, hide_url(self.url), self.rev_options, verbosity=0)
assert self.call_subprocess_mock.call_args_list[0][0][0] == [
"hg",
"clone",
"--noupdate",
"--quiet",
hide_url("hg+http://username:password@hg.example.com/"),
"/tmp/test",
]
assert self.call_subprocess_mock.call_args_list[1][0][0] == [
"hg",
"update",
"--quiet",
]
def test_fetch_new_very_verbose(self) -> None:
self.svn.fetch_new(self.dest, hide_url(self.url), self.rev_options, verbosity=2)
assert self.call_subprocess_mock.call_args_list[0][0][0] == [
"hg",
"clone",
"--noupdate",
"--verbose",
hide_url("hg+http://username:password@hg.example.com/"),
"/tmp/test",
]
assert self.call_subprocess_mock.call_args_list[1][0][0] == [
"hg",
"update",
"--verbose",
]
def test_fetch_new_debug(self) -> None:
self.svn.fetch_new(self.dest, hide_url(self.url), self.rev_options, verbosity=3)
assert self.call_subprocess_mock.call_args_list[0][0][0] == [
"hg",
"clone",
"--noupdate",
"--verbose",
"--debug",
hide_url("hg+http://username:password@hg.example.com/"),
"/tmp/test",
]
assert self.call_subprocess_mock.call_args_list[1][0][0] == [
"hg",
"update",
"--verbose",
"--debug",
]
def test_update(self) -> None:
self.svn.update(self.dest, hide_url(self.url), self.rev_options, verbosity=1)
assert self.call_subprocess_mock.call_args_list[0][0][0] == [
"hg",
"pull",
]
assert self.call_subprocess_mock.call_args_list[1][0][0] == [
"hg",
"update",
]
def test_update_quiet(self) -> None:
self.svn.update(self.dest, hide_url(self.url), self.rev_options, verbosity=0)
assert self.call_subprocess_mock.call_args_list[0][0][0] == [
"hg",
"pull",
"-q",
]
assert self.call_subprocess_mock.call_args_list[1][0][0] == [
"hg",
"update",
"-q",
]
| TestMercurialArgs |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.