language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pydantic__pydantic | pydantic/types.py | {
"start": 16161,
"end": 16665
} | class ____(BaseModel):
strict_float: StrictFloat
try:
StrictFloatModel(strict_float='1.0')
except ValidationError as e:
print(e)
'''
1 validation error for StrictFloatModel
strict_float
Input should be a valid number [type=float_type, input_value='1.0', input_type=str]
'''
```
"""
FiniteFloat = Annotated[float, AllowInfNan(False)]
"""A float that must be finite (not ``-inf``, ``inf``, or ``nan``).
```python
from pydantic import BaseModel, FiniteFloat
| StrictFloatModel |
python | dagster-io__dagster | python_modules/dagster/dagster/_config/stack.py | {
"start": 152,
"end": 1467
} | class ____:
parent: Optional["EvaluationStackEntry"]
@property
def entries(self) -> Sequence["EvaluationStackEntry"]:
return list(self.iter_entries())
def iter_entries(self) -> Iterator["EvaluationStackEntry"]:
if self.parent:
yield from self.parent.iter_entries()
if self:
yield self
@property
def levels(self) -> Sequence[str]:
return [
entry.field_name
for entry in self.entries
if isinstance(entry, EvaluationStackPathEntry)
]
def for_field(self, field_name: str) -> "EvaluationStackEntry":
return EvaluationStackPathEntry(
field_name=field_name,
parent=self,
)
def for_array_index(self, list_index: int) -> "EvaluationStackEntry":
return EvaluationStackListItemEntry(
list_index=list_index,
parent=self,
)
def for_map_key(self, map_key: object) -> "EvaluationStackEntry":
return EvaluationStackMapKeyEntry(
map_key=map_key,
parent=self,
)
def for_map_value(self, map_key: object) -> "EvaluationStackEntry":
return EvaluationStackMapValueEntry(
map_key=map_key,
parent=self,
)
@record
| EvaluationStackEntry |
python | numba__numba | numba/cuda/cudadecl.py | {
"start": 5501,
"end": 5671
} | class ____(ConcreteTemplate):
key = cuda.fp16.hfma
cases = [
signature(types.float16, types.float16, types.float16, types.float16)
]
@register
| Cuda_hfma |
python | gevent__gevent | src/greentest/3.14/test_urllib2.py | {
"start": 74306,
"end": 79603
} | class ____(unittest.TestCase):
def opener_has_handler(self, opener, handler_class):
self.assertTrue(any(h.__class__ == handler_class
for h in opener.handlers))
def test_build_opener(self):
class MyHTTPHandler(urllib.request.HTTPHandler):
pass
class FooHandler(urllib.request.BaseHandler):
def foo_open(self):
pass
class BarHandler(urllib.request.BaseHandler):
def bar_open(self):
pass
build_opener = urllib.request.build_opener
o = build_opener(FooHandler, BarHandler)
self.opener_has_handler(o, FooHandler)
self.opener_has_handler(o, BarHandler)
# can take a mix of classes and instances
o = build_opener(FooHandler, BarHandler())
self.opener_has_handler(o, FooHandler)
self.opener_has_handler(o, BarHandler)
# subclasses of default handlers override default handlers
o = build_opener(MyHTTPHandler)
self.opener_has_handler(o, MyHTTPHandler)
# a particular case of overriding: default handlers can be passed
# in explicitly
o = build_opener()
self.opener_has_handler(o, urllib.request.HTTPHandler)
o = build_opener(urllib.request.HTTPHandler)
self.opener_has_handler(o, urllib.request.HTTPHandler)
o = build_opener(urllib.request.HTTPHandler())
self.opener_has_handler(o, urllib.request.HTTPHandler)
# Issue2670: multiple handlers sharing the same base class
class MyOtherHTTPHandler(urllib.request.HTTPHandler):
pass
o = build_opener(MyHTTPHandler, MyOtherHTTPHandler)
self.opener_has_handler(o, MyHTTPHandler)
self.opener_has_handler(o, MyOtherHTTPHandler)
def test_HTTPError_interface(self):
"""
Issue 13211 reveals that HTTPError didn't implement the URLError
interface even though HTTPError is a subclass of URLError.
"""
msg = 'something bad happened'
url = code = fp = None
hdrs = 'Content-Length: 42'
err = urllib.error.HTTPError(url, code, msg, hdrs, fp)
self.assertHasAttr(err, 'reason')
self.assertEqual(err.reason, 'something bad happened')
self.assertHasAttr(err, 'headers')
self.assertEqual(err.headers, 'Content-Length: 42')
expected_errmsg = 'HTTP Error %s: %s' % (err.code, err.msg)
self.assertEqual(str(err), expected_errmsg)
expected_errmsg = '<HTTPError %s: %r>' % (err.code, err.msg)
self.assertEqual(repr(err), expected_errmsg)
err.close()
def test_gh_98778(self):
x = urllib.error.HTTPError("url", 405, "METHOD NOT ALLOWED", None, None)
self.assertEqual(getattr(x, "__notes__", ()), ())
self.assertIsInstance(x.fp.read(), bytes)
x.close()
def test_parse_proxy(self):
parse_proxy_test_cases = [
('proxy.example.com',
(None, None, None, 'proxy.example.com')),
('proxy.example.com:3128',
(None, None, None, 'proxy.example.com:3128')),
('proxy.example.com', (None, None, None, 'proxy.example.com')),
('proxy.example.com:3128',
(None, None, None, 'proxy.example.com:3128')),
# The authority component may optionally include userinfo
# (assumed to be # username:password):
('joe:password@proxy.example.com',
(None, 'joe', 'password', 'proxy.example.com')),
('joe:password@proxy.example.com:3128',
(None, 'joe', 'password', 'proxy.example.com:3128')),
#Examples with URLS
('http://proxy.example.com/',
('http', None, None, 'proxy.example.com')),
('http://proxy.example.com:3128/',
('http', None, None, 'proxy.example.com:3128')),
('http://joe:password@proxy.example.com/',
('http', 'joe', 'password', 'proxy.example.com')),
('http://joe:password@proxy.example.com:3128',
('http', 'joe', 'password', 'proxy.example.com:3128')),
# Everything after the authority is ignored
('ftp://joe:password@proxy.example.com/rubbish:3128',
('ftp', 'joe', 'password', 'proxy.example.com')),
# Test for no trailing '/' case
('http://joe:password@proxy.example.com',
('http', 'joe', 'password', 'proxy.example.com')),
# Testcases with '/' character in username, password
('http://user/name:password@localhost:22',
('http', 'user/name', 'password', 'localhost:22')),
('http://username:pass/word@localhost:22',
('http', 'username', 'pass/word', 'localhost:22')),
('http://user/name:pass/word@localhost:22',
('http', 'user/name', 'pass/word', 'localhost:22')),
]
for tc, expected in parse_proxy_test_cases:
self.assertEqual(_parse_proxy(tc), expected)
self.assertRaises(ValueError, _parse_proxy, 'file:/ftp.example.com'),
skip_libssl_fips_mode = unittest.skipIf(
support.is_libssl_fips_mode(),
"conservative skip due to OpenSSL FIPS mode possible algorithm nerfing",
)
| MiscTests |
python | Textualize__textual | tests/deadlock.py | {
"start": 134,
"end": 323
} | class ____(App[None]):
BINDINGS = [
Binding(key="q", action="quit", description="Quit the app"),
]
def compose(self):
yield Footer()
app = MyApp()
app.run()
| MyApp |
python | h5py__h5py | h5py/tests/test_h5p.py | {
"start": 4194,
"end": 5262
} | class ____(TestCase):
'''
Feature: setting/getting mdc config on a file access property list
'''
def test_mdc_config(self):
'''test get/set mdc config '''
falist = h5p.create(h5p.FILE_ACCESS)
config = falist.get_mdc_config()
falist.set_mdc_config(config)
def test_set_alignment(self):
'''test get/set chunk cache '''
falist = h5p.create(h5p.FILE_ACCESS)
threshold = 10 * 1024 # threshold of 10kiB
alignment = 1024 * 1024 # threshold of 1kiB
falist.set_alignment(threshold, alignment)
self.assertEqual((threshold, alignment),
falist.get_alignment())
def test_set_file_locking(self):
'''test get/set file locking'''
falist = h5p.create(h5p.FILE_ACCESS)
use_file_locking = False
ignore_when_disabled = False
falist.set_file_locking(use_file_locking, ignore_when_disabled)
self.assertEqual((use_file_locking, ignore_when_disabled),
falist.get_file_locking())
| TestFA |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/client/utils.py | {
"start": 1475,
"end": 2018
} | class ____(NamedTuple):
"""This class gives information about the result of shutting down the server for
a Dagster repository location using a GraphQL mutation.
Args:
status (ShutdownRepositoryLocationStatus) Whether the shutdown succeeded or failed.
message (Optional[str], optional): the failure message/reason if
`status == ShutdownRepositoryLocationStatus.FAILURE`. Defaults to None.
"""
status: ShutdownRepositoryLocationStatus
message: Optional[str] = None
| ShutdownRepositoryLocationInfo |
python | lazyprogrammer__machine_learning_examples | rl3/es_flappy.py | {
"start": 1805,
"end": 6111
} | class ____:
def __init__(self, D, M, K, f=relu):
self.D = D
self.M = M
self.K = K
self.f = f
def init(self):
D, M, K = self.D, self.M, self.K
self.W1 = np.random.randn(D, M) / np.sqrt(D)
# self.W1 = np.zeros((D, M))
self.b1 = np.zeros(M)
self.W2 = np.random.randn(M, K) / np.sqrt(M)
# self.W2 = np.zeros((M, K))
self.b2 = np.zeros(K)
def forward(self, X):
Z = self.f(X.dot(self.W1) + self.b1)
return softmax(Z.dot(self.W2) + self.b2)
def sample_action(self, x):
# assume input is a single state of size (D,)
# first make it (N, D) to fit ML conventions
X = np.atleast_2d(x)
P = self.forward(X)
p = P[0] # the first row
# return np.random.choice(len(p), p=p)
return np.argmax(p)
def get_params(self):
# return a flat array of parameters
return np.concatenate([self.W1.flatten(), self.b1, self.W2.flatten(), self.b2])
def get_params_dict(self):
return {
'W1': self.W1,
'b1': self.b1,
'W2': self.W2,
'b2': self.b2,
}
def set_params(self, params):
# params is a flat list
# unflatten into individual weights
D, M, K = self.D, self.M, self.K
self.W1 = params[:D * M].reshape(D, M)
self.b1 = params[D * M:D * M + M]
self.W2 = params[D * M + M:D * M + M + M * K].reshape(M, K)
self.b2 = params[-K:]
def evolution_strategy(
f,
population_size,
sigma,
lr,
initial_params,
num_iters):
# assume initial params is a 1-D array
num_params = len(initial_params)
reward_per_iteration = np.zeros(num_iters)
params = initial_params
for t in range(num_iters):
t0 = datetime.now()
N = np.random.randn(population_size, num_params)
### slow way
R = np.zeros(population_size) # stores the reward
# loop through each "offspring"
for j in range(population_size):
params_try = params + sigma*N[j]
R[j] = f(params_try)
### fast way
# R = pool.map(f, [params + sigma*N[j] for j in range(population_size)])
# R = np.array(R)
m = R.mean()
s = R.std()
if s == 0:
# we can't apply the following equation
print("Skipping")
continue
A = (R - m) / s
reward_per_iteration[t] = m
params = params + lr/(population_size*sigma) * np.dot(N.T, A)
# update the learning rate
lr *= 0.992354
# sigma *= 0.99
print("Iter:", t, "Avg Reward: %.3f" % m, "Max:", R.max(), "Duration:", (datetime.now() - t0))
return params, reward_per_iteration
def reward_function(params):
model = ANN(D, M, K)
model.set_params(params)
# play one episode and return the total reward
episode_reward = 0
episode_length = 0 # not sure if it will be used
done = False
obs = env.reset()
obs_dim = len(obs)
if HISTORY_LENGTH > 1:
state = np.zeros(HISTORY_LENGTH*obs_dim) # current state
state[-obs_dim:] = obs
else:
state = obs
while not done:
# get the action
action = model.sample_action(state)
# perform the action
obs, reward, done = env.step(action)
# update total reward
episode_reward += reward
episode_length += 1
# update state
if HISTORY_LENGTH > 1:
state = np.roll(state, -obs_dim)
state[-obs_dim:] = obs
else:
state = obs
return episode_reward
if __name__ == '__main__':
model = ANN(D, M, K)
if len(sys.argv) > 1 and sys.argv[1] == 'play':
# play with a saved model
j = np.load('es_flappy_results.npz')
best_params = np.concatenate([j['W1'].flatten(), j['b1'], j['W2'].flatten(), j['b2']])
# in case initial shapes are not correct
D, M = j['W1'].shape
K = len(j['b2'])
model.D, model.M, model.K = D, M, K
else:
# train and save
model.init()
params = model.get_params()
best_params, rewards = evolution_strategy(
f=reward_function,
population_size=30,
sigma=0.1,
lr=0.03,
initial_params=params,
num_iters=300,
)
# plot the rewards per iteration
# plt.plot(rewards)
# plt.show()
model.set_params(best_params)
np.savez(
'es_flappy_results.npz',
train=rewards,
**model.get_params_dict(),
)
# play 5 test episodes
env.set_display(True)
for _ in range(5):
print("Test:", reward_function(best_params))
| ANN |
python | PrefectHQ__prefect | tests/blocks/test_abstract.py | {
"start": 2020,
"end": 3875
} | class ____:
def test_job_block_is_abstract(self):
with pytest.raises(
TypeError, match="Can't instantiate abstract class JobBlock"
):
JobBlock()
def test_job_block_implementation(self, caplog):
class AJobRun(JobRun):
def __init__(self):
super().__init__()
self.status = "running"
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
def wait_for_completion(self):
self.status = "completed"
self.logger.info("Job run completed.")
def fetch_result(self):
if self.status != "completed":
raise JobRunIsRunning("Job run is still running.")
return "results"
class AJobBlock(JobBlock):
def trigger(self):
self.logger.info("Job run triggered.")
return AJobRun()
a_job_block = AJobBlock()
a_job_run = a_job_block.trigger()
# test wait_for_completion and fetch_result
with pytest.raises(JobRunIsRunning, match="Job run is still running."):
a_job_run.fetch_result()
assert a_job_run.wait_for_completion() is None
assert a_job_run.fetch_result() == "results"
# test logging
assert hasattr(a_job_block, "logger")
assert hasattr(a_job_run, "logger")
assert len(caplog.records) == 2
record_1 = caplog.records[0]
assert record_1.name == "prefect.AJobBlock"
assert record_1.msg == "Job run triggered."
record_2 = caplog.records[1]
assert record_2.name == "prefect.AJobRun"
assert record_2.msg == "Job run completed."
| TestJobBlock |
python | allegroai__clearml | clearml/backend_config/config.py | {
"start": 1080,
"end": 1461
} | class ____(Entry):
logger = None
def __init__(self, config: "Config", *keys: Text, **kwargs: Any) -> None:
super(ConfigEntry, self).__init__(*keys, **kwargs)
self.config = config
def _get(self, key: Text) -> Any:
return self.config.get(key, NotSet)
def error(self, message: Text) -> None:
log.error(message.capitalize())
| ConfigEntry |
python | tornadoweb__tornado | tornado/test/routing_test.py | {
"start": 1535,
"end": 1812
} | class ____(AsyncHTTPTestCase):
def get_app(self):
return BasicRouter()
def test_basic_router(self):
response = self.fetch("/any_request")
self.assertEqual(response.body, b"OK")
resources = {} # type: typing.Dict[str, bytes]
| BasicRouterTestCase |
python | allegroai__clearml | clearml/backend_api/services/v2_23/dataviews.py | {
"start": 77432,
"end": 79093
} | class ____(Request):
"""
Delete dataviews
:param ids: IDs of the dataviews to delete
:type ids: Sequence[str]
:param force: Allow deletion of published dataviews
:type force: bool
"""
_service = "dataviews"
_action = "delete_many"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"force": {
"default": False,
"description": "Allow deletion of published dataviews",
"type": "boolean",
},
"ids": {
"description": "IDs of the dataviews to delete",
"items": {"type": "string"},
"type": "array",
},
},
"required": ["ids"],
"type": "object",
}
def __init__(self, ids, force=False, **kwargs):
super(DeleteManyRequest, self).__init__(**kwargs)
self.ids = ids
self.force = force
@schema_property("ids")
def ids(self):
return self._property_ids
@ids.setter
def ids(self, value):
if value is None:
self._property_ids = None
return
self.assert_isinstance(value, "ids", (list, tuple))
self.assert_isinstance(value, "ids", six.string_types, is_array=True)
self._property_ids = value
@schema_property("force")
def force(self):
return self._property_force
@force.setter
def force(self, value):
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
| DeleteManyRequest |
python | sanic-org__sanic | sanic/touchup/service.py | {
"start": 77,
"end": 907
} | class ____:
_registry: set[tuple[type, str]] = set()
@classmethod
def run(cls, app):
for target, method_name in cls._registry:
method = getattr(target, method_name)
if app.test_mode:
placeholder = f"_{method_name}"
if hasattr(target, placeholder):
method = getattr(target, placeholder)
else:
setattr(target, placeholder, method)
module = getmodule(target)
module_globals = dict(getmembers(module))
modified = BaseScheme.build(method, module_globals, app)
setattr(target, method_name, modified)
target.__touched__ = True
@classmethod
def register(cls, target, method_name):
cls._registry.add((target, method_name))
| TouchUp |
python | apache__airflow | providers/snowflake/tests/unit/snowflake/hooks/test_snowflake_sql_api.py | {
"start": 6726,
"end": 60056
} | class ____:
@pytest.mark.parametrize(
("sql", "statement_count", "expected_response", "expected_query_ids"),
[
(SINGLE_STMT, 1, {"statementHandle": "uuid"}, ["uuid"]),
(SQL_MULTIPLE_STMTS, 4, {"statementHandles": ["uuid", "uuid1"]}, ["uuid", "uuid1"]),
],
)
@mock.patch(f"{HOOK_PATH}._get_conn_params", new_callable=PropertyMock)
@mock.patch(f"{HOOK_PATH}.get_headers")
def test_execute_query(
self,
mock_get_header,
mock_conn_param,
sql,
statement_count,
expected_response,
expected_query_ids,
mock_requests,
):
"""Test execute_query method, run query by mocking post request method and return the query ids"""
mock_requests.codes.ok = 200
mock_requests.request.side_effect = [
create_successful_response_mock(expected_response),
]
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.request.return_value).status_code = status_code_mock
hook = SnowflakeSqlApiHook("mock_conn_id")
query_ids = hook.execute_query(sql, statement_count)
assert query_ids == expected_query_ids
@mock.patch(f"{HOOK_PATH}._get_conn_params", new_callable=PropertyMock)
@mock.patch(f"{HOOK_PATH}.get_headers")
def test_execute_query_multiple_times_give_fresh_query_ids_each_time(
self, mock_get_header, mock_conn_param, mock_requests
):
"""Test execute_query method, run query by mocking post request method and return the query ids"""
sql, statement_count, expected_response, expected_query_ids = (
SQL_MULTIPLE_STMTS,
4,
{"statementHandles": ["uuid2", "uuid3"]},
["uuid2", "uuid3"],
)
mock_requests.codes.ok = 200
mock_requests.request.side_effect = [
create_successful_response_mock(expected_response),
]
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.request.return_value).status_code = status_code_mock
hook = SnowflakeSqlApiHook("mock_conn_id")
query_ids = hook.execute_query(sql, statement_count)
assert query_ids == expected_query_ids
sql, statement_count, expected_response, expected_query_ids = (
SINGLE_STMT,
1,
{"statementHandle": "uuid"},
["uuid"],
)
mock_requests.request.side_effect = [
create_successful_response_mock(expected_response),
]
query_ids = hook.execute_query(sql, statement_count)
assert query_ids == expected_query_ids
@pytest.mark.parametrize(
("sql", "statement_count", "expected_response", "expected_query_ids"),
[(SINGLE_STMT, 1, {"statementHandle": "uuid"}, ["uuid"])],
)
@mock.patch(f"{HOOK_PATH}._get_conn_params", new_callable=PropertyMock)
@mock.patch(f"{HOOK_PATH}.get_headers")
def test_execute_query_exception_without_statement_handle(
self,
mock_get_header,
mock_conn_param,
sql,
statement_count,
expected_response,
expected_query_ids,
mock_requests,
):
"""
Test execute_query method by mocking the exception response and raise airflow exception
without statementHandle in the response
"""
side_effect = create_post_side_effect()
mock_requests.request.side_effect = side_effect
hook = SnowflakeSqlApiHook("mock_conn_id")
with pytest.raises(AirflowException) as exception_info:
hook.execute_query(sql, statement_count)
assert exception_info
@pytest.mark.parametrize(
("sql", "statement_count", "bindings"),
[
(SQL_MULTIPLE_STMTS, 4, {"1": {"type": "FIXED", "value": "123"}}),
],
)
@mock.patch(f"{HOOK_PATH}._get_conn_params", new_callable=PropertyMock)
@mock.patch(f"{HOOK_PATH}.get_headers")
def test_execute_query_bindings_warning(
self,
mock_get_headers,
mock_conn_params,
sql,
statement_count,
bindings,
mock_requests,
):
"""Test execute_query method logs warning when bindings are provided for multi-statement queries"""
mock_conn_params.return_value = CONN_PARAMS
mock_get_headers.return_value = HEADERS
mock_requests.request.return_value = create_successful_response_mock(
{"statementHandles": ["uuid", "uuid1"]}
)
hook = SnowflakeSqlApiHook(snowflake_conn_id="mock_conn_id")
with mock.patch.object(hook.log, "warning") as mock_log_warning:
hook.execute_query(sql, statement_count, bindings=bindings)
mock_log_warning.assert_called_once_with(
"Bindings are not supported for multi-statement queries. Bindings will be ignored."
)
@pytest.mark.parametrize(
"query_ids",
[
(["uuid", "uuid1"]),
],
)
@mock.patch(f"{HOOK_PATH}.get_request_url_header_params")
def test_check_query_output(self, mock_geturl_header_params, query_ids, mock_requests):
"""Test check_query_output by passing query ids as params and mock get_request_url_header_params"""
req_id = uuid.uuid4()
params = {"requestId": str(req_id), "page": 2, "pageSize": 10}
mock_geturl_header_params.return_value = HEADERS, params, "/test/airflow/"
mock_requests.request.return_value.json.return_value = GET_RESPONSE
hook = SnowflakeSqlApiHook("mock_conn_id")
with mock.patch.object(hook.log, "info") as mock_log_info:
hook.check_query_output(query_ids)
mock_log_info.assert_called_with(GET_RESPONSE)
@pytest.mark.parametrize("query_ids", [["uuid", "uuid1"]])
@mock.patch(f"{HOOK_PATH}.get_request_url_header_params")
def test_check_query_output_exception(
self,
mock_geturl_header_params,
query_ids,
mock_requests,
):
"""
Test check_query_output by passing query ids as params and mock get_request_url_header_params
to raise airflow exception and mock with http error
"""
req_id = uuid.uuid4()
params = {"requestId": str(req_id), "page": 2, "pageSize": 10}
mock_geturl_header_params.return_value = HEADERS, params, "https://test/airflow/"
custom_retry_args = {
"stop": tenacity.stop_after_attempt(2), # Only 2 attempts instead of default 5
}
hook = SnowflakeSqlApiHook("mock_conn_id", api_retry_args=custom_retry_args)
mock_requests.request.side_effect = [create_post_side_effect(status_code=500)] * 3
with pytest.raises(requests.exceptions.HTTPError):
hook.check_query_output(query_ids)
@mock.patch(f"{HOOK_PATH}._get_conn_params", new_callable=PropertyMock)
@mock.patch(f"{HOOK_PATH}.get_headers")
def test_get_request_url_header_params(self, mock_get_header, mock_conn_param):
"""Test get_request_url_header_params by mocking _get_conn_params and get_headers"""
mock_conn_param.return_value = CONN_PARAMS
mock_get_header.return_value = HEADERS
hook = SnowflakeSqlApiHook("mock_conn_id")
header, params, url = hook.get_request_url_header_params("uuid")
assert header == HEADERS
assert url == "https://airflow.af_region.snowflakecomputing.com/api/v2/statements/uuid"
@mock.patch(f"{HOOK_PATH}.get_private_key")
@mock.patch(f"{HOOK_PATH}._get_conn_params", new_callable=PropertyMock)
@mock.patch("airflow.providers.snowflake.utils.sql_api_generate_jwt.JWTGenerator.get_token")
def test_get_headers_should_support_private_key(self, mock_get_token, mock_conn_param, mock_private_key):
"""Test get_headers method by mocking get_private_key and _get_conn_params method"""
mock_get_token.return_value = "newT0k3n"
mock_conn_param.return_value = CONN_PARAMS
hook = SnowflakeSqlApiHook(snowflake_conn_id="mock_conn_id")
result = hook.get_headers()
assert result == HEADERS
@mock.patch(f"{HOOK_PATH}.get_oauth_token")
@mock.patch(f"{HOOK_PATH}._get_conn_params", new_callable=PropertyMock)
def test_get_headers_should_support_oauth(self, mock_conn_param, mock_oauth_token):
"""Test get_headers method by mocking get_oauth_token and _get_conn_params method"""
mock_conn_param.return_value = CONN_PARAMS_OAUTH
mock_oauth_token.return_value = "newT0k3n"
hook = SnowflakeSqlApiHook(snowflake_conn_id="mock_conn_id")
result = hook.get_headers()
assert result == HEADERS_OAUTH
@mock.patch("airflow.providers.snowflake.hooks.snowflake.HTTPBasicAuth")
@mock.patch("requests.post")
@mock.patch(f"{HOOK_PATH}._get_conn_params", new_callable=PropertyMock)
def test_get_oauth_token(self, mock_conn_param, requests_post, mock_auth):
"""Test get_oauth_token method makes the right http request"""
basic_auth = {"Authorization": "Basic usernamepassword"}
mock_conn_param.return_value = CONN_PARAMS_OAUTH
requests_post.return_value.status_code = 200
mock_auth.return_value = basic_auth
hook = SnowflakeSqlApiHook(snowflake_conn_id="mock_conn_id")
with pytest.warns(expected_warning=AirflowProviderDeprecationWarning):
hook.get_oauth_token(CONN_PARAMS_OAUTH)
requests_post.assert_called_once_with(
f"https://{CONN_PARAMS_OAUTH['account']}.snowflakecomputing.com/oauth/token-request",
data={
"grant_type": "refresh_token",
"refresh_token": CONN_PARAMS_OAUTH["refresh_token"],
"redirect_uri": "https://localhost.com",
},
headers={"Content-Type": "application/x-www-form-urlencoded"},
auth=basic_auth,
)
@pytest.fixture
def unencrypted_temporary_private_key(self, tmp_path: Path) -> Path:
"""Encrypt the pem file from the path"""
key = rsa.generate_private_key(backend=default_backend(), public_exponent=65537, key_size=2048)
private_key = key.private_bytes(
serialization.Encoding.PEM, serialization.PrivateFormat.PKCS8, serialization.NoEncryption()
)
test_key_file = tmp_path / "test_key.pem"
test_key_file.write_bytes(private_key)
return test_key_file
@pytest.fixture
def base64_encoded_unencrypted_private_key(self, unencrypted_temporary_private_key: Path) -> str:
return base64.b64encode(unencrypted_temporary_private_key.read_bytes()).decode("utf-8")
@pytest.fixture
def encrypted_temporary_private_key(self, tmp_path: Path) -> Path:
"""Encrypt private key from the temp path"""
key = rsa.generate_private_key(backend=default_backend(), public_exponent=65537, key_size=2048)
private_key = key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.BestAvailableEncryption(_PASSWORD.encode()),
)
test_key_file: Path = tmp_path / "test_key.p8"
test_key_file.write_bytes(private_key)
return test_key_file
@pytest.fixture
def base64_encoded_encrypted_private_key(self, encrypted_temporary_private_key: Path) -> str:
return base64.b64encode(encrypted_temporary_private_key.read_bytes()).decode("utf-8")
def test_get_private_key_should_support_private_auth_in_connection(
self, base64_encoded_encrypted_private_key: str
):
"""Test get_private_key function with private_key_content in connection"""
connection_kwargs: Any = {
**BASE_CONNECTION_KWARGS,
"password": _PASSWORD,
"extra": {
"database": "db",
"account": "airflow",
"warehouse": "af_wh",
"region": "af_region",
"role": "af_role",
"private_key_content": base64_encoded_encrypted_private_key,
},
}
with unittest.mock.patch.dict(
"os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**connection_kwargs).get_uri()
):
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
hook.get_private_key()
assert hook.private_key is not None
def test_get_private_key_raise_exception(
self, encrypted_temporary_private_key: Path, base64_encoded_encrypted_private_key: str
):
"""
Test get_private_key function with private_key_content and private_key_file in connection
and raise airflow exception
"""
connection_kwargs: Any = {
**BASE_CONNECTION_KWARGS,
"password": _PASSWORD,
"extra": {
"database": "db",
"account": "airflow",
"warehouse": "af_wh",
"region": "af_region",
"role": "af_role",
"private_key_content": base64_encoded_encrypted_private_key,
"private_key_file": str(encrypted_temporary_private_key),
},
}
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
with (
unittest.mock.patch.dict(
"os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**connection_kwargs).get_uri()
),
pytest.raises(
AirflowException,
match="The private_key_file and private_key_content extra fields are mutually "
"exclusive. Please remove one.",
),
):
hook.get_private_key()
def test_get_private_key_should_support_private_auth_with_encrypted_key(
self, encrypted_temporary_private_key
):
"""Test get_private_key method by supporting for private auth encrypted_key"""
connection_kwargs = {
**BASE_CONNECTION_KWARGS,
"password": _PASSWORD,
"extra": {
"database": "db",
"account": "airflow",
"warehouse": "af_wh",
"region": "af_region",
"role": "af_role",
"private_key_file": str(encrypted_temporary_private_key),
},
}
with unittest.mock.patch.dict(
"os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**connection_kwargs).get_uri()
):
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
hook.get_private_key()
assert hook.private_key is not None
def test_get_private_key_should_support_private_auth_with_unencrypted_key(
self,
unencrypted_temporary_private_key,
):
connection_kwargs = {
**BASE_CONNECTION_KWARGS,
"password": None,
"extra": {
"database": "db",
"account": "airflow",
"warehouse": "af_wh",
"region": "af_region",
"role": "af_role",
"private_key_file": str(unencrypted_temporary_private_key),
},
}
with unittest.mock.patch.dict(
"os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**connection_kwargs).get_uri()
):
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
hook.get_private_key()
assert hook.private_key is not None
connection_kwargs["password"] = ""
with unittest.mock.patch.dict(
"os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**connection_kwargs).get_uri()
):
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
hook.get_private_key()
assert hook.private_key is not None
connection_kwargs["password"] = _PASSWORD
with (
unittest.mock.patch.dict(
"os.environ", AIRFLOW_CONN_TEST_CONN=Connection(**connection_kwargs).get_uri()
),
pytest.raises(TypeError, match="Password was given but private key is not encrypted."),
):
SnowflakeSqlApiHook(snowflake_conn_id="test_conn").get_private_key()
@pytest.mark.parametrize(
("status_code", "response", "expected_response"),
[
(
200,
{
"status": "success",
"message": "Statement executed successfully.",
"statementHandle": "uuid",
},
{
"status": "success",
"message": "Statement executed successfully.",
"statement_handles": ["uuid"],
},
),
(
200,
{
"status": "success",
"message": "Statement executed successfully.",
"statementHandles": ["uuid", "uuid1"],
},
{
"status": "success",
"message": "Statement executed successfully.",
"statement_handles": ["uuid", "uuid1"],
},
),
(202, {}, {"status": "running", "message": "Query statements are still running"}),
(422, {"message": "test"}, {"status": "error", "message": "test"}),
(
422,
{
"message": "SQL compilation error",
"code": "000904",
"sqlState": "42000",
"statementHandle": "handle123",
},
{
"status": "error",
"message": "SQL compilation error (Code: 000904, SQL State: 42000, Statement Handle: handle123)",
},
),
(404, {"status": "error", "message": "test"}, {"status": "error", "message": "test"}),
],
)
@mock.patch(f"{HOOK_PATH}.get_request_url_header_params")
def test_get_sql_api_query_status(
self, mock_geturl_header_params, status_code, response, expected_response, mock_requests
):
"""Test get_sql_api_query_status function by mocking the status, response and expected
response"""
req_id = uuid.uuid4()
params = {"requestId": str(req_id), "page": 2, "pageSize": 10}
mock_geturl_header_params.return_value = HEADERS, params, "/test/airflow/"
class MockResponse:
def __init__(self, status_code, data):
self.status_code = status_code
self.data = data
def json(self):
return self.data
def raise_for_status(self):
return
mock_requests.request.return_value = MockResponse(status_code, response)
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
assert hook.get_sql_api_query_status("uuid") == expected_response
@pytest.mark.asyncio
@pytest.mark.parametrize(
("status_code", "response", "expected_response"),
[
(
200,
{
"status": "success",
"message": "Statement executed successfully.",
"statementHandle": "uuid",
},
{
"status": "success",
"message": "Statement executed successfully.",
"statement_handles": ["uuid"],
},
),
(
200,
{
"status": "success",
"message": "Statement executed successfully.",
"statementHandles": ["uuid", "uuid1"],
},
{
"status": "success",
"message": "Statement executed successfully.",
"statement_handles": ["uuid", "uuid1"],
},
),
(202, {}, {"status": "running", "message": "Query statements are still running"}),
(422, {"message": "test"}, {"status": "error", "message": "test"}),
(404, {"status": "error", "message": "test"}, {"status": "error", "message": "test"}),
],
)
@mock.patch(f"{HOOK_PATH}.get_request_url_header_params")
async def test_get_sql_api_query_status_async(
self, mock_geturl_header_params, status_code, response, expected_response, mock_async_request
):
"""Test Async get_sql_api_query_status_async function by mocking the status,
response and expected response"""
req_id = uuid.uuid4()
params = {"requestId": str(req_id), "page": 2, "pageSize": 10}
mock_geturl_header_params.return_value = HEADERS, params, "/test/airflow/"
mock_async_request.__aenter__.return_value.status = status_code
mock_async_request.__aenter__.return_value.json = AsyncMock(return_value=response)
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
response = await hook.get_sql_api_query_status_async("uuid")
assert response == expected_response
@pytest.mark.parametrize("hook_params", [(HOOK_PARAMS), ({})])
def test_hook_parameter_propagation(self, hook_params):
"""
This tests the proper propagation of unpacked hook params into the SnowflakeSqlApiHook object.
"""
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn", **hook_params)
assert hook.database == hook_params.get("database", None)
assert hook.schema == hook_params.get("schema", None)
assert hook.warehouse == hook_params.get("warehouse", None)
assert hook.role == hook_params.get("role", None)
@pytest.mark.parametrize(
("test_hook_params", "sql", "statement_count", "expected_payload", "expected_response"),
[
(
{},
SINGLE_STMT,
1,
{
"statement": SINGLE_STMT,
"resultSetMetaData": {"format": "json"},
"database": CONN_PARAMS["database"],
"schema": CONN_PARAMS["schema"],
"warehouse": CONN_PARAMS["warehouse"],
"role": CONN_PARAMS["role"],
"bindings": {},
"parameters": {
"MULTI_STATEMENT_COUNT": 1,
"query_tag": "",
},
},
{"statementHandle": "uuid"},
),
(
{},
SQL_MULTIPLE_STMTS,
4,
{
"statement": SQL_MULTIPLE_STMTS,
"resultSetMetaData": {"format": "json"},
"database": CONN_PARAMS["database"],
"schema": CONN_PARAMS["schema"],
"warehouse": CONN_PARAMS["warehouse"],
"role": CONN_PARAMS["role"],
"bindings": {},
"parameters": {
"MULTI_STATEMENT_COUNT": 4,
"query_tag": "",
},
},
{"statementHandles": ["uuid", "uuid1"]},
),
(
HOOK_PARAMS,
SINGLE_STMT,
1,
{
"statement": SINGLE_STMT,
"resultSetMetaData": {"format": "json"},
"database": HOOK_PARAMS["database"],
"schema": HOOK_PARAMS["schema"],
"warehouse": HOOK_PARAMS["warehouse"],
"role": HOOK_PARAMS["role"],
"bindings": {},
"parameters": {
"MULTI_STATEMENT_COUNT": 1,
"query_tag": "",
},
},
{"statementHandle": "uuid"},
),
(
HOOK_PARAMS,
SQL_MULTIPLE_STMTS,
4,
{
"statement": SQL_MULTIPLE_STMTS,
"resultSetMetaData": {"format": "json"},
"database": HOOK_PARAMS["database"],
"schema": HOOK_PARAMS["schema"],
"warehouse": HOOK_PARAMS["warehouse"],
"role": HOOK_PARAMS["role"],
"bindings": {},
"parameters": {
"MULTI_STATEMENT_COUNT": 4,
"query_tag": "",
},
},
{"statementHandles": ["uuid", "uuid1"]},
),
],
)
@mock.patch("uuid.uuid4")
@mock.patch(f"{HOOK_PATH}._get_conn_params", new_callable=PropertyMock)
@mock.patch(f"{HOOK_PATH}.get_headers")
def test_proper_parametrization_of_execute_query_api_request(
self,
mock_get_headers,
mock_conn_param,
mock_uuid,
test_hook_params,
sql,
statement_count,
expected_payload,
expected_response,
mock_requests,
):
"""
This tests if the query execution ordered by POST request to Snowflake API
is sent with proper context parameters (database, schema, warehouse, role)
"""
mock_uuid.return_value = "uuid"
params = {"requestId": "uuid", "async": True, "pageSize": 10}
mock_conn_param.return_value = CONN_PARAMS
mock_get_headers.return_value = HEADERS
mock_requests.codes.ok = 200
mock_requests.request.side_effect = [
create_successful_response_mock(expected_response),
]
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.post.return_value).status_code = status_code_mock
hook = SnowflakeSqlApiHook("mock_conn_id", **test_hook_params)
url = f"{hook.account_identifier}.snowflakecomputing.com/api/v2/statements"
hook.execute_query(sql, statement_count)
mock_requests.request.assert_called_once_with(
method="post", url=url, headers=HEADERS, json=expected_payload, params=params
)
@pytest.mark.parametrize(
("status_code", "should_retry"),
[
(429, True), # Too Many Requests - should retry
(503, True), # Service Unavailable - should retry
(504, True), # Gateway Timeout - should retry
(500, False), # Internal Server Error - should not retry
(400, False), # Bad Request - should not retry
(401, False), # Unauthorized - should not retry
(404, False), # Not Found - should not retry
],
)
def test_make_api_call_with_retries_http_errors(self, status_code, should_retry, mock_requests):
"""
Test that _make_api_call_with_retries method only retries on specific HTTP status codes.
Should retry on 429, 503, 504 but not on other error codes.
"""
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
# Mock failed response
failed_response = mock.MagicMock()
failed_response.status_code = status_code
failed_response.json.return_value = {"error": "test error"}
failed_response.raise_for_status.side_effect = requests.exceptions.HTTPError(response=failed_response)
# Mock successful response for retries
success_response = mock.MagicMock()
success_response.status_code = 200
success_response.json.return_value = {"statementHandle": "uuid"}
success_response.raise_for_status.return_value = None
if should_retry:
# For retryable errors, first call fails, second succeeds
mock_requests.request.side_effect = [failed_response, success_response]
status_code, resp_json = hook._make_api_call_with_retries(
method="GET",
url=API_URL,
headers=HEADERS,
)
assert status_code == 200
assert resp_json == {"statementHandle": "uuid"}
assert mock_requests.request.call_count == 2
mock_requests.request.assert_has_calls(
[
call(
method="get",
json=None,
url=API_URL,
params=None,
headers=HEADERS,
),
call(
method="get",
json=None,
url=API_URL,
params=None,
headers=HEADERS,
),
]
)
else:
# For non-retryable errors, should fail immediately
mock_requests.request.side_effect = [failed_response]
with pytest.raises(requests.exceptions.HTTPError):
hook._make_api_call_with_retries(
method="GET",
url=API_URL,
headers=HEADERS,
)
assert mock_requests.request.call_count == 1
mock_requests.request.assert_called_with(
method="get",
json=None,
url=API_URL,
params=None,
headers=HEADERS,
)
def test_make_api_call_with_retries_connection_errors(self, mock_requests):
"""
Test that _make_api_call_with_retries method retries on connection errors.
"""
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
# Mock connection error then success
success_response = mock.MagicMock()
success_response.status_code = 200
success_response.json.return_value = {"statementHandle": "uuid"}
success_response.raise_for_status.return_value = None
mock_requests.request.side_effect = [
requests.exceptions.ConnectionError("Connection failed"),
success_response,
]
status_code, resp_json = hook._make_api_call_with_retries(
"POST", API_URL, HEADERS, json={"test": "data"}
)
assert status_code == 200
mock_requests.request.assert_called_with(
method="post",
url=API_URL,
params=None,
headers=HEADERS,
json={"test": "data"},
)
assert resp_json == {"statementHandle": "uuid"}
assert mock_requests.request.call_count == 2
def test_make_api_call_with_retries_timeout_errors(self, mock_requests):
"""
Test that _make_api_call_with_retries method retries on timeout errors.
"""
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
# Mock timeout error then success
success_response = mock.MagicMock()
success_response.status_code = 200
success_response.json.return_value = {"statementHandle": "uuid"}
success_response.raise_for_status.return_value = None
mock_requests.request.side_effect = [
requests.exceptions.Timeout("Request timed out"),
success_response,
]
status_code, resp_json = hook._make_api_call_with_retries("GET", API_URL, HEADERS)
assert status_code == 200
assert resp_json == {"statementHandle": "uuid"}
assert mock_requests.request.call_count == 2
def test_make_api_call_with_retries_max_attempts(self, mock_requests):
"""
Test that _make_api_call_with_retries method respects max retry attempts.
"""
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
# Mock response that always fails with retryable error
failed_response = mock.MagicMock()
failed_response.status_code = 429
failed_response.json.return_value = {"error": "rate limited"}
failed_response.raise_for_status.side_effect = requests.exceptions.HTTPError(response=failed_response)
mock_requests.request.side_effect = [failed_response] * 10 # More failures than max retries
with pytest.raises(requests.exceptions.HTTPError):
hook._make_api_call_with_retries("GET", API_URL, HEADERS)
# Should attempt 5 times (initial + 4 retries) based on default retry config
assert mock_requests.request.call_count == 5
def test_make_api_call_with_retries_success_no_retry(self, mock_requests):
"""
Test that _make_api_call_with_retries method doesn't retry on successful requests.
"""
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
# Mock successful response
success_response = mock.MagicMock()
success_response.status_code = 200
success_response.json.return_value = {"statementHandle": "uuid"}
success_response.raise_for_status.return_value = None
mock_requests.request.return_value = success_response
status_code, resp_json = hook._make_api_call_with_retries(
"POST", API_URL, HEADERS, json={"test": "data"}
)
assert status_code == 200
assert resp_json == {"statementHandle": "uuid"}
assert mock_requests.request.call_count == 1
def test_make_api_call_with_retries_unsupported_method(self):
"""
Test that _make_api_call_with_retries method raises ValueError for unsupported HTTP methods.
"""
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
with pytest.raises(ValueError, match="Unsupported HTTP method: PUT"):
hook._make_api_call_with_retries("PUT", API_URL, HEADERS)
def test_make_api_call_with_retries_custom_retry_config(self, mock_requests):
"""
Test that _make_api_call_with_retries method respects custom retry configuration.
"""
# Create hook with custom retry config
custom_retry_args = {
"stop": tenacity.stop_after_attempt(2), # Only 2 attempts instead of default 5
}
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn", api_retry_args=custom_retry_args)
# Mock response that always fails with retryable error
failed_response = mock.MagicMock()
failed_response.status_code = 503
failed_response.json.return_value = {"error": "service unavailable"}
failed_response.raise_for_status.side_effect = requests.exceptions.HTTPError(response=failed_response)
mock_requests.request.side_effect = [failed_response] * 3
with pytest.raises(requests.exceptions.HTTPError):
hook._make_api_call_with_retries("GET", API_URL, HEADERS)
# Should attempt only 2 times due to custom config
assert mock_requests.request.call_count == 2
@mock.patch(f"{MODULE_PATH}.time.sleep")
def test_wait_for_query_immediate_success(self, sleep_mock):
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
hook.get_sql_api_query_status = mock.MagicMock(
return_value={"status": "success", "results": ["row1", "row2"]}
)
result = hook.wait_for_query(query_id="qid-123")
assert result == {"status": "success", "results": ["row1", "row2"]}
sleep_mock.assert_not_called()
@mock.patch(f"{MODULE_PATH}.time.sleep")
def test_wait_for_query_polls_until_done(self, sleep_mock):
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
hook.get_sql_api_query_status = mock.MagicMock(
side_effect=[
{"status": "running"},
{"status": "running"},
{"status": "success", "data": [1, 2, 3]},
]
)
qid = "qid-456"
result = hook.wait_for_query(query_id=qid, poll_interval=2)
assert result == {"status": "success", "data": [1, 2, 3]}
assert sleep_mock.call_count == 2
sleep_mock.assert_has_calls([mock.call(2), mock.call(2)])
hook.get_sql_api_query_status.assert_has_calls(
[mock.call(query_id=qid), mock.call(query_id=qid), mock.call(query_id=qid)]
)
@mock.patch(f"{MODULE_PATH}.time.sleep")
def test_wait_for_query_error_raises_when_requested(self, sleep_mock):
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
hook.get_sql_api_query_status = mock.MagicMock(return_value={"status": "error", "message": "oh no!"})
with pytest.raises(RuntimeError) as excinfo:
hook.wait_for_query("qid-789", raise_error=True)
assert str(excinfo.value) == "oh no!"
sleep_mock.assert_not_called()
@mock.patch(f"{MODULE_PATH}.time.sleep")
def test_wait_for_query_error_returns_when_not_raising(self, sleep_mock):
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
hook.get_sql_api_query_status = mock.MagicMock(
return_value={"status": "error", "message": "still bad"}
)
result = hook.wait_for_query(query_id="qid-000", raise_error=False)
assert result == {"status": "error", "message": "still bad"}
sleep_mock.assert_not_called()
@mock.patch(f"{MODULE_PATH}.time.sleep")
def test_wait_for_query_handles_unknown_status(self, sleep_mock):
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
hook.get_sql_api_query_status = mock.MagicMock(return_value={"status": "queued", "info": ["a", "b"]})
result = hook.wait_for_query(query_id="qid-111")
assert result == {"status": "queued", "info": ["a", "b"]}
sleep_mock.assert_not_called()
@mock.patch(f"{MODULE_PATH}.time.time")
@mock.patch(f"{MODULE_PATH}.time.sleep")
def test_wait_for_query_timeout_error(self, sleep_mock, time_mock):
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
# Simulate a query that keeps running and never finishes
hook.get_sql_api_query_status = mock.MagicMock(return_value={"status": "running"})
# More side effects to ensure we hit the timeout and avoid StopIteration error
time_mock.side_effect = list(range(10))
qid = "qid-789"
timeout = 3
with pytest.raises(TimeoutError):
hook.wait_for_query(query_id=qid, timeout=timeout, poll_interval=1)
# Ensure that we polled multiple times before the timeout error is raised
assert sleep_mock.call_count == 3
sleep_mock.assert_has_calls([mock.call(1)] * 3)
assert hook.get_sql_api_query_status.call_count == 4
hook.get_sql_api_query_status.assert_has_calls([mock.call(query_id=qid)] * 4)
assert time_mock.call_count >= 3
@mock.patch(f"{HOOK_PATH}._make_api_call_with_retries")
@mock.patch(f"{HOOK_PATH}._process_response")
@mock.patch(f"{HOOK_PATH}.get_request_url_header_params")
def test_get_result_from_successful_sql_api_query_no_data(
self, mock_get_url, mock_process_response, mock_api_call
):
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
mock_get_url.return_value = ("header", "params", "url")
mock_process_response.return_value = {"status": "success"}
mock_api_call.return_value = (200, {"data": []})
result = hook.get_result_from_successful_sql_api_query(query_id="qid-1")
assert result == []
mock_get_url.assert_called_once_with("qid-1")
mock_api_call.assert_called_once_with("GET", "url", "header", "params")
mock_process_response.assert_called_once_with(200, {"data": []})
@mock.patch(f"{HOOK_PATH}._make_api_call_with_retries")
@mock.patch(f"{HOOK_PATH}._process_response")
@mock.patch(f"{HOOK_PATH}.get_request_url_header_params")
def test_get_result_from_successful_sql_api_query_no_column_metadata(
self, mock_get_url, mock_process_response, mock_api_call
):
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
mock_get_url.return_value = ("header", "params", "url")
mock_process_response.return_value = {"status": "success"}
mock_api_call.return_value = (200, {"data": [[1, 2]], "resultSetMetaData": {"rowType": []}})
result = hook.get_result_from_successful_sql_api_query(query_id="qid-2")
assert result == []
mock_get_url.assert_called_once_with("qid-2")
mock_api_call.assert_called_once_with("GET", "url", "header", "params")
mock_process_response.assert_called_once_with(
200, {"data": [[1, 2]], "resultSetMetaData": {"rowType": []}}
)
@mock.patch(f"{HOOK_PATH}._make_api_call_with_retries")
@mock.patch(f"{HOOK_PATH}._process_response")
@mock.patch(f"{HOOK_PATH}.get_request_url_header_params")
def test_get_result_from_successful_sql_api_query_raises_error(
self, mock_get_url, mock_process_response, mock_api_call
):
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
mock_get_url.return_value = ("header", "params", "url")
mock_process_response.return_value = {"status": "failed"}
mock_api_call.return_value = (400, {"some": "response"})
with pytest.raises(
RuntimeError, match="Query must have status `success` to retrieve data; got `failed`."
):
hook.get_result_from_successful_sql_api_query(query_id="qid-3")
mock_get_url.assert_called_once_with("qid-3")
mock_api_call.assert_called_once_with("GET", "url", "header", "params")
mock_process_response.assert_called_once_with(400, {"some": "response"})
@mock.patch(f"{HOOK_PATH}._make_api_call_with_retries")
@mock.patch(f"{HOOK_PATH}._process_response")
@mock.patch(f"{HOOK_PATH}.get_request_url_header_params")
def test_get_result_from_successful_sql_api_query_single_partition(
self, mock_get_url, mock_process_response, mock_api_call
):
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
mock_get_url.return_value = ("header", "params", "url")
mock_process_response.return_value = {"status": "success"}
mock_api_call.return_value = (
200,
{
"data": [[1, "a"], [2, "b"]],
"resultSetMetaData": {
"rowType": [{"name": "id"}, {"name": "val"}],
"partitionInfo": [{"p0": "p0"}], # Single partition
},
},
)
result = hook.get_result_from_successful_sql_api_query(query_id="qid-4")
expected = [{"id": 1, "val": "a"}, {"id": 2, "val": "b"}]
assert result == expected
# Only one API call (no additional partitions)
mock_api_call.assert_called_once_with("GET", "url", "header", "params")
mock_get_url.assert_called_once_with("qid-4")
mock_process_response.assert_called_once_with(
200,
{
"data": [[1, "a"], [2, "b"]],
"resultSetMetaData": {
"rowType": [{"name": "id"}, {"name": "val"}],
"partitionInfo": [{"p0": "p0"}],
},
},
)
@mock.patch(f"{HOOK_PATH}._make_api_call_with_retries")
@mock.patch(f"{HOOK_PATH}._process_response")
@mock.patch(f"{HOOK_PATH}.get_request_url_header_params")
def test_get_result_from_successful_sql_api_query_multiple_partitions(
self, mock_get_url, mock_process_response, mock_api_call
):
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
# Initial response with metadata and 2 partitions
initial_response = {
"data": [[1, "a"], [2, "b"]],
"resultSetMetaData": {
"rowType": [{"name": "id"}, {"name": "val"}],
"partitionInfo": [{"p0": "p0"}, {"p1": "p1"}],
},
}
# Second partition response
second_response = {
"data": [[3, "c"], [4, "d"]],
"resultSetMetaData": {
"rowType": [{"name": "id"}, {"name": "val"}],
"partitionInfo": [{"p0": "p0"}, {"p1": "p1"}],
},
}
mock_get_url.return_value = ("header", "params", "https://example.com/api/query")
mock_process_response.return_value = {"status": "success"}
mock_api_call.side_effect = [
(200, initial_response), # Initial call
(200, second_response), # Partition 1
]
result = hook.get_result_from_successful_sql_api_query(query_id="qid-5")
expected = [
{"id": 1, "val": "a"},
{"id": 2, "val": "b"},
{"id": 3, "val": "c"},
{"id": 4, "val": "d"},
]
assert result == expected
# Two API calls: first for the initial query, second for partition 1
assert mock_api_call.call_count == 2
mock_api_call.assert_any_call("GET", "https://example.com/api/query", "header", "params")
mock_api_call.assert_any_call("GET", "https://example.com/api/query?partition=1", "header", "params")
mock_get_url.assert_called_once_with("qid-5")
mock_process_response.assert_called_once()
@pytest.mark.asyncio
async def test_make_api_call_with_retries_async_success(self, mock_async_request):
"""
Test that _make_api_call_with_retries_async returns response on success.
"""
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
mock_response = create_async_request_client_response_success()
mock_async_request.__aenter__.return_value = mock_response
status_code, resp_json = await hook._make_api_call_with_retries_async("GET", API_URL, HEADERS)
assert status_code == 200
assert resp_json == GET_RESPONSE
assert mock_async_request.__aenter__.call_count == 1
@pytest.mark.asyncio
async def test_make_api_call_with_retries_async_retryable_http_error(self, mock_async_request):
"""
Test that _make_api_call_with_retries_async retries on retryable HTTP errors (429, 503, 504).
"""
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
# First response: 429, then 200
mock_response_429 = create_async_request_client_response_error()
mock_response_200 = create_async_request_client_response_success()
# Side effect for request context manager
mock_async_request.__aenter__.side_effect = [
mock_response_429,
mock_response_200,
]
status_code, resp_json = await hook._make_api_call_with_retries_async("GET", API_URL, HEADERS)
assert status_code == 200
assert resp_json == GET_RESPONSE
assert mock_async_request.__aenter__.call_count == 2
@pytest.mark.asyncio
async def test_make_api_call_with_retries_async_non_retryable_http_error(self, mock_async_request):
"""
Test that _make_api_call_with_retries_async does not retry on non-retryable HTTP errors.
"""
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
mock_response_400 = create_async_request_client_response_error(status_code=400)
mock_async_request.__aenter__.return_value = mock_response_400
with pytest.raises(aiohttp.ClientResponseError):
await hook._make_api_call_with_retries_async("GET", API_URL, HEADERS)
assert mock_async_request.__aenter__.call_count == 1
@pytest.mark.asyncio
async def test_make_api_call_with_retries_async_connection_error(self, mock_async_request):
"""
Test that _make_api_call_with_retries_async retries on connection errors.
"""
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
# First: connection error, then: success
failed_conn = create_async_connection_error()
mock_request_200 = create_async_request_client_response_success()
# Side effect for request context manager
mock_async_request.__aenter__.side_effect = [
failed_conn,
mock_request_200,
]
status_code, resp_json = await hook._make_api_call_with_retries_async("GET", API_URL, HEADERS)
assert status_code == 200
assert resp_json == GET_RESPONSE
assert mock_async_request.__aenter__.call_count == 2
@pytest.mark.asyncio
async def test_make_api_call_with_retries_async_max_attempts(self, mock_async_request):
"""
Test that _make_api_call_with_retries_async respects max retry attempts.
"""
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
mock_request_429 = create_async_request_client_response_error(status_code=429)
# Always returns 429
mock_async_request.__aenter__.side_effect = [mock_request_429] * 5
with pytest.raises(aiohttp.ClientResponseError):
await hook._make_api_call_with_retries_async("GET", API_URL, HEADERS)
# Should attempt 5 times (default max retries)
assert mock_async_request.__aenter__.call_count == 5
@pytest.mark.asyncio
async def test_make_api_call_with_retries_async_unsupported_method(self, mock_async_request):
"""
Test that _make_api_call_with_retries_async raises ValueError for unsupported HTTP methods.
"""
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
with pytest.raises(ValueError, match="Unsupported HTTP method: PATCH"):
await hook._make_api_call_with_retries_async("PATCH", API_URL, HEADERS)
# No HTTP call should be made
assert mock_async_request.__aenter__.call_count == 0
@pytest.mark.asyncio
async def test_make_api_call_with_retries_async_json_decode_error_prevention(self, mock_async_request):
"""
Test that _make_api_call_with_retries_async calls raise_for_status() before response.json()
to prevent JSONDecodeError when response is not valid JSON.
"""
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
failed_response = mock.MagicMock()
failed_response.status = 500
failed_response.json = AsyncMock(side_effect=ValueError("Invalid JSON"))
failed_response.raise_for_status.side_effect = aiohttp.ClientResponseError(
request_info=mock.MagicMock(),
history=mock.MagicMock(),
status=500,
message="Internal Server Error",
)
mock_async_request.__aenter__.return_value = failed_response
with pytest.raises(aiohttp.ClientResponseError):
await hook._make_api_call_with_retries_async("GET", API_URL, HEADERS)
failed_response.raise_for_status.assert_called_once()
failed_response.json.assert_not_called()
def test_make_api_call_with_retries_json_decode_error_prevention(self, mock_requests):
"""
Test that _make_api_call_with_retries calls raise_for_status() before response.json()
to prevent JSONDecodeError when response is not valid JSON.
"""
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
failed_response = mock.MagicMock()
failed_response.status_code = 500
failed_response.json.side_effect = requests.exceptions.JSONDecodeError("Invalid JSON", "", 0)
failed_response.raise_for_status.side_effect = requests.exceptions.HTTPError(response=failed_response)
mock_requests.request.return_value = failed_response
with pytest.raises(requests.exceptions.HTTPError):
hook._make_api_call_with_retries("GET", API_URL, HEADERS)
failed_response.raise_for_status.assert_called_once()
failed_response.json.assert_not_called()
@mock.patch(f"{HOOK_PATH}.get_request_url_header_params")
def test_cancel_sql_api_query_execution(self, mock_get_url_header_params, mock_requests):
"""Test _cancel_sql_api_query_execution makes POST request with /cancel suffix."""
query_id = "test-query-id"
mock_get_url_header_params.return_value = (
HEADERS,
{"requestId": "uuid"},
f"{API_URL}/{query_id}/cancel",
)
mock_requests.request.return_value = create_successful_response_mock(
{"status": "success", "message": "Statement cancelled."}
)
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
hook._cancel_sql_api_query_execution(query_id)
mock_get_url_header_params.assert_called_once_with(query_id, "/cancel")
mock_requests.request.assert_called_once_with(
method="post",
url=f"{API_URL}/{query_id}/cancel",
headers=HEADERS,
params={"requestId": "uuid"},
json=None,
)
@mock.patch(f"{HOOK_PATH}._cancel_sql_api_query_execution")
def test_cancel_queries(self, mock_cancel_execution):
"""Test cancel_queries calls _cancel_sql_api_query_execution for each query id."""
query_ids = ["query-1", "query-2", "query-3"]
hook = SnowflakeSqlApiHook(snowflake_conn_id="test_conn")
hook.cancel_queries(query_ids)
assert mock_cancel_execution.call_count == 3
mock_cancel_execution.assert_has_calls([call("query-1"), call("query-2"), call("query-3")])
| TestSnowflakeSqlApiHook |
python | pytorch__pytorch | test/dynamo/test_subclasses.py | {
"start": 95301,
"end": 96977
} | class ____(torch.nn.Module):
def forward(
self,
primals_1: "Sym(s47)", # PlainAOTInput(idx=0)
primals_2: "Sym(s16)", # PlainAOTInput(idx=1)
primals_3: "f32[s47, s16]", # SubclassGetAttrAOTInput(base=PlainAOTInput(idx=2), attr='a')
primals_4: "f32[s47, s16]", # SubclassGetAttrAOTInput(base=PlainAOTInput(idx=2), attr='b')
primals_5: "Sym(s47)", # SubclassSizeAOTInput(base=PlainAOTInput(idx=2), idx=0)
primals_6: "Sym(s16)", # SubclassSizeAOTInput(base=PlainAOTInput(idx=2), idx=1)
primals_7: "Sym(s16)", # SubclassStrideAOTInput(base=PlainAOTInput(idx=2), idx=0)
):
clone: "f32[s47, s16]" = torch.ops.aten.clone.default(primals_3); primals_3 = None
clone_1: "f32[s47, s16]" = torch.ops.aten.clone.default(primals_4); primals_4 = None
mul_6: "Sym(s16*s47)" = primals_1 * primals_2; primals_1 = primals_2 = None
view: "f32[s16*s47]" = torch.ops.aten.view.default(clone, [mul_6]); clone = None
view_1: "f32[s16*s47]" = torch.ops.aten.view.default(clone_1, [mul_6]); clone_1 = None
return (
view, # SubclassGetAttrAOTOutput(base=PlainAOTOutput(idx=0), attr='a')
view_1, # SubclassGetAttrAOTOutput(base=PlainAOTOutput(idx=0), attr='b')
mul_6, # SubclassSizeAOTOutput(base=PlainAOTOutput(idx=0), idx=0)
primals_5, # SavedForBackwardsAOTOutput(idx=0)
primals_7, # SavedForBackwardsAOTOutput(idx=1)
)
""", # noqa: B950
)
self.assertExpectedInline(
normalize_gm(bw[0].print_readable(print_output=False, expanded_def=True)),
"""\
| GraphModule |
python | getsentry__sentry | src/sentry/plugins/base/v2.py | {
"start": 10732,
"end": 11045
} | class ____(IPlugin2, metaclass=PluginMount):
"""
A plugin should be treated as if it were a singleton. The owner does not
control when or how the plugin gets instantiated, nor is it guaranteed that
it will happen, or happen more than once.
"""
__version__ = 2
__all__ = ("Plugin2",)
| Plugin2 |
python | getsentry__sentry | src/sentry/api/endpoints/artifact_bundles.py | {
"start": 1148,
"end": 1357
} | class ____(SentryAPIException):
status_code = status.HTTP_400_BAD_REQUEST
code = "invalid_sort_by_parameter"
message = "You can either sort via 'date_added' or 'date_modified'"
| InvalidSortByParameter |
python | huggingface__transformers | src/transformers/models/ovis2/modeling_ovis2.py | {
"start": 5946,
"end": 8023
} | class ____(nn.Module):
def __init__(self, config: Ovis2VisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.patch_embedding = nn.Conv2d(
in_channels=config.num_channels,
out_channels=self.embed_dim,
kernel_size=self.patch_size,
stride=self.patch_size,
padding="valid",
)
self.num_patches = (self.image_size // self.patch_size) ** 2
self.num_positions = self.num_patches
self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
self.rms_norm = Ovis2RMSNorm(config.hidden_size, config.rms_norm_eps)
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
target_dtype = self.patch_embedding.weight.dtype
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))
embeddings = patch_embeds.flatten(2).transpose(1, 2)
embeddings = self.rms_norm(embeddings)
embeddings = embeddings + self.position_embedding(self.position_ids)
return embeddings
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| Ovis2VisionEmbeddings |
python | pydantic__pydantic | pydantic-core/tests/serializers/test_union.py | {
"start": 29325,
"end": 39049
} | class ____:
def __init__(self, type_: Literal['alien']) -> None:
self.type_ = 'alien'
@pytest.fixture
def model_a_b_union_schema() -> core_schema.UnionSchema:
return core_schema.union_schema(
[
core_schema.model_schema(
cls=ModelA,
schema=core_schema.model_fields_schema(
fields={
'a': core_schema.model_field(core_schema.str_schema()),
'b': core_schema.model_field(core_schema.str_schema()),
},
),
),
core_schema.model_schema(
cls=ModelB,
schema=core_schema.model_fields_schema(
fields={
'c': core_schema.model_field(core_schema.str_schema()),
'd': core_schema.model_field(core_schema.str_schema()),
},
),
),
]
)
@pytest.fixture
def union_of_unions_schema(model_a_b_union_schema: core_schema.UnionSchema) -> core_schema.UnionSchema:
return core_schema.union_schema(
[
model_a_b_union_schema,
core_schema.union_schema(
[
core_schema.model_schema(
cls=ModelCat,
schema=core_schema.model_fields_schema(
fields={
'type_': core_schema.model_field(core_schema.literal_schema(['cat'])),
},
),
),
core_schema.model_schema(
cls=ModelDog,
schema=core_schema.model_fields_schema(
fields={
'type_': core_schema.model_field(core_schema.literal_schema(['dog'])),
},
),
),
]
),
]
)
@pytest.mark.parametrize(
'input,expected',
[
(ModelA(a='a', b='b'), {'a': 'a', 'b': 'b'}),
(ModelB(c='c', d='d'), {'c': 'c', 'd': 'd'}),
(ModelCat(type_='cat'), {'type_': 'cat'}),
(ModelDog(type_='dog'), {'type_': 'dog'}),
],
)
def test_union_of_unions_of_models(union_of_unions_schema: core_schema.UnionSchema, input: Any, expected: Any) -> None:
s = SchemaSerializer(union_of_unions_schema)
assert s.to_python(input, warnings='error') == expected
def test_union_of_unions_of_models_invalid_variant(union_of_unions_schema: core_schema.UnionSchema) -> None:
s = SchemaSerializer(union_of_unions_schema)
# All warnings should be available
messages = [
'Expected `ModelA`',
'Expected `ModelB`',
'Expected `ModelCat`',
'Expected `ModelDog`',
]
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
s.to_python(ModelAlien(type_='alien'))
for m in messages:
assert m in str(w[0].message)
assert 'input_type=ModelAlien' in str(w[0].message)
@pytest.fixture
def tagged_union_of_unions_schema(model_a_b_union_schema: core_schema.UnionSchema) -> core_schema.UnionSchema:
return core_schema.union_schema(
[
model_a_b_union_schema,
core_schema.tagged_union_schema(
discriminator='type_',
choices={
'cat': core_schema.model_schema(
cls=ModelCat,
schema=core_schema.model_fields_schema(
fields={
'type_': core_schema.model_field(core_schema.literal_schema(['cat'])),
},
),
),
'dog': core_schema.model_schema(
cls=ModelDog,
schema=core_schema.model_fields_schema(
fields={
'type_': core_schema.model_field(core_schema.literal_schema(['dog'])),
},
),
),
},
),
]
)
@pytest.mark.parametrize(
'input,expected',
[
(ModelA(a='a', b='b'), {'a': 'a', 'b': 'b'}),
(ModelB(c='c', d='d'), {'c': 'c', 'd': 'd'}),
(ModelCat(type_='cat'), {'type_': 'cat'}),
(ModelDog(type_='dog'), {'type_': 'dog'}),
],
)
def test_union_of_unions_of_models_with_tagged_union(
tagged_union_of_unions_schema: core_schema.UnionSchema, input: Any, expected: Any
) -> None:
s = SchemaSerializer(tagged_union_of_unions_schema)
assert s.to_python(input, warnings='error') == expected
def test_union_of_unions_of_models_with_tagged_union_invalid_variant(
tagged_union_of_unions_schema: core_schema.UnionSchema,
) -> None:
s = SchemaSerializer(tagged_union_of_unions_schema)
# All warnings should be available
messages = [
'Expected `ModelA`',
'Expected `ModelB`',
'Expected `ModelCat`',
'Expected `ModelDog`',
]
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
s.to_python(ModelAlien(type_='alien'))
for m in messages:
assert m in str(w[0].message)
assert 'input_type=ModelAlien' in str(w[0].message)
def test_mixed_union_models_and_other_types() -> None:
s = SchemaSerializer(
core_schema.union_schema(
[
core_schema.tagged_union_schema(
discriminator='type_',
choices={
'cat': core_schema.model_schema(
cls=ModelCat,
schema=core_schema.model_fields_schema(
fields={
'type_': core_schema.model_field(core_schema.literal_schema(['cat'])),
},
),
),
'dog': core_schema.model_schema(
cls=ModelDog,
schema=core_schema.model_fields_schema(
fields={
'type_': core_schema.model_field(core_schema.literal_schema(['dog'])),
},
),
),
},
),
core_schema.str_schema(),
]
)
)
assert s.to_python(ModelCat(type_='cat'), warnings='error') == {'type_': 'cat'}
assert s.to_python(ModelDog(type_='dog'), warnings='error') == {'type_': 'dog'}
# note, this fails as ModelCat and ModelDog (discriminator warnings, etc), but the warnings
# don't bubble up to this level :)
assert s.to_python('a string', warnings='error') == 'a string'
@pytest.mark.parametrize(
'input,expected',
[
({True: '1'}, b'{"true":"1"}'),
({1: '1'}, b'{"1":"1"}'),
({2.3: '1'}, b'{"2.3":"1"}'),
({'a': 'b'}, b'{"a":"b"}'),
],
)
def test_union_of_unions_of_models_with_tagged_union_json_key_serialization(
input: dict[bool | int | float | str, str], expected: bytes
) -> None:
s = SchemaSerializer(
core_schema.dict_schema(
keys_schema=core_schema.union_schema(
[
core_schema.union_schema([core_schema.bool_schema(), core_schema.int_schema()]),
core_schema.union_schema([core_schema.float_schema(), core_schema.str_schema()]),
]
),
values_schema=core_schema.str_schema(),
)
)
assert s.to_json(input, warnings='error') == expected
@pytest.mark.parametrize(
'input,expected',
[
({'key': True}, b'{"key":true}'),
({'key': 1}, b'{"key":1}'),
({'key': 2.3}, b'{"key":2.3}'),
({'key': 'a'}, b'{"key":"a"}'),
],
)
def test_union_of_unions_of_models_with_tagged_union_json_serialization(
input: dict[str, bool | int | float | str], expected: bytes
) -> None:
s = SchemaSerializer(
core_schema.dict_schema(
keys_schema=core_schema.str_schema(),
values_schema=core_schema.union_schema(
[
core_schema.union_schema([core_schema.bool_schema(), core_schema.int_schema()]),
core_schema.union_schema([core_schema.float_schema(), core_schema.str_schema()]),
]
),
)
)
assert s.to_json(input, warnings='error') == expected
def test_discriminated_union_ser_with_typed_dict() -> None:
v = SchemaSerializer(
core_schema.tagged_union_schema(
{
'a': core_schema.typed_dict_schema(
{
'type': core_schema.typed_dict_field(core_schema.literal_schema(['a'])),
'a': core_schema.typed_dict_field(core_schema.int_schema()),
}
),
'b': core_schema.typed_dict_schema(
{
'type': core_schema.typed_dict_field(core_schema.literal_schema(['b'])),
'b': core_schema.typed_dict_field(core_schema.str_schema()),
}
),
},
discriminator='type',
)
)
assert v.to_python({'type': 'a', 'a': 1}, warnings='error') == {'type': 'a', 'a': 1}
assert v.to_python({'type': 'b', 'b': 'foo'}, warnings='error') == {'type': 'b', 'b': 'foo'}
| ModelAlien |
python | pytorch__pytorch | torch/distributed/_local_tensor/__init__.py | {
"start": 67491,
"end": 71858
} | class ____:
"""
LocalTensor-aware version of _PhiloxState that manages per-rank RNG states.
This class handles the case where the generator state is a LocalTensor, allowing
different offsets and seeds for different virtual ranks.
Note: This is designed to be used as a drop-in replacement for _PhiloxState
when working with LocalTensors in the DTensor random ops implementation.
"""
def __init__(self, state: torch.Tensor):
assert isinstance(state, LocalTensor), (
"_LocalPhiloxState requires a LocalTensor"
)
self._local_tensor = state
self._per_rank_states = {
rank: local_state.to("cpu")
for rank, local_state in state._local_tensors.items()
}
@property
def state(self):
return LocalTensor(self._per_rank_states) # type: ignore[name-defined]
@property
def offset(self) -> Union[int, SymInt]:
from torch.distributed.tensor._random import _PhiloxState
offsets = {}
for rank, state in self._per_rank_states.items():
rank_philox = _PhiloxState(state)
offsets[rank] = rank_philox.offset
if len(set(offsets.values())) == 1:
return next(iter(offsets.values()))
# pyrefly: ignore [bad-argument-type, bad-argument-count]
return SymInt(LocalIntNode(offsets))
@offset.setter
def offset(self, offset: Union[int, SymInt]) -> None:
from torch.distributed.tensor._random import _PhiloxState
if isinstance(offset, SymInt) and isinstance(offset.node, LocalIntNode):
for rank, state in self._per_rank_states.items():
rank_offset = offset.node._local_ints[rank]
rank_philox = _PhiloxState(state)
rank_philox.offset = rank_offset
else:
offset_int = int(offset) if isinstance(offset, SymInt) else offset
for state in self._per_rank_states.values():
rank_philox = _PhiloxState(state)
rank_philox.offset = offset_int
@property
def seed(self) -> Union[int, SymInt]:
from torch.distributed.tensor._random import _PhiloxState
seeds = {}
for rank, state in self._per_rank_states.items():
rank_philox = _PhiloxState(state)
seeds[rank] = rank_philox.seed
if len(set(seeds.values())) == 1:
return next(iter(seeds.values()))
return SymInt(LocalIntNode(seeds))
@seed.setter
def seed(self, seed: Union[int, SymInt]) -> None:
from torch.distributed.tensor._random import _PhiloxState
if isinstance(seed, SymInt) and isinstance(seed.node, LocalIntNode):
for rank, state in self._per_rank_states.items():
rank_seed = seed.node._local_ints[rank]
rank_philox = _PhiloxState(state)
rank_philox.seed = rank_seed
else:
seed_int = int(seed) if isinstance(seed, SymInt) else seed
for state in self._per_rank_states.values():
rank_philox = _PhiloxState(state)
rank_philox.seed = seed_int
def apply_to_local_tensor_mode(self, device_handle) -> None:
"""
Apply per-rank RNG states to the LocalTensorMode's tracked states.
This updates both the device RNG state and the LocalTensorMode's _per_rank_rng_states.
Args:
device_handle: The device handle to use for setting RNG state (_LocalDeviceHandle)
"""
if not enabled_local_tensor_mode():
return
assert hasattr(self, "_per_rank_offsets")
for rank in sorted(self._per_rank_states.keys()):
offset_value = self._per_rank_offsets[rank]
if isinstance(offset_value, SymInt):
if isinstance(offset_value.node, LocalIntNode):
offset_value = offset_value.node._local_ints[rank]
else:
offset_value = int(offset_value)
offset_tensor = torch.tensor(
[offset_value], dtype=torch.uint64, device="cpu"
).view(torch.uint8)
self._per_rank_states[rank][8:] = offset_tensor
# pyrefly: ignore [bad-argument-type, bad-argument-count]
device_handle.set_rng_state(LocalTensor(self._per_rank_states))
| _LocalPhiloxState |
python | scipy__scipy | scipy/linalg/tests/test_decomp_update.py | {
"start": 66420,
"end": 68424
} | class ____(BaseQRupdate):
dtype = np.dtype('D')
def test_form_qTu():
# We want to ensure that all of the code paths through this function are
# tested. Most of them should be hit with the rest of test suite, but
# explicit tests make clear precisely what is being tested.
#
# This function expects that Q is either C or F contiguous and square.
# Economic mode decompositions (Q is (M, N), M != N) do not go through this
# function. U may have any positive strides.
#
# Some of these test are duplicates, since contiguous 1d arrays are both C
# and F.
q_order = ['F', 'C']
q_shape = [(8, 8), ]
u_order = ['F', 'C', 'A'] # here A means is not F not C
u_shape = [1, 3]
dtype = ['f', 'd', 'F', 'D']
for qo, qs, uo, us, d in \
itertools.product(q_order, q_shape, u_order, u_shape, dtype):
if us == 1:
check_form_qTu(qo, qs, uo, us, 1, d)
check_form_qTu(qo, qs, uo, us, 2, d)
else:
check_form_qTu(qo, qs, uo, us, 2, d)
def check_form_qTu(q_order, q_shape, u_order, u_shape, u_ndim, dtype):
rng = np.random.default_rng(47)
if u_shape == 1 and u_ndim == 1:
u_shape = (q_shape[0],)
else:
u_shape = (q_shape[0], u_shape)
dtype = np.dtype(dtype)
if dtype.char in 'fd':
q = rng.random(q_shape)
u = rng.random(u_shape)
elif dtype.char in 'FD':
q = rng.random(q_shape) + 1j*rng.random(q_shape)
u = rng.random(u_shape) + 1j*rng.random(u_shape)
else:
raise ValueError("form_qTu doesn't support this dtype")
q = np.require(q, dtype, q_order)
if u_order != 'A':
u = np.require(u, dtype, u_order)
else:
u, = make_strided((u.astype(dtype),))
rtol = 10.0 ** -(np.finfo(dtype).precision-2)
atol = 2*np.finfo(dtype).eps
expected = np.dot(q.T.conj(), u)
res = _decomp_update._form_qTu(q, u)
assert_allclose(res, expected, rtol=rtol, atol=atol)
| TestQRupdate_D |
python | protocolbuffers__protobuf | python/google/protobuf/internal/type_checkers.py | {
"start": 9374,
"end": 10048
} | class ____(object):
"""Checker used for double fields.
Performs type-check and range check.
"""
def CheckValue(self, proposed_value):
"""Check and convert proposed_value to float."""
if (not hasattr(proposed_value, '__float__') and
not hasattr(proposed_value, '__index__')) or (
type(proposed_value).__module__ == 'numpy' and
type(proposed_value).__name__ == 'ndarray'):
message = ('%.1024r has type %s, but expected one of: int, float' %
(proposed_value, type(proposed_value)))
raise TypeError(message)
return float(proposed_value)
def DefaultValue(self):
return 0.0
| DoubleValueChecker |
python | streamlit__streamlit | lib/tests/streamlit/config_util_test.py | {
"start": 1458,
"end": 10665
} | class ____(unittest.TestCase):
def test_clean(self):
result = config_util._clean(" clean this text ")
assert result == " clean this text "
def test_clean_empty_string(self):
result = config_util._clean("")
assert result == ""
def test_clean_paragraphs(self):
# from https://www.lipsum.com/
input_text = textwrap.dedent(
"""
Lorem ipsum dolor sit amet,
consectetur adipiscing elit.
Curabitur ac fermentum eros.
Maecenas libero est,
ultricies
eget ligula eget, """
)
truth = [
"Lorem ipsum dolor sit amet,\nconsectetur adipiscing elit.",
" Curabitur ac fermentum eros.",
"Maecenas libero est,\n ultricies\neget ligula eget, ",
]
result = config_util._clean_paragraphs(input_text)
assert truth == result
def test_clean_paragraphs_empty_string(self):
result = config_util._clean_paragraphs("")
assert result == [""]
@patch("click.secho")
def test_default_config_options_commented_out(self, patched_echo):
config_options = create_config_options(
{
"server.address": "example.com", # overrides default
"server.port": 8501, # explicitly set to default
}
)
config_util.show_config(CONFIG_SECTION_DESCRIPTIONS, config_options)
[(args, _)] = patched_echo.call_args_list
# Remove the ascii escape sequences used to color terminal output.
output = re.compile(r"\x1b[^m]*m").sub("", args[0])
lines = set(output.split("\n"))
# Config options not explicitly set should be commented out.
assert "# runOnSave = false" in lines
# Config options explicitly set should *not* be commented out, even if
# they are set to their default values.
assert 'address = "example.com"' in lines
assert "port = 8501" in lines
@patch("click.secho")
def test_ui_section_hidden(self, patched_echo):
config_options = create_config_options({})
config_util.show_config(CONFIG_SECTION_DESCRIPTIONS, config_options)
[(args, _)] = patched_echo.call_args_list
# Remove the ascii escape sequences used to color terminal output.
output = re.compile(r"\x1b[^m]*m").sub("", args[0])
lines = set(output.split("\n"))
assert "[ui]" not in lines
assert "# hideTopBar = false" not in lines
@parameterized.expand(
[
# Nothing changed.
(
{
"mapbox.token": "shhhhhhh",
"server.address": "localhost",
},
{
"mapbox.token": "shhhhhhh",
"server.address": "localhost",
},
False,
),
# A non-server config option changed.
(
{
"mapbox.token": "shhhhhhh",
"server.address": "localhost",
},
{
"mapbox.token": "SHHHHHHH!!!!!! >:(",
"server.address": "localhost",
},
False,
),
# A server config option changed.
(
{
"mapbox.token": "shhhhhhh",
"server.address": "localhost",
},
{
"mapbox.token": "shhhhhhh",
"server.address": "streamlit.io",
},
True,
),
]
)
def test_server_option_changed(self, old, new, changed):
old_options = create_config_options(old)
new_options = create_config_options(new)
assert config_util.server_option_changed(old_options, new_options) == changed
@patch("click.secho")
def test_newlines_preserved_in_description(self, patched_echo):
config_options = {
"server.customOption": ConfigOption(
key="server.customOption",
description="""
This option has multiple lines.
Each line should be preserved.
Even this one.
""",
default_val="default",
type_=str,
)
}
config_util.show_config(CONFIG_SECTION_DESCRIPTIONS, config_options)
[(args, _)] = patched_echo.call_args_list
# Remove the ascii escape sequences used to color terminal output.
output = re.compile(r"\x1b[^m]*m").sub("", args[0])
lines = set(output.split("\n"))
assert "# This option has multiple lines." in lines
assert "# Each line should be preserved." in lines
assert "# Even this one." in lines
@patch("click.secho")
def test_omits_empty_lines_at_description_start(self, patched_echo):
config_options = {
"server.customOption": ConfigOption(
key="server.customOption",
description="""
This option's description starts from third line.
All preceding empty lines should be removed.
""",
default_val="default",
type_=str,
)
}
config_util.show_config(CONFIG_SECTION_DESCRIPTIONS, config_options)
[(args, _)] = patched_echo.call_args_list
# Remove the ascii escape sequences used to color terminal output.
output = re.compile(r"\x1b[^m]*m").sub("", args[0])
lines = output.split("\n")
description_index = lines.index(
"# This option's description starts from third line."
)
assert description_index > 1, (
"Description should not be at the start of the output"
)
assert lines[description_index - 1].strip() == "", (
"Preceding line should be empty (this line separates config options)"
)
assert lines[description_index - 2].strip() != "", (
"The line before the preceding line should not be empty (this is the section header)"
)
@patch("click.secho")
def test_description_appears_before_option(self, patched_echo):
config_options = {
"server.customOption": ConfigOption(
key="server.customOption",
description="This option's description should appear before the option.",
default_val="default",
type_=str,
)
}
config_util.show_config(CONFIG_SECTION_DESCRIPTIONS, config_options)
[(args, _)] = patched_echo.call_args_list
# Remove the ascii escape sequences used to color terminal output.
output = re.compile(r"\x1b[^m]*m").sub("", args[0])
lines = output.split("\n")
# Find the index of the description and the option in the output.
description_index = lines.index(
"# This option's description should appear before the option."
)
option_index = lines.index('# customOption = "default"')
# Assert that the description appears before the option.
assert description_index < option_index
@patch("click.secho")
def test_show_config_section_formatting(self, patched_echo):
config_options = create_config_options(
{"server.address": "localhost", "theme.sidebar.primaryColor": "red"}
)
config_util.show_config(CONFIG_SECTION_DESCRIPTIONS, config_options)
[(args, _)] = patched_echo.call_args_list
output = re.compile(r"\x1b[^m]*m").sub("", args[0])
lines = output.split("\n")
assert "[server]" in lines
assert 'address = "localhost"' in lines
assert "[theme.sidebar]" in lines
assert 'primaryColor = "red"' in lines
@patch("click.secho")
def test_show_config_hidden_option(self, patched_echo):
config_options = {
"server.hiddenOption": ConfigOption(
key="server.hiddenOption",
description="This is a hidden option.",
default_val="default",
type_=str,
visibility="hidden",
)
}
config_util.show_config(CONFIG_SECTION_DESCRIPTIONS, config_options)
[(args, _)] = patched_echo.call_args_list
output = re.compile(r"\x1b[^m]*m").sub("", args[0])
lines = output.split("\n")
assert "# This is a hidden option." not in lines
@patch("click.secho")
def test_correctly_handles_show_error_details(self, patched_echo):
"""Test that show_config correctly handles showErrorDetails = "full"
based on a regression.
"""
config_util.show_config(
CONFIG_SECTION_DESCRIPTIONS,
create_config_options({}),
)
[(args, _)] = patched_echo.call_args_list
output = re.compile(r"\x1b[^m]*m").sub("", args[0])
assert 'showErrorDetails = "full"' in output
| ConfigUtilTest |
python | django__django | tests/admin_inlines/models.py | {
"start": 7141,
"end": 7424
} | class ____(models.Model):
my_own_pk = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
parent = models.ForeignKey(ParentModelWithCustomPk, models.CASCADE)
def get_absolute_url(self):
return "/child_model1/"
| ChildModel1 |
python | psf__requests | tests/test_utils.py | {
"start": 8044,
"end": 8295
} | class ____:
def test_valid(self):
assert is_ipv4_address("8.8.8.8")
@pytest.mark.parametrize("value", ("8.8.8.8.8", "localhost.localdomain"))
def test_invalid(self, value):
assert not is_ipv4_address(value)
| TestIsIPv4Address |
python | bokeh__bokeh | src/bokeh/models/css.py | {
"start": 3544,
"end": 3966
} | class ____(ImportedStyleSheet):
""" An imported stylesheet that's appended to the ``<head>`` element.
.. note::
A stylesheet will be appended only once, regardless of how
many times it's being used in other models.
"""
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| GlobalImportedStyleSheet |
python | doocs__leetcode | solution/3200-3299/3205.Maximum Array Hopping Score I/Solution.py | {
"start": 0,
"end": 257
} | class ____:
def maxScore(self, nums: List[int]) -> int:
@cache
def dfs(i: int) -> int:
return max(
[(j - i) * nums[j] + dfs(j) for j in range(i + 1, len(nums))] or [0]
)
return dfs(0)
| Solution |
python | ipython__ipython | IPython/core/display.py | {
"start": 17791,
"end": 20455
} | class ____(DisplayObject):
"""JSON expects a JSON-able dict or list
not an already-serialized JSON string.
Scalar types (None, number, string) are not allowed, only dict or list containers.
"""
# wrap data in a property, which warns about passing already-serialized JSON
_data = None
def __init__(self, data=None, url=None, filename=None, expanded=False, metadata=None, root='root', **kwargs):
"""Create a JSON display object given raw data.
Parameters
----------
data : dict or list
JSON data to display. Not an already-serialized JSON string.
Scalar types (None, number, string) are not allowed, only dict
or list containers.
url : unicode
A URL to download the data from.
filename : unicode
Path to a local file to load the data from.
expanded : boolean
Metadata to control whether a JSON display component is expanded.
metadata : dict
Specify extra metadata to attach to the json display object.
root : str
The name of the root element of the JSON tree
"""
self.metadata = {
'expanded': expanded,
'root': root,
}
if metadata:
self.metadata.update(metadata)
if kwargs:
self.metadata.update(kwargs)
super(JSON, self).__init__(data=data, url=url, filename=filename)
def _check_data(self):
if self.data is not None and not isinstance(self.data, (dict, list)):
raise TypeError("%s expects JSONable dict or list, not %r" % (self.__class__.__name__, self.data))
@property
def data(self):
return self._data
@data.setter
def data(self, data):
if isinstance(data, (Path, PurePath)):
data = str(data)
if isinstance(data, str):
if self.filename is None and self.url is None:
warnings.warn("JSON expects JSONable dict or list, not JSON strings")
data = json.loads(data)
self._data = data
def _data_and_metadata(self):
return self.data, self.metadata
def _repr_json_(self):
return self._data_and_metadata()
_css_t = """var link = document.createElement("link");
link.rel = "stylesheet";
link.type = "text/css";
link.href = "%s";
document.head.appendChild(link);
"""
_lib_t1 = """new Promise(function(resolve, reject) {
var script = document.createElement("script");
script.onload = resolve;
script.onerror = reject;
script.src = "%s";
document.head.appendChild(script);
}).then(() => {
"""
_lib_t2 = """
});"""
| JSON |
python | ray-project__ray | python/ray/train/v2/_internal/execution/context.py | {
"start": 1616,
"end": 2578
} | class ____:
"""Holds the metadata and context for the current training run."""
# The unique ID of the training run.
run_id: str = field(init=False, default_factory=lambda: uuid.uuid4().hex)
# The run configuration for the current training run.
run_config: RunConfig
# The configuration passed to the training function.
train_loop_config: Optional[Dict[str, Any]]
# The scaling configuration for the current training run.
scaling_config: ScalingConfig
# The configuration for the training backend (e.g., PyTorch, XGBoost).
backend_config: "BackendConfig"
# The datasets used in the current training run.
datasets: Dict[str, Dataset]
# The configuration for dataset ingestion and sharding.
dataset_config: "DataConfig"
def get_run_config(self) -> RunConfig:
"""Returns the run config of the current training run."""
return self.run_config
@dataclass(frozen=True)
| TrainRunContext |
python | xlwings__xlwings | xlwings/expansion.py | {
"start": 1269,
"end": 1729
} | class ____(Expander):
def expand(self, rng):
if rng(2, 1).raw_value in _empty:
return Range(rng(1, 1), rng(1, rng.shape[1]))
elif rng(3, 1).raw_value in _empty:
return Range(rng(1, 1), rng(2, rng.shape[1]))
else:
end_row = rng(2, 1).end("down").row - rng.row + 1
return Range(rng(1, 1), rng(end_row, rng.shape[1]))
VerticalExpander().register("vertical", "down", "d")
| VerticalExpander |
python | realpython__materials | fastapi-url-shortener/source_code_final/shortener_app/schemas.py | {
"start": 182,
"end": 234
} | class ____(URL):
url: str
admin_url: str
| URLInfo |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1186338,
"end": 1187524
} | class ____(sgqlc.types.Type, Node):
"""Represents a 'head_ref_force_pushed' event on a given pull
request.
"""
__schema__ = github_schema
__field_names__ = ("actor", "after_commit", "before_commit", "created_at", "pull_request", "ref")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
"""Identifies the actor who performed the event."""
after_commit = sgqlc.types.Field(Commit, graphql_name="afterCommit")
"""Identifies the after commit SHA for the 'head_ref_force_pushed'
event.
"""
before_commit = sgqlc.types.Field(Commit, graphql_name="beforeCommit")
"""Identifies the before commit SHA for the 'head_ref_force_pushed'
event.
"""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
pull_request = sgqlc.types.Field(sgqlc.types.non_null("PullRequest"), graphql_name="pullRequest")
"""PullRequest referenced by event."""
ref = sgqlc.types.Field("Ref", graphql_name="ref")
"""Identifies the fully qualified ref name for the
'head_ref_force_pushed' event.
"""
| HeadRefForcePushedEvent |
python | huggingface__transformers | tests/models/oneformer/test_modeling_oneformer.py | {
"start": 1592,
"end": 8633
} | class ____:
def __init__(
self,
parent,
batch_size=2,
is_training=True,
vocab_size=99,
use_auxiliary_loss=False,
num_queries=10,
num_channels=3,
min_size=32 * 8,
max_size=32 * 8,
num_labels=4,
hidden_dim=64,
sequence_length=77,
n_ctx=4,
):
self.parent = parent
self.batch_size = batch_size
self.is_training = is_training
self.vocab_size = vocab_size
self.use_auxiliary_loss = use_auxiliary_loss
self.num_queries = num_queries
self.num_channels = num_channels
self.min_size = min_size
self.max_size = max_size
self.num_labels = num_labels
self.hidden_dim = hidden_dim
self.sequence_length = sequence_length
self.n_ctx = n_ctx
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
torch_device
)
task_inputs = (
torch.randint(high=self.vocab_size, size=(self.batch_size, self.sequence_length)).to(torch_device).long()
)
pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
text_inputs = (
torch.randint(
high=self.vocab_size, size=(self.batch_size, self.num_queries - self.n_ctx, self.sequence_length)
)
.to(torch_device)
.long()
)
mask_labels = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
).float()
class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
config = self.get_config()
return config, pixel_values, task_inputs, text_inputs, pixel_mask, mask_labels, class_labels
def get_config(self):
config = OneFormerConfig(
text_encoder_vocab_size=self.vocab_size,
hidden_size=self.hidden_dim,
num_queries=self.num_queries,
num_labels=self.num_labels,
encoder_feedforward_dim=32,
dim_feedforward=64,
encoder_layers=2,
decoder_layers=2,
)
config.backbone_config.embed_dim = 16
config.backbone_config.depths = [1, 1, 1, 1]
config.backbone_config.hidden_size = 16
config.backbone_config.num_channels = self.num_channels
config.backbone_config.num_heads = [1, 1, 2, 2]
config.backbone = None
config.hidden_dim = self.hidden_dim
config.mask_dim = self.hidden_dim
config.conv_dim = self.hidden_dim
config.text_encoder_width = self.hidden_dim
config.task_seq_len = self.sequence_length
config.max_seq_len = self.sequence_length
config.text_encoder_context_length = self.sequence_length
config.text_encoder_n_ctx = self.n_ctx
return config
def prepare_config_and_inputs_for_common(self):
config, pixel_values, task_inputs, pixel_mask, _, _, _ = self.prepare_config_and_inputs()
inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask, "task_inputs": task_inputs}
return config, inputs_dict
def check_output_hidden_state(self, output, config):
encoder_hidden_states = output.encoder_hidden_states
pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
self.parent.assertTrue(len(pixel_decoder_hidden_states), config.encoder_layers)
self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers - 1)
def create_and_check_oneformer_model(
self, config, pixel_values, task_inputs, pixel_mask, output_hidden_states=False
):
with torch.no_grad():
model = OneFormerModel(config=config)
model.to(torch_device)
model.eval()
output = model(pixel_values=pixel_values, task_inputs=task_inputs, pixel_mask=pixel_mask)
output = model(pixel_values, task_inputs=task_inputs, output_hidden_states=True)
# the correct shape of output.transformer_decoder_hidden_states ensure the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_object_queries.shape,
(self.batch_size, self.num_queries, self.hidden_dim),
)
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_hidden_states is not None)
self.parent.assertTrue(output.encoder_hidden_states is not None)
if output_hidden_states:
self.check_output_hidden_state(output, config)
def create_and_check_oneformer_universal_segmentation_head_model(
self, config, pixel_values, task_inputs, text_inputs, pixel_mask, mask_labels, class_labels
):
model = OneFormerForUniversalSegmentation(config=config)
model.to(torch_device)
model.eval()
def comm_check_on_output(result):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_hidden_states is not None)
self.parent.assertTrue(result.pixel_decoder_hidden_states is not None)
self.parent.assertTrue(result.encoder_hidden_states is not None)
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape,
(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
)
with torch.no_grad():
result = model(pixel_values=pixel_values, task_inputs=task_inputs, pixel_mask=pixel_mask)
result = model(pixel_values, task_inputs)
comm_check_on_output(result)
config.is_training = True
model = OneFormerForUniversalSegmentation(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(
pixel_values=pixel_values,
task_inputs=task_inputs,
pixel_mask=pixel_mask,
mask_labels=mask_labels,
class_labels=class_labels,
text_inputs=text_inputs,
)
comm_check_on_output(result)
self.parent.assertTrue(result.loss is not None)
self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
| OneFormerModelTester |
python | numba__numba | numba/core/types/misc.py | {
"start": 8389,
"end": 8663
} | class ____(Type):
def __init__(self, name, members):
assert members in (2, 3)
self.members = members
self.has_step = members >= 3
super(SliceType, self).__init__(name)
@property
def key(self):
return self.members
| SliceType |
python | getsentry__sentry | src/sentry/integrations/discord/spec.py | {
"start": 778,
"end": 3291
} | class ____(MessagingIntegrationSpec):
@property
def provider_slug(self) -> str:
return IntegrationProviderSlug.DISCORD.value
@property
def action_service(self) -> ActionService:
return ActionService.DISCORD
@property
def integration_provider(self) -> type[IntegrationProvider]:
from sentry.integrations.discord.integration import DiscordIntegrationProvider
return DiscordIntegrationProvider
@property
def identity_view_set(self) -> MessagingIdentityLinkViewSet:
from sentry.integrations.discord.views.link_identity import DiscordLinkIdentityView
from sentry.integrations.discord.views.unlink_identity import DiscordUnlinkIdentityView
return MessagingIdentityLinkViewSet(
link_personal_identity=DiscordLinkIdentityView,
unlink_personal_identity=DiscordUnlinkIdentityView,
)
def send_incident_alert_notification(
self,
organization: Organization,
alert_context: AlertContext,
notification_context: NotificationContext,
metric_issue_context: MetricIssueContext,
open_period_context: OpenPeriodContext,
alert_rule_serialized_response: AlertRuleSerializerResponse,
incident_serialized_response: DetailedIncidentSerializerResponse,
notification_uuid: str | None = None,
) -> bool:
from sentry.integrations.discord.actions.metric_alert import (
send_incident_alert_notification,
)
return send_incident_alert_notification(
organization=organization,
alert_context=alert_context,
notification_context=notification_context,
metric_issue_context=metric_issue_context,
open_period_context=open_period_context,
alert_rule_serialized_response=alert_rule_serialized_response,
incident_serialized_response=incident_serialized_response,
notification_uuid=notification_uuid,
)
@property
def notify_service_action(self) -> type[IntegrationEventAction] | None:
from sentry.integrations.discord.actions.issue_alert.notification import (
DiscordNotifyServiceAction,
)
return DiscordNotifyServiceAction
@property
def notification_sent(self) -> type[analytics.Event] | None:
from sentry.integrations.discord.analytics import DiscordIntegrationNotificationSent
return DiscordIntegrationNotificationSent
| DiscordMessagingSpec |
python | kubernetes-client__python | kubernetes/e2e_test/test_utils.py | {
"start": 28221,
"end": 33599
} | class ____(unittest.TestCase):
def test_parse_quantity(self):
# == trivial returns ==
self.assertEqual(quantity.parse_quantity(Decimal(1)), Decimal(1))
self.assertEqual(quantity.parse_quantity(float(1)), Decimal(1))
self.assertEqual(quantity.parse_quantity(1), Decimal(1))
# == exceptions ==
self.assertRaises(
ValueError, lambda: quantity.parse_quantity("1000kb")
)
self.assertRaises(
ValueError, lambda: quantity.parse_quantity("1000ki")
)
self.assertRaises(ValueError, lambda: quantity.parse_quantity("1000foo"))
self.assertRaises(ValueError, lambda: quantity.parse_quantity("foo"))
# == no suffix ==
self.assertEqual(quantity.parse_quantity("1000"), Decimal(1000))
# == base 1024 ==
self.assertEqual(quantity.parse_quantity("1Ki"), Decimal(1024))
self.assertEqual(quantity.parse_quantity("1Mi"), Decimal(1024**2))
self.assertEqual(quantity.parse_quantity("1Gi"), Decimal(1024**3))
self.assertEqual(quantity.parse_quantity("1Ti"), Decimal(1024**4))
self.assertEqual(quantity.parse_quantity("1Pi"), Decimal(1024**5))
self.assertEqual(quantity.parse_quantity("1Ei"), Decimal(1024**6))
self.assertEqual(quantity.parse_quantity("1024Ki"), Decimal(1024**2))
self.assertEqual(quantity.parse_quantity("0.5Ki"), Decimal(512))
# == base 1000 ==
self.assertAlmostEqual(quantity.parse_quantity("1n"), Decimal(0.000_000_001))
self.assertAlmostEqual(quantity.parse_quantity("1u"), Decimal(0.000_001))
self.assertAlmostEqual(quantity.parse_quantity("1m"), Decimal(0.001))
self.assertEqual(quantity.parse_quantity("1k"), Decimal(1_000))
self.assertEqual(quantity.parse_quantity("1M"), Decimal(1_000_000))
self.assertEqual(quantity.parse_quantity("1G"), Decimal(1_000_000_000))
self.assertEqual(quantity.parse_quantity("1T"), Decimal(1_000_000_000_000))
self.assertEqual(quantity.parse_quantity("1P"), Decimal(1_000_000_000_000_000))
self.assertEqual(
quantity.parse_quantity("1E"), Decimal(1_000_000_000_000_000_000))
self.assertEqual(quantity.parse_quantity("1000k"), Decimal(1_000_000))
self.assertEqual(quantity.parse_quantity("500k"), Decimal(500_000))
def test_format_quantity(self):
"""Unit test for quantity.format_quantity. Testing the different SI suffixes and
function should return the expected string"""
# == unknown suffixes ==
self.assertRaises(
ValueError, lambda: quantity.format_quantity(Decimal(1_000), "kb")
)
self.assertRaises(
ValueError, lambda: quantity.format_quantity(Decimal(1_000), "ki")
)
self.assertRaises(
ValueError, lambda: quantity.format_quantity(Decimal(1_000), "foo")
)
# == no suffix ==
self.assertEqual(quantity.format_quantity(Decimal(1_000), ""), "1000")
self.assertEqual(quantity.format_quantity(Decimal(1_000), None), "1000")
# == base 1024 ==
self.assertEqual(quantity.format_quantity(Decimal(1024), "Ki"), "1Ki")
self.assertEqual(quantity.format_quantity(Decimal(1024**2), "Mi"), "1Mi")
self.assertEqual(quantity.format_quantity(Decimal(1024**3), "Gi"), "1Gi")
self.assertEqual(quantity.format_quantity(Decimal(1024**4), "Ti"), "1Ti")
self.assertEqual(quantity.format_quantity(Decimal(1024**5), "Pi"), "1Pi")
self.assertEqual(quantity.format_quantity(Decimal(1024**6), "Ei"), "1Ei")
self.assertEqual(quantity.format_quantity(Decimal(1024**2), "Ki"), "1024Ki")
self.assertEqual(quantity.format_quantity(Decimal((1024**3) / 2), "Gi"), "0.5Gi")
# Decimal((1024**3)/3) are 0.3333333333333333148296162562Gi; expecting to
# be quantized to 0.3Gi
self.assertEqual(
quantity.format_quantity(
Decimal(
(1024**3) / 3),
"Gi",
quantize=Decimal(.5)),
"0.3Gi")
# == base 1000 ==
self.assertEqual(quantity.format_quantity(Decimal(0.000_000_001), "n"), "1n")
self.assertEqual(quantity.format_quantity(Decimal(0.000_001), "u"), "1u")
self.assertEqual(quantity.format_quantity(Decimal(0.001), "m"), "1m")
self.assertEqual(quantity.format_quantity(Decimal(1_000), "k"), "1k")
self.assertEqual(quantity.format_quantity(Decimal(1_000_000), "M"), "1M")
self.assertEqual(quantity.format_quantity(Decimal(1_000_000_000), "G"), "1G")
self.assertEqual(
quantity.format_quantity(Decimal(1_000_000_000_000), "T"), "1T"
)
self.assertEqual(
quantity.format_quantity(Decimal(1_000_000_000_000_000), "P"), "1P"
)
self.assertEqual(
quantity.format_quantity(Decimal(1_000_000_000_000_000_000), "E"), "1E"
)
self.assertEqual(quantity.format_quantity(Decimal(1_000_000), "k"), "1000k")
# Decimal(1_000_000/3) are 333.3333333333333139307796955k; expecting to
# be quantized to 333k
self.assertEqual(
quantity.format_quantity(
Decimal(1_000_000 / 3), "k", quantize=Decimal(1000)
),
"333k",
)
| TestUtilsUnitTests |
python | huggingface__transformers | tests/models/arcee/test_modeling_arcee.py | {
"start": 1241,
"end": 2468
} | class ____(CausalLMModelTest, unittest.TestCase):
model_tester_class = ArceeModelTester
# Need to use `0.8` instead of `0.9` for `test_cpu_offload`
# This is because we are hitting edge cases with the causal_mask buffer
model_split_percents = [0.5, 0.7, 0.8]
# used in `test_torch_compile_for_training`
_torch_compile_train_cls = ArceeForCausalLM if is_torch_available() else None
def test_arcee_mlp_uses_relu_squared(self):
"""Test that ArceeMLP uses ReLU² activation instead of SiLU."""
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
config.hidden_act = "relu2" # Ensure we're using relu2 activation
model = ArceeModel(config)
# Check that the MLP layers use the correct activation
mlp = model.layers[0].mlp
# Test with a simple input
x = torch.randn(1, 10, config.hidden_size)
up_output = mlp.up_proj(x)
# Verify ReLU² activation: x * relu(x)
expected_activation = up_output * torch.relu(up_output)
actual_activation = mlp.act_fn(up_output)
self.assertTrue(torch.allclose(expected_activation, actual_activation, atol=1e-5))
@require_torch_accelerator
| ArceeModelTest |
python | kamyu104__LeetCode-Solutions | Python/construct-the-minimum-bitwise-array-i.py | {
"start": 48,
"end": 301
} | class ____(object):
def minBitwiseArray(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
return [x-(((x+1)&~x)>>1) if x&1 else -1 for x in nums]
# Time: O(n * r)
# Space: O(1)
# brute force
| Solution |
python | ray-project__ray | python/ray/data/_internal/issue_detection/issue_detector_manager.py | {
"start": 711,
"end": 3767
} | class ____:
def __init__(self, executor: "StreamingExecutor"):
ctx = executor._data_context
self._issue_detectors: List[IssueDetector] = [
cls.from_executor(executor) for cls in ctx.issue_detectors_config.detectors
]
self._last_detection_times: Dict[IssueDetector, float] = {
detector: time.perf_counter() for detector in self._issue_detectors
}
self.executor = executor
self._operator_event_exporter = get_operator_event_exporter()
def invoke_detectors(self) -> None:
curr_time = time.perf_counter()
issues = []
for detector in self._issue_detectors:
if detector.detection_time_interval_s() == -1:
continue
if (
curr_time - self._last_detection_times[detector]
> detector.detection_time_interval_s()
):
issues.extend(detector.detect())
self._last_detection_times[detector] = time.perf_counter()
self._report_issues(issues)
def _report_issues(self, issues: List[Issue]) -> None:
operators: Dict[str, "PhysicalOperator"] = {}
op_to_id: Dict["PhysicalOperator", str] = {}
for i, operator in enumerate(self.executor._topology.keys()):
operators[operator.id] = operator
op_to_id[operator] = self.executor._get_operator_id(operator, i)
# Reset issue detector metrics for each operator so that previous issues
# don't affect the current ones.
operator.metrics._issue_detector_hanging = 0
operator.metrics._issue_detector_high_memory = 0
for issue in issues:
logger.warning(issue.message)
operator = operators.get(issue.operator_id)
if not operator:
continue
issue_event_type = format_export_issue_event_name(issue.issue_type)
if (
self._operator_event_exporter is not None
and issue_event_type
in ProtoOperatorEventData.DatasetOperatorEventType.keys()
):
event_time = time.time()
operator_event = OperatorEvent(
dataset_id=issue.dataset_name,
operator_id=op_to_id[operator],
operator_name=operator.name,
event_time=event_time,
event_type=issue_event_type,
message=issue.message,
)
self._operator_event_exporter.export_operator_event(operator_event)
if issue.issue_type == IssueType.HANGING:
operator.metrics._issue_detector_hanging += 1
if issue.issue_type == IssueType.HIGH_MEMORY:
operator.metrics._issue_detector_high_memory += 1
if len(issues) > 0:
logger.warning(
"To disable issue detection, run DataContext.get_current().issue_detectors_config.detectors = []."
)
| IssueDetectorManager |
python | doocs__leetcode | solution/1700-1799/1707.Maximum XOR With an Element From Array/Solution.py | {
"start": 722,
"end": 1162
} | class ____:
def maximizeXor(self, nums: List[int], queries: List[List[int]]) -> List[int]:
trie = Trie()
nums.sort()
j, n = 0, len(queries)
ans = [-1] * n
for i, (x, m) in sorted(zip(range(n), queries), key=lambda x: x[1][1]):
while j < len(nums) and nums[j] <= m:
trie.insert(nums[j])
j += 1
ans[i] = trie.search(x)
return ans
| Solution |
python | tensorflow__tensorflow | tensorflow/python/ops/special_math_ops_test.py | {
"start": 14211,
"end": 16184
} | class ____(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_spence_boundary(self):
self.assertAllClose(np.pi**2 / 6., special_math_ops.spence(0.))
self.assertAllClose(0., special_math_ops.spence(1.))
self.assertTrue(np.isnan(self.evaluate(special_math_ops.spence(np.nan))))
# Check that the domain of definition is [0, inf)
self.assertTrue(
np.all(
np.isnan(
self.evaluate(
special_math_ops.spence(
np.random.uniform(-20., -1., size=int(1e3)))))))
@parameterized.parameters(np.float32, np.float64)
def test_spence_small(self, dtype):
x = np.random.uniform(0., 1., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.spence(x), self.evaluate(special_math_ops.spence(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_spence_larger(self, dtype):
x = np.random.uniform(1., 100., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.spence(x), self.evaluate(special_math_ops.spence(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
def test_spence_gradient(self):
inputs = [np.random.uniform(1., 50., size=int(1e2))]
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.spence, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
def test_spence_gradient_at_one(self):
analytical, _ = gradient_checker_v2.compute_gradient(
special_math_ops.spence, [1.])
self.assertAllClose([[[-1.]]], analytical)
@test_util.run_all_in_graph_and_eager_modes
| SpenceTest |
python | openai__openai-python | src/openai/lib/streaming/chat/_completions.py | {
"start": 8642,
"end": 10139
} | class ____(Generic[ResponseFormatT]):
"""Context manager over a `AsyncChatCompletionStream` that is returned by `.stream()`.
This context manager ensures the response cannot be leaked if you don't read
the stream to completion.
Usage:
```py
async with client.chat.completions.stream(...) as stream:
for event in stream:
...
```
"""
def __init__(
self,
api_request: Awaitable[AsyncStream[ChatCompletionChunk]],
*,
response_format: type[ResponseFormatT] | ResponseFormatParam | Omit,
input_tools: Iterable[ChatCompletionToolUnionParam] | Omit,
) -> None:
self.__stream: AsyncChatCompletionStream[ResponseFormatT] | None = None
self.__api_request = api_request
self.__response_format = response_format
self.__input_tools = input_tools
async def __aenter__(self) -> AsyncChatCompletionStream[ResponseFormatT]:
raw_stream = await self.__api_request
self.__stream = AsyncChatCompletionStream(
raw_stream=raw_stream,
response_format=self.__response_format,
input_tools=self.__input_tools,
)
return self.__stream
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
if self.__stream is not None:
await self.__stream.close()
| AsyncChatCompletionStreamManager |
python | walkccc__LeetCode | solutions/735. Asteroid Collision/735.py | {
"start": 0,
"end": 587
} | class ____:
def asteroidCollision(self, asteroids: list[int]) -> list[int]:
stack = []
for a in asteroids:
if a > 0:
stack.append(a)
else: # a < 0
# Destroy the previous positive one(s).
while stack and stack[-1] > 0 and stack[-1] < -a:
stack.pop()
if not stack or stack[-1] < 0:
stack.append(a)
elif stack[-1] == -a:
stack.pop() # Both asteroids explode.
else: # stack[-1] > the current asteroid.
pass # Destroy the current asteroid, so do nothing.
return stack
| Solution |
python | coleifer__peewee | peewee.py | {
"start": 163458,
"end": 163690
} | class ____(CharField):
field_type = 'CHAR'
def python_value(self, value):
value = super(FixedCharField, self).python_value(value)
if value:
value = value.strip()
return value
| FixedCharField |
python | allegroai__clearml | clearml/backend_api/services/v2_20/queues.py | {
"start": 23551,
"end": 26238
} | class ____(Request):
"""
Create a new queue
:param name: Queue name Unique within the company.
:type name: str
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use, please don't use it.
:type system_tags: Sequence[str]
"""
_service = "queues"
_action = "create"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"name": {
"description": "Queue name Unique within the company.",
"type": "string",
},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": "array",
},
},
"required": ["name"],
"type": "object",
}
def __init__(
self, name: str, tags: Optional[List[str]] = None, system_tags: Optional[List[str]] = None, **kwargs: Any
) -> None:
super(CreateRequest, self).__init__(**kwargs)
self.name = name
self.tags = tags
self.system_tags = system_tags
@schema_property("name")
def name(self) -> str:
return self._property_name
@name.setter
def name(self, value: str) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
| CreateRequest |
python | realpython__materials | hashtable/01_hashtable_prototype/07_use_defensive_copying/hashtable.py | {
"start": 107,
"end": 1109
} | class ____:
def __init__(self, capacity):
self._pairs = capacity * [None]
def __len__(self):
return len(self._pairs)
def __delitem__(self, key):
if key in self:
self._pairs[self._index(key)] = None
else:
raise KeyError(key)
def __setitem__(self, key, value):
self._pairs[self._index(key)] = Pair(key, value)
def __getitem__(self, key):
pair = self._pairs[self._index(key)]
if pair is None:
raise KeyError(key)
return pair.value
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
@property
def pairs(self):
return [pair for pair in self._pairs if pair]
def _index(self, key):
return hash(key) % len(self)
| HashTable |
python | walkccc__LeetCode | solutions/1252. Cells with Odd Values in a Matrix/1252-2.py | {
"start": 0,
"end": 524
} | class ____:
def oddCells(self, m: int, n: int, indices: list[list[int]]) -> int:
# rows[i] and cols[i] :=
# 1. True (flipped even times)
# 2. False (flipped odd times)
rows = [False] * m
cols = [False] * n
for r, c in indices:
rows[r] ^= True
cols[c] ^= True
oddRowsCount = rows.count(True)
oddColsCount = cols.count(True)
evenRowsCount = m - oddRowsCount
evenColsCount = n - oddColsCount
return oddRowsCount * evenColsCount + oddColsCount * evenRowsCount
| Solution |
python | allegroai__clearml | clearml/utilities/proxy_object.py | {
"start": 2048,
"end": 3598
} | class ____(dict):
"""Dictionary wrapper that prevents modifications to the dictionary"""
def __init__(
self,
update_obj: Any,
update_func: Callable,
*args: Any,
**kwargs: Any,
) -> None:
super(ProxyDictPreWrite, self).__init__(*args, **kwargs)
self._update_func = None
for k, i in self.items():
if isinstance(i, dict):
self.update({k: ProxyDictPreWrite(k, self._nested_callback, i)})
self._update_obj = update_obj
self._update_func = update_func
def __reduce__(self) -> tuple:
return dict, (), None, None, iter(self.items())
def __setitem__(self, key: Any, value: Any) -> None:
key_value = self._set_callback(
(
key,
value,
)
)
if key_value:
super(ProxyDictPreWrite, self).__setitem__(*key_value)
def _set_callback(self, key_value: Any, *_: Any) -> Optional[Any]:
if self._update_func is not None:
if callable(self._update_func):
res = self._update_func(self._update_obj, key_value)
else:
res = self._update_func
if not res:
return None
return res
return key_value
def _nested_callback(self, prefix: str, key_value: tuple) -> tuple:
return self._set_callback(
(
prefix + "." + key_value[0],
key_value[1],
)
)
| ProxyDictPreWrite |
python | redis__redis-py | redis/maint_notifications.py | {
"start": 322,
"end": 843
} | class ____(enum.Enum):
"""Valid endpoint types used in CLIENT MAINT_NOTIFICATIONS command."""
INTERNAL_IP = "internal-ip"
INTERNAL_FQDN = "internal-fqdn"
EXTERNAL_IP = "external-ip"
EXTERNAL_FQDN = "external-fqdn"
NONE = "none"
def __str__(self):
"""Return the string value of the enum."""
return self.value
if TYPE_CHECKING:
from redis.connection import (
MaintNotificationsAbstractConnection,
MaintNotificationsAbstractConnectionPool,
)
| EndpointType |
python | cython__cython | Cython/Compiler/Code.py | {
"start": 47062,
"end": 47513
} | class ____:
"""Global info about a Python number constant held by GlobalState.
cname string
value string
py_type string int, long, float
value_code string evaluation code if different from value
"""
def __init__(self, cname, value, py_type, value_code=None):
self.cname = cname
self.value = value
self.py_type = py_type
self.value_code = value_code or value
| NumConst |
python | rapidsai__cudf | python/pylibcudf/tests/test_column_from_device.py | {
"start": 704,
"end": 3689
} | class ____:
def __init__(self, obj, dtype):
self.obj = rmm.DeviceBuffer.to_device(obj, plc.utils._get_stream())
self.dtype = dtype
self.shape = (int(len(self.obj) / self.dtype.itemsize),)
self.strides = (self.dtype.itemsize,)
self.typestr = self.dtype.str
@property
def __cuda_array_interface__(self):
return {
"data": self.obj.__cuda_array_interface__["data"],
"shape": self.shape,
"strides": self.strides,
"typestr": self.typestr,
"version": 0,
}
@pytest.fixture
def input_column(valid_type):
if valid_type == pa.bool_():
return pa.array([True, False, True], type=valid_type)
return pa.array([1, 2, 3], type=valid_type)
@pytest.fixture
def iface_obj(input_column):
data = input_column.to_numpy(zero_copy_only=False)
return DataBuffer(data.view("uint8"), data.dtype)
@pytest.mark.parametrize("patch_cai", [True, False])
def test_from_cuda_array_interface(
monkeypatch, input_column, iface_obj, patch_cai
):
if patch_cai:
# patch strides to be None to test C-configuous layout
monkeypatch.setattr(iface_obj, "strides", None)
res = plc.Column.from_cuda_array_interface(iface_obj)
assert_column_eq(input_column, res)
def test_from_rmm_buffer():
result = pa.array([1, 2, 3], type=pa.int32())
expected = plc.Column.from_rmm_buffer(
rmm.DeviceBuffer.to_device(
result.buffers()[1].to_pybytes(), plc.utils._get_stream()
),
plc.DataType.from_arrow(result.type),
len(result),
[],
)
assert_column_eq(result, expected)
result = pa.array(["a", "b", "c"], type=pa.string())
expected = plc.Column.from_rmm_buffer(
rmm.DeviceBuffer.to_device(
result.buffers()[2].to_pybytes(), plc.utils._get_stream()
),
plc.DataType.from_arrow(result.type),
len(result),
[
plc.Column.from_rmm_buffer(
rmm.DeviceBuffer.to_device(
result.buffers()[1].to_pybytes(), plc.utils._get_stream()
),
plc.DataType(plc.TypeId.INT32),
4,
[],
)
],
)
assert_column_eq(result, expected)
@pytest.mark.parametrize(
"dtype, children_data",
[
(plc.DataType(plc.TypeId.INT32), [[0, 1, 2]]),
(plc.DataType(plc.TypeId.STRING), []),
(plc.DataType(plc.TypeId.STRING), [[0, 1], [0, 1]]),
(plc.DataType(plc.TypeId.LIST), []),
],
)
def test_from_rmm_buffer_invalid(dtype, children_data):
buff = rmm.DeviceBuffer.to_device(b"", plc.utils._get_stream())
children = [
plc.Column.from_arrow(pa.array(child_data))
for child_data in children_data
]
with pytest.raises(ValueError):
plc.Column.from_rmm_buffer(
buff,
dtype,
0,
children,
)
| DataBuffer |
python | kamyu104__LeetCode-Solutions | Python/find-resultant-array-after-removing-anagrams.py | {
"start": 947,
"end": 1196
} | class ____(object):
def removeAnagrams(self, words):
"""
:type words: List[str]
:rtype: List[str]
"""
return [words[i] for i in xrange(len(words)) if i == 0 or sorted(words[i-1]) != sorted(words[i])]
| Solution3 |
python | aio-libs__aiohttp | aiohttp/client.py | {
"start": 4755,
"end": 5957
} | class ____:
total: float | None = None
connect: float | None = None
sock_read: float | None = None
sock_connect: float | None = None
ceil_threshold: float = 5
# pool_queue_timeout: Optional[float] = None
# dns_resolution_timeout: Optional[float] = None
# socket_connect_timeout: Optional[float] = None
# connection_acquiring_timeout: Optional[float] = None
# new_connection_timeout: Optional[float] = None
# http_header_timeout: Optional[float] = None
# response_body_timeout: Optional[float] = None
# to create a timeout specific for a single request, either
# - create a completely new one to overwrite the default
# - or use https://docs.python.org/3/library/dataclasses.html#dataclasses.replace
# to overwrite the defaults
# 5 Minute default read timeout
DEFAULT_TIMEOUT: Final[ClientTimeout] = ClientTimeout(total=5 * 60, sock_connect=30)
# https://www.rfc-editor.org/rfc/rfc9110#section-9.2.2
IDEMPOTENT_METHODS = frozenset({"GET", "HEAD", "OPTIONS", "TRACE", "PUT", "DELETE"})
_RetType = TypeVar("_RetType", ClientResponse, ClientWebSocketResponse)
_CharsetResolver = Callable[[ClientResponse, bytes], str]
@final
| ClientTimeout |
python | dask__dask | dask/dataframe/io/orc/arrow.py | {
"start": 159,
"end": 4485
} | class ____:
@classmethod
def read_metadata(
cls,
fs,
paths,
columns,
index,
split_stripes,
aggregate_files,
**kwargs,
):
# Convert root directory to file list.
# TODO: Handle hive-partitioned data
if len(paths) == 1 and not fs.isfile(paths[0]):
paths = fs.find(paths[0])
schema = None
parts = []
def _get_schema(_o, schema):
if schema is None:
schema = _o.schema
elif schema != _o.schema:
raise ValueError("Incompatible schemas while parsing ORC files")
return schema
if split_stripes:
offset = 0
for path in paths:
with fs.open(path, "rb") as f:
o = orc.ORCFile(f)
if schema is None:
schema = o.schema
elif schema != o.schema:
raise ValueError("Incompatible schemas while parsing ORC files")
_stripes = list(range(o.nstripes))
if offset:
parts.append([(path, _stripes[0:offset])])
while offset < o.nstripes:
parts.append(
[(path, _stripes[offset : offset + int(split_stripes)])]
)
offset += int(split_stripes)
if aggregate_files and int(split_stripes) > 1:
offset -= o.nstripes
else:
offset = 0
else:
for path in paths:
if schema is None:
with fs.open(paths[0], "rb") as f:
o = orc.ORCFile(f)
schema = o.schema
parts.append([(path, None)])
schema = _get_pyarrow_dtypes(schema, categories=None)
if columns is not None:
ex = set(columns) - set(schema)
if ex:
raise ValueError(
f"Requested columns ({ex}) not in schema ({set(schema)})"
)
# Check if we can aggregate adjacent parts together
parts = cls._aggregate_files(aggregate_files, split_stripes, parts)
columns = list(schema) if columns is None else columns
index = [index] if isinstance(index, str) else index
meta = _meta_from_dtypes(columns, schema, index, [])
return parts, schema, meta
@classmethod
def _aggregate_files(cls, aggregate_files, split_stripes, parts):
if aggregate_files is True and int(split_stripes) > 1 and len(parts) > 1:
new_parts = []
new_part = parts[0]
nstripes = len(new_part[0][1])
for part in parts[1:]:
next_nstripes = len(part[0][1])
if next_nstripes + nstripes <= split_stripes:
new_part.append(part[0])
nstripes += next_nstripes
else:
new_parts.append(new_part)
new_part = part
nstripes = next_nstripes
new_parts.append(new_part)
return new_parts
else:
return parts
@classmethod
def read_partition(cls, fs, parts, schema, columns, **kwargs):
batches = []
for path, stripes in parts:
batches += _read_orc_stripes(fs, path, stripes, schema, columns)
return pa.Table.from_batches(batches).to_pandas(date_as_object=False)
@classmethod
def write_partition(cls, df, path, fs, filename, **kwargs):
table = pa.Table.from_pandas(df)
with fs.open(fs.sep.join([path, filename]), "wb") as f:
orc.write_table(table, f)
def _read_orc_stripes(fs, path, stripes, schema, columns):
# Construct a list of RecordBatch objects.
# Each ORC stripe will corresonpond to a single RecordBatch.
if columns is None:
columns = list(schema)
batches = []
with fs.open(path, "rb") as f:
o = orc.ORCFile(f)
_stripes = range(o.nstripes) if stripes is None else stripes
for stripe in _stripes:
batches.append(o.read_stripe(stripe, columns))
return batches
| ArrowORCEngine |
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 312658,
"end": 319237
} | class ____:
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-2, 4], NEIGH_MODE['zero'], [0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['mirror'], [-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['circular'], [-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
# being strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = _multiarray_tests.test_neighborhood_iterator_oob(
x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
| TestStackedNeighborhoodIter |
python | wandb__wandb | wandb/sdk/data_types/plotly.py | {
"start": 729,
"end": 3357
} | class ____(Media):
"""W&B class for Plotly plots."""
_log_type = "plotly-file"
@classmethod
def make_plot_media(
cls: Type["Plotly"], val: Union["plotly.Figure", "matplotlib.artist.Artist"]
) -> Union[Image, "Plotly"]:
"""Create a Plotly object from a Plotly figure or a matplotlib artist.
<!-- lazydoc-ignore-classmethod: internal -->
"""
if util.is_matplotlib_typename(util.get_full_typename(val)):
if util.matplotlib_contains_images(val):
return Image(val)
val = util.matplotlib_to_plotly(val)
return cls(val)
def __init__(self, val: Union["plotly.Figure", "matplotlib.artist.Artist"]):
"""Initialize a Plotly object.
Args:
val: Matplotlib or Plotly figure.
"""
super().__init__()
# First, check to see if the incoming `val` object is a plotfly figure
if not util.is_plotly_figure_typename(util.get_full_typename(val)):
# If it is not, but it is a matplotlib figure, then attempt to convert it to plotly
if util.is_matplotlib_typename(util.get_full_typename(val)):
if util.matplotlib_contains_images(val):
raise ValueError(
"Plotly does not currently support converting matplotlib figures containing images. \
You can convert the plot to a static image with `wandb.Image(plt)` "
)
val = util.matplotlib_to_plotly(val)
else:
raise ValueError(
"Logged plots must be plotly figures, or matplotlib plots convertible to plotly via mpl_to_plotly"
)
tmp_path = os.path.join(MEDIA_TMP.name, runid.generate_id() + ".plotly.json")
val = _numpy_arrays_to_lists(val.to_plotly_json())
with codecs.open(tmp_path, "w", encoding="utf-8") as fp:
util.json_dump_safer(val, fp)
self._set_file(tmp_path, is_tmp=True, extension=".plotly.json")
@classmethod
def get_media_subdir(cls: Type["Plotly"]) -> str:
"""Returns the media subdirectory for Plotly plots.
<!-- lazydoc-ignore-classmethod: internal -->
"""
return os.path.join("media", "plotly")
def to_json(self, run_or_artifact: Union["LocalRun", "Artifact"]) -> dict:
"""Convert the Plotly object to a JSON representation.
<!-- lazydoc-ignore: internal -->
"""
json_dict = super().to_json(run_or_artifact)
json_dict["_type"] = self._log_type
return json_dict
| Plotly |
python | apache__airflow | providers/standard/tests/unit/standard/decorators/test_python_virtualenv.py | {
"start": 1959,
"end": 12617
} | class ____:
@CLOUDPICKLE_MARKER
def test_add_cloudpickle(self, dag_maker):
@task.virtualenv(serializer="cloudpickle", system_site_packages=False)
def f():
"""Ensure cloudpickle is correctly installed."""
import cloudpickle # noqa: F401
with dag_maker(serialized=True):
f()
dr = dag_maker.create_dagrun()
dag_maker.run_ti("f", dr)
@DILL_MARKER
def test_add_dill(self, dag_maker):
@task.virtualenv(serializer="dill", system_site_packages=False)
def f():
"""Ensure dill is correctly installed."""
import dill # noqa: F401
with dag_maker(serialized=True):
f()
dr = dag_maker.create_dagrun()
dag_maker.run_ti("f", dr)
def test_no_requirements(self, dag_maker):
"""Tests that the python callable is invoked on task run."""
@task.virtualenv()
def f():
pass
with dag_maker(serialized=True):
f()
dr = dag_maker.create_dagrun()
dag_maker.run_ti("f", dr)
@pytest.mark.parametrize(
"serializer",
[
pytest.param("dill", marks=DILL_MARKER, id="dill"),
pytest.param("cloudpickle", marks=CLOUDPICKLE_MARKER, id="cloudpickle"),
],
)
def test_no_system_site_packages(self, serializer, dag_maker):
@task.virtualenv(system_site_packages=False, python_version=PYTHON_VERSION, serializer=serializer)
def f():
try:
import funcsigs # noqa: F401
except ImportError:
return True
raise Exception
with dag_maker(serialized=True):
f()
dr = dag_maker.create_dagrun()
dag_maker.run_ti("f", dr)
@pytest.mark.parametrize(
"serializer",
[
pytest.param("dill", marks=DILL_MARKER, id="dill"),
pytest.param("cloudpickle", marks=CLOUDPICKLE_MARKER, id="cloudpickle"),
],
)
def test_system_site_packages(self, serializer, dag_maker):
@task.virtualenv(
system_site_packages=False,
requirements=["funcsigs"],
python_version=PYTHON_VERSION,
serializer=serializer,
)
def f():
import funcsigs # noqa: F401
with dag_maker(serialized=True):
f()
dr = dag_maker.create_dagrun()
dag_maker.run_ti("f", dr)
@pytest.mark.parametrize(
"serializer",
[
pytest.param("pickle", id="pickle"),
pytest.param("dill", marks=DILL_MARKER, id="dill"),
pytest.param("cloudpickle", marks=CLOUDPICKLE_MARKER, id="cloudpickle"),
pytest.param(None, id="default"),
],
)
def test_with_requirements_pinned(self, serializer, dag_maker):
@task.virtualenv(
system_site_packages=False,
requirements=["funcsigs==0.4"],
python_version=PYTHON_VERSION,
serializer=serializer,
)
def f():
import funcsigs
if funcsigs.__version__ != "0.4":
raise Exception
with dag_maker(serialized=True):
f()
dr = dag_maker.create_dagrun()
dag_maker.run_ti("f", dr)
@pytest.mark.parametrize(
"serializer",
[
pytest.param("pickle", id="pickle"),
pytest.param("dill", marks=DILL_MARKER, id="dill"),
pytest.param("cloudpickle", marks=CLOUDPICKLE_MARKER, id="cloudpickle"),
pytest.param(None, id="default"),
],
)
def test_with_requirements_file(self, serializer, dag_maker, tmp_path):
requirements_file = tmp_path / "requirements.txt"
requirements_file.write_text("funcsigs==0.4\nattrs==23.1.0")
@task.virtualenv(
system_site_packages=False,
requirements="requirements.txt",
python_version=PYTHON_VERSION,
serializer=serializer,
)
def f():
import funcsigs
if funcsigs.__version__ != "0.4":
raise Exception
import attrs
if attrs.__version__ != "23.1.0":
raise Exception
with dag_maker(template_searchpath=tmp_path.as_posix(), serialized=True):
f()
dr = dag_maker.create_dagrun()
dag_maker.run_ti("f", dr)
@pytest.mark.parametrize(
("serializer", "extra_requirements"),
[
pytest.param("pickle", [], id="pickle"),
pytest.param("dill", ["dill"], marks=DILL_MARKER, id="dill"),
pytest.param("cloudpickle", ["cloudpickle"], marks=CLOUDPICKLE_MARKER, id="cloudpickle"),
pytest.param(None, [], id="default"),
],
)
def test_unpinned_requirements(self, serializer, extra_requirements, dag_maker):
@task.virtualenv(
system_site_packages=False,
requirements=["funcsigs", *extra_requirements],
python_version=PYTHON_VERSION,
serializer=serializer,
)
def f():
import funcsigs # noqa: F401
with dag_maker(serialized=True):
f()
dr = dag_maker.create_dagrun()
dag_maker.run_ti("f", dr)
@pytest.mark.parametrize(
"serializer",
[
pytest.param("pickle", id="pickle"),
pytest.param("dill", marks=DILL_MARKER, id="dill"),
pytest.param("cloudpickle", marks=CLOUDPICKLE_MARKER, id="cloudpickle"),
pytest.param(None, id="default"),
],
)
def test_fail(self, serializer, dag_maker):
@task.virtualenv(serializer=serializer)
def f():
raise Exception
with dag_maker(serialized=True):
f()
dr = dag_maker.create_dagrun()
with pytest.raises(CalledProcessError):
dag_maker.run_ti("f", dr)
@pytest.mark.parametrize(
("serializer", "extra_requirements"),
[
pytest.param("pickle", [], id="pickle"),
pytest.param("dill", ["dill"], marks=DILL_MARKER, id="dill"),
pytest.param("cloudpickle", ["cloudpickle"], marks=CLOUDPICKLE_MARKER, id="cloudpickle"),
pytest.param(None, [], id="default"),
],
)
def test_python_3(self, serializer, extra_requirements, dag_maker):
@task.virtualenv(python_version="3", serializer=serializer, requirements=extra_requirements)
def f():
import sys
print(sys.version)
try:
{}.iteritems()
except AttributeError:
return
raise Exception
with dag_maker(serialized=True):
f()
dr = dag_maker.create_dagrun()
dag_maker.run_ti("f", dr)
@pytest.mark.parametrize(
("serializer", "extra_requirements"),
[
pytest.param("pickle", [], id="pickle"),
pytest.param("dill", ["dill"], marks=DILL_MARKER, id="dill"),
pytest.param("cloudpickle", ["cloudpickle"], marks=CLOUDPICKLE_MARKER, id="cloudpickle"),
pytest.param(None, [], id="default"),
],
)
def test_with_args(self, serializer, extra_requirements, dag_maker):
@task.virtualenv(serializer=serializer, requirements=extra_requirements)
def f(a, b, c=False, d=False):
if a == 0 and b == 1 and c and not d:
return True
raise Exception
with dag_maker(serialized=True):
f(0, 1, c=True)
dr = dag_maker.create_dagrun()
dag_maker.run_ti("f", dr)
def test_return_none(self, dag_maker):
@task.virtualenv
def f():
return None
with dag_maker(serialized=True):
f()
dr = dag_maker.create_dagrun()
dag_maker.run_ti("f", dr)
def test_nonimported_as_arg(self, dag_maker):
@task.virtualenv
def f(_):
return None
with dag_maker(serialized=True):
f(datetime.datetime.now(tz=datetime.timezone.utc))
dr = dag_maker.create_dagrun()
dag_maker.run_ti("f", dr)
def test_marking_virtualenv_python_task_as_setup(self, dag_maker):
@setup
@task.virtualenv
def f():
return 1
with dag_maker(serialized=True) as dag:
f()
dr = dag_maker.create_dagrun()
assert len(dag.task_group.children) == 1
setup_task = dag.task_group.children["f"]
assert setup_task.is_setup
dag_maker.run_ti("f", dr)
def test_marking_virtualenv_python_task_as_teardown(self, dag_maker):
@teardown
@task.virtualenv
def f():
return 1
with dag_maker(serialized=True) as dag:
f()
dr = dag_maker.create_dagrun()
assert len(dag.task_group.children) == 1
teardown_task = dag.task_group.children["f"]
assert teardown_task.is_teardown
dag_maker.run_ti("f", dr)
@pytest.mark.parametrize("on_failure_fail_dagrun", [True, False])
def test_marking_virtualenv_python_task_as_teardown_with_on_failure_fail(
self, dag_maker, on_failure_fail_dagrun
):
@teardown(on_failure_fail_dagrun=on_failure_fail_dagrun)
@task.virtualenv
def f():
return 1
with dag_maker(serialized=True) as dag:
f()
dr = dag_maker.create_dagrun()
assert len(dag.task_group.children) == 1
teardown_task = dag.task_group.children["f"]
assert teardown_task.is_teardown
assert teardown_task.on_failure_fail_dagrun is on_failure_fail_dagrun
dag_maker.run_ti("f", dr)
def test_invalid_annotation(self, dag_maker):
import uuid
unique_id = uuid.uuid4().hex
value = {"unique_id": unique_id}
# Functions that throw an error
# if `from __future__ import annotations` is missing
@task.virtualenv(multiple_outputs=False, do_xcom_push=True)
def in_venv(value: dict[str, _Invalid]) -> _Invalid:
assert isinstance(value, dict)
return value["unique_id"]
with dag_maker(serialized=True):
in_venv(value)
dr = dag_maker.create_dagrun()
dag_maker.run_ti("in_venv", dr)
ti = dr.get_task_instances()[0]
assert ti.state == TaskInstanceState.SUCCESS
xcom = ti.xcom_pull(task_ids=ti.task_id, key="return_value")
assert isinstance(xcom, str)
assert xcom == unique_id
| TestPythonVirtualenvDecorator |
python | huggingface__transformers | src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py | {
"start": 32521,
"end": 34853
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: Qwen2_5OmniAudioEncoderConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = Qwen2_5OmniAudioAttention(config)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
cu_seqlens: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
**kwargs,
) -> torch.Tensor:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states = self.self_attn(
hidden_states=hidden_states,
cu_seqlens=cu_seqlens,
attention_mask=attention_mask,
**kwargs,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
hidden_states = residual + hidden_states
if hidden_states.dtype == torch.float16:
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
return outputs
| Qwen2_5OmniAudioEncoderLayer |
python | scipy__scipy | scipy/integrate/tests/test_cubature.py | {
"start": 35540,
"end": 36604
} | class ____:
@pytest.mark.parametrize(("a", "b", "points"), [
(
[0, 1, -math.inf],
[1, math.inf, math.inf],
[
[1, 1, 1],
[0.5, 10, 10],
]
)
])
def test_infinite_limits_maintains_points(self, a, b, points, xp):
"""
Test that break points are correctly mapped under the _InfiniteLimitsTransform
transformation.
"""
points = [xp.asarray(p, dtype=xp.float64) for p in points]
f_transformed = _InfiniteLimitsTransform(
# Bind `points` and `xp` argument in f
lambda x: f_with_problematic_points(x, points, xp),
xp.asarray(a, dtype=xp.float64),
xp.asarray(b, dtype=xp.float64),
xp=xp,
)
for point in points:
transformed_point = f_transformed.inv(xp.reshape(point, (1, -1)))
with pytest.raises(Exception, match="called with a problematic point"):
f_transformed(transformed_point)
| TestTransformations |
python | getsentry__sentry | tests/sentry/integrations/api/endpoints/test_organization_coding_agents.py | {
"start": 1403,
"end": 1919
} | class ____(CodingAgentIntegration):
"""Mock coding agent installation for tests."""
def get_client(self):
return MockCodingAgentClient()
def launch(self, request: CodingAgentLaunchRequest) -> CodingAgentState:
return CodingAgentState(
id="mock-123",
status=CodingAgentStatus.PENDING,
provider=CodingAgentProviderType.CURSOR_BACKGROUND_AGENT,
name="Mock Agent",
started_at=datetime.now(UTC),
)
| MockCodingAgentInstallation |
python | bokeh__bokeh | src/bokeh/models/widgets/tables.py | {
"start": 2638,
"end": 2901
} | class ____(Model):
''' Abstract base class for data table's cell formatters.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
@abstract
| CellFormatter |
python | wandb__wandb | wandb/sdk/artifacts/_generated/artifact_collection_membership_files.py | {
"start": 1212,
"end": 1569
} | class ____(
GQLResult
):
page_info: PageInfoFragment = Field(alias="pageInfo")
edges: List[
ArtifactCollectionMembershipFilesProjectArtifactCollectionArtifactMembershipFilesEdges
]
total_count: Optional[int] = Field(alias="totalCount", default=None)
| ArtifactCollectionMembershipFilesProjectArtifactCollectionArtifactMembershipFiles |
python | huggingface__transformers | src/transformers/models/mixtral/modeling_mixtral.py | {
"start": 30835,
"end": 30938
} | class ____(GenericForTokenClassification, MixtralPreTrainedModel):
pass
| MixtralForTokenClassification |
python | psf__black | tests/data/cases/allow_empty_first_line.py | {
"start": 1524,
"end": 1794
} | class ____:
def method(self):
pass
async def async_fn():
"""Docstring."""
@decorated
async def async_fn():
"""Docstring."""
def top_level(
a: int,
b: str,
) -> Whatever[Generic, Something]:
def nested(x: int) -> int:
pass
| Cls |
python | pytorch__pytorch | test/test_jit_disabled.py | {
"start": 2113,
"end": 2396
} | class ____(torch.nn.Module):
def forward(self, input):
pass
sm = torch.jit.script(AModule())
print("Didn't throw exception")
"""
self.compare_enabled_disabled(_program_string)
if __name__ == '__main__':
if sys.version_info < (3, 14):
run_tests()
| AModule |
python | ray-project__ray | release/ray_release/tests/test_glue.py | {
"start": 1745,
"end": 1833
} | class ____(Test):
def get_anyscale_byod_image(self) -> str:
return ""
| MockTest |
python | cherrypy__cherrypy | cherrypy/process/win32.py | {
"start": 4380,
"end": 5348
} | class ____(dict):
"""Control codes used to "signal" a service via ControlService.
User-defined control codes are in the range 128-255. We generally use
the standard Python value for the Linux signal and add 128. Example:
>>> signal.SIGUSR1
10
control_codes['graceful'] = 128 + 10
"""
def key_for(self, obj):
"""For the given value, return its corresponding key."""
for key, val in self.items():
if val is obj:
return key
raise ValueError('The given object could not be found: %r' % obj)
control_codes = _ControlCodes({'graceful': 138})
def signal_child(service, command):
"""Send a control command to a service."""
if command == 'stop':
win32serviceutil.StopService(service)
elif command == 'restart':
win32serviceutil.RestartService(service)
else:
win32serviceutil.ControlService(service, control_codes[command])
| _ControlCodes |
python | huggingface__transformers | src/transformers/models/esm/modeling_esm.py | {
"start": 37719,
"end": 39114
} | class ____(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
def create_position_ids_from_input_ids(input_ids, padding_idx):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
x: torch.Tensor x:
Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
return incremental_indices.long() + padding_idx
__all__ = [
"EsmForMaskedLM",
"EsmForSequenceClassification",
"EsmForTokenClassification",
"EsmModel",
"EsmPreTrainedModel",
]
| EsmClassificationHead |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/build_systems/perl.py | {
"start": 638,
"end": 1253
} | class ____(BuilderWithDefaults):
phases = ("configure", "build", "install")
package_methods = ("check", "test_use")
package_attributes = ()
build_time_test_callbacks = ["check"]
def configure(self, pkg: PerlPackage, spec: Spec, prefix: Prefix) -> None:
pass
def build(self, pkg: PerlPackage, spec: Spec, prefix: Prefix) -> None:
pass
def install(self, pkg: PerlPackage, spec: Spec, prefix: Prefix) -> None:
pass
def check(self):
pass
# Ensure that tests run after build (if requested):
run_after("build")(execute_build_time_tests)
| PerlBuilder |
python | pytest-dev__pytest | testing/test_junitxml.py | {
"start": 946,
"end": 2543
} | class ____:
def __init__(self, pytester: Pytester, schema: xmlschema.XMLSchema) -> None:
self.pytester = pytester
self.schema = schema
def __call__(
self, *args: str | os.PathLike[str], family: str | None = "xunit1"
) -> tuple[RunResult, DomDocument]:
if family:
args = ("-o", "junit_family=" + family, *args)
xml_path = self.pytester.path.joinpath("junit.xml")
result = self.pytester.runpytest(f"--junitxml={xml_path}", *args)
if family == "xunit2":
with xml_path.open(encoding="utf-8") as f:
self.schema.validate(f)
xmldoc = minidom.parse(str(xml_path))
return result, DomDocument(xmldoc)
@pytest.fixture
def run_and_parse(pytester: Pytester, schema: xmlschema.XMLSchema) -> RunAndParse:
"""Fixture that returns a function that can be used to execute pytest and
return the parsed ``DomNode`` of the root xml node.
The ``family`` parameter is used to configure the ``junit_family`` of the written report.
"xunit2" is also automatically validated against the schema.
"""
return RunAndParse(pytester, schema)
def assert_attr(node: minidom.Element, **kwargs: object) -> None:
__tracebackhide__ = True
def nodeval(node: minidom.Element, name: str) -> str | None:
anode = node.getAttributeNode(name)
return anode.value if anode is not None else None
expected = {name: str(value) for name, value in kwargs.items()}
on_node = {name: nodeval(node, name) for name in expected}
assert on_node == expected
| RunAndParse |
python | streamlit__streamlit | lib/tests/streamlit/data_test_cases.py | {
"start": 3617,
"end": 3966
} | class ____:
"""A dummy dataframe-like class that supports the dataframe interchange protocol
(__dataframe__ method).
"""
def __init__(self, data: pd.DataFrame):
self._data: pd.DataFrame = data
def __dataframe__(self, allow_copy: bool = True):
return self._data.__dataframe__(allow_copy=allow_copy)
| CustomDataframe |
python | django-guardian__django-guardian | guardian/exceptions.py | {
"start": 718,
"end": 860
} | class ____(GuardianError):
"""Raised when an operation is attempted on both user/group and object."""
pass
| MultipleIdentityAndObjectError |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/logging_ops_test.py | {
"start": 13491,
"end": 14505
} | class ____(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testPrintShape(self):
inp = constant_op.constant(2.0, shape=[100, 32])
inp_printed = logging_ops.Print(inp, [inp])
self.assertEqual(inp.get_shape(), inp_printed.get_shape())
def testPrintString(self):
inp = constant_op.constant(2.0, shape=[100, 32])
inp_printed = logging_ops.Print(inp, ["hello"])
self.assertEqual(inp.get_shape(), inp_printed.get_shape())
@test_util.run_deprecated_v1
def testPrintGradient(self):
inp = constant_op.constant(2.0, shape=[100, 32], name="in")
w = constant_op.constant(4.0, shape=[10, 100], name="w")
wx = math_ops.matmul(w, inp, name="wx")
wx_print = logging_ops.Print(wx, [w, w, w])
wx_grad = gradients_impl.gradients(wx, w)[0]
wx_print_grad = gradients_impl.gradients(wx_print, w)[0]
wxg = self.evaluate(wx_grad)
wxpg = self.evaluate(wx_print_grad)
self.assertAllEqual(wxg, wxpg)
if __name__ == "__main__":
test.main()
| PrintGradientTest |
python | jazzband__django-simple-history | simple_history/tests/tests/test_commands.py | {
"start": 784,
"end": 6689
} | class ____(TestCase):
command_name = "populate_history"
command_error = (management.CommandError, SystemExit)
def test_no_args(self):
out = StringIO()
management.call_command(self.command_name, stdout=out, stderr=StringIO())
self.assertIn(populate_history.Command.COMMAND_HINT, out.getvalue())
def test_bad_args(self):
test_data = (
(populate_history.Command.MODEL_NOT_HISTORICAL, ("tests.place",)),
(populate_history.Command.MODEL_NOT_FOUND, ("invalid.model",)),
(populate_history.Command.MODEL_NOT_FOUND, ("bad_key",)),
)
for msg, args in test_data:
out = StringIO()
self.assertRaises(
self.command_error,
management.call_command,
self.command_name,
*args,
stdout=StringIO(),
stderr=out,
)
self.assertIn(msg, out.getvalue())
def test_auto_populate(self):
Poll.objects.create(question="Will this populate?", pub_date=datetime.now())
Poll.history.all().delete()
management.call_command(
self.command_name, auto=True, stdout=StringIO(), stderr=StringIO()
)
self.assertEqual(Poll.history.all().count(), 1)
def test_populate_with_custom_batch_size(self):
Poll.objects.create(question="Will this populate?", pub_date=datetime.now())
Poll.history.all().delete()
management.call_command(
self.command_name,
auto=True,
batchsize=500,
stdout=StringIO(),
stderr=StringIO(),
)
self.assertEqual(Poll.history.all().count(), 1)
def test_specific_populate(self):
Poll.objects.create(question="Will this populate?", pub_date=datetime.now())
Poll.history.all().delete()
Book.objects.create(isbn="9780007117116")
Book.history.all().delete()
management.call_command(
self.command_name, "tests.book", stdout=StringIO(), stderr=StringIO()
)
self.assertEqual(Book.history.all().count(), 1)
self.assertEqual(Poll.history.all().count(), 0)
def test_failing_wont_save(self):
Poll.objects.create(question="Will this populate?", pub_date=datetime.now())
Poll.history.all().delete()
self.assertRaises(
self.command_error,
management.call_command,
self.command_name,
"tests.poll",
"tests.invalid_model",
stdout=StringIO(),
stderr=StringIO(),
)
self.assertEqual(Poll.history.all().count(), 0)
def test_multi_table(self):
data = {"rating": 5, "name": "Tea 'N More"}
Restaurant.objects.create(**data)
Restaurant.updates.all().delete()
management.call_command(
self.command_name, "tests.restaurant", stdout=StringIO(), stderr=StringIO()
)
update_record = Restaurant.updates.all()[0]
for attr, value in data.items():
self.assertEqual(getattr(update_record, attr), value)
def test_existing_objects(self):
data = {"rating": 5, "name": "Tea 'N More"}
out = StringIO()
Restaurant.objects.create(**data)
pre_call_count = Restaurant.updates.count()
management.call_command(
self.command_name, "tests.restaurant", stdout=StringIO(), stderr=out
)
self.assertEqual(Restaurant.updates.count(), pre_call_count)
self.assertIn(populate_history.Command.EXISTING_HISTORY_FOUND, out.getvalue())
def test_no_historical(self):
out = StringIO()
with replace_registry({"test_place": Place}):
management.call_command(self.command_name, auto=True, stdout=out)
self.assertIn(populate_history.Command.NO_REGISTERED_MODELS, out.getvalue())
def test_batch_processing_with_batch_size_less_than_total(self):
data = [
Poll(id=1, question="Question 1", pub_date=datetime.now()),
Poll(id=2, question="Question 2", pub_date=datetime.now()),
Poll(id=3, question="Question 3", pub_date=datetime.now()),
Poll(id=4, question="Question 4", pub_date=datetime.now()),
]
Poll.objects.bulk_create(data)
management.call_command(
self.command_name,
auto=True,
batchsize=3,
stdout=StringIO(),
stderr=StringIO(),
)
self.assertEqual(Poll.history.count(), 4)
def test_stdout_not_printed_when_verbosity_is_0(self):
out = StringIO()
Poll.objects.create(question="Question 1", pub_date=datetime.now())
management.call_command(
self.command_name,
auto=True,
batchsize=3,
stdout=out,
stderr=StringIO(),
verbosity=0,
)
self.assertEqual(out.getvalue(), "")
def test_stdout_printed_when_verbosity_is_not_specified(self):
out = StringIO()
Poll.objects.create(question="Question 1", pub_date=datetime.now())
management.call_command(
self.command_name, auto=True, batchsize=3, stdout=out, stderr=StringIO()
)
self.assertNotEqual(out.getvalue(), "")
def test_excluded_fields(self):
poll = PollWithExcludeFields.objects.create(
question="Will this work?", pub_date=datetime.now()
)
PollWithExcludeFields.history.all().delete()
management.call_command(
self.command_name,
"tests.pollwithexcludefields",
auto=True,
stdout=StringIO(),
stderr=StringIO(),
)
initial_history_record = PollWithExcludeFields.history.all()[0]
self.assertEqual(initial_history_record.question, poll.question)
| TestPopulateHistory |
python | tensorflow__tensorflow | tensorflow/python/training/experimental/loss_scale_optimizer_test.py | {
"start": 4336,
"end": 13974
} | class ____(test.TestCase,
parameterized.TestCase):
def _run_if_in_graph_mode(self, val):
# Running only in graph mode is useful, because optimizers sometimes return
# a value that, in Graph mode, is runnable with self.evaluate. But in Eager
# mode, the optimizer already does the computations and the return value
# cannot be run.
if not context.executing_eagerly():
self.evaluate(val)
def _run_fn_with_grad_check(self, strategy, var, opt, expected_grad):
grad_check_fn = create_identity_with_grad_check_fn(
expected_grad)
loss = lambda: grad_check_fn(var) / strategy.num_replicas_in_sync
return lambda: opt.minimize(loss, var_list=[var])
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def testFixedLossScaleAppliedToLossWithMinimize(self, strategy_fn):
with strategy_fn().scope() as strategy:
var = variables.Variable([5.0])
opt = gradient_descent.GradientDescentOptimizer(2.0)
loss_scale = 10.
opt = loss_scale_optimizer.MixedPrecisionLossScaleOptimizer(
opt, loss_scale)
# We need num_replicas_in_sync to divide loss_scale, otherwise loss_scale
# / strategy.num_replicas_in_sync will not be exact, which could lead to
# assertion failures due to rounding issues.
self.assertEqual(loss_scale % strategy.num_replicas_in_sync, 0)
run_fn = self._run_fn_with_grad_check(
strategy, var, opt, loss_scale / strategy.num_replicas_in_sync)
run_op = strategy.experimental_run(run_fn)
self.evaluate(variables.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# The loss is the identity of the variable. Therefore the gradient is 1,
# and so the variable will be init_val - grad * lr == 5 - 1 * 2 == 3
self.assertAllClose([3.], self.evaluate(var))
@test_util.deprecated_graph_mode_only
def testFixedLossScaleAppliedToLossWithGetGradients(self):
var = variables.Variable([2.0])
opt = gradient_descent.GradientDescentOptimizer(1.0)
loss_scale = 10.
opt = loss_scale_optimizer.MixedPrecisionLossScaleOptimizer(opt, loss_scale)
grad_check_fn = create_identity_with_grad_check_fn(loss_scale)
loss = grad_check_fn(var)
run_op = get_gradients(opt, loss, [var])
self.evaluate(variables.global_variables_initializer())
# This will cause an assertion to run, as
# create_identity_with_grad_check_fn added an assertion op.
self.evaluate(run_op)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def testDynamicLossScale(self, strategy_fn):
strategy = strategy_fn()
learning_rate = 2.
expected_gradient = resource_variable_ops.ResourceVariable(
learning_rate / strategy.num_replicas_in_sync)
with strategy.scope():
var = variables.Variable([5.0])
opt = gradient_descent.GradientDescentOptimizer(learning_rate)
loss_scale = loss_scale_module.DynamicLossScale(
initial_loss_scale=2, increment_period=1, multiplier=2)
opt = loss_scale_optimizer.MixedPrecisionLossScaleOptimizer(
opt, loss_scale)
self.assertEqual(
loss_scale.initial_loss_scale % strategy.num_replicas_in_sync, 0)
run_fn = self._run_fn_with_grad_check(strategy, var, opt,
expected_gradient)
run_op = strategy.experimental_run(run_fn)
self.evaluate(variables.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# The loss is the identity of the variable. Therefore the gradient is 1,
# and so the variable will be init_val - grad * lr == 5 - 1 * 2 == 3
self.assertAllClose([3.], self.evaluate(var))
# Loss scale will be double, so the expected gradient is also doubled.
self.evaluate(
expected_gradient.assign(2 * learning_rate /
strategy.num_replicas_in_sync))
run_op = strategy.experimental_run(run_fn)
self._run_if_in_graph_mode(run_op)
# As before, the 2 is subtracted from the variable, making it's new value
# 1.
self.assertAllClose([1.], self.evaluate(var))
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def testDynamicUpdate(self, strategy_fn):
with strategy_fn().scope() as strategy:
var = variables.Variable([1.0, 2.0])
opt = gradient_descent.GradientDescentOptimizer(1.0)
loss_scale = loss_scale_module.DynamicLossScale(
initial_loss_scale=2, increment_period=1, multiplier=2)
opt = loss_scale_optimizer.MixedPrecisionLossScaleOptimizer(
opt, loss_scale)
# Test optimizer with finite gradients
loss = lambda: var * 2.0 / strategy.num_replicas_in_sync
run_fn = lambda: opt.minimize(loss, var_list=[var])
run_op = strategy.experimental_run(run_fn)
self.evaluate(variables.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# Gradient is 2, so variable will have 2 subtracted from it
self.assertAllClose([-1.0, 0.0], self.evaluate(var))
# Loss scale has doubled from 2 to 4
self.assertEqual(4., self.evaluate(opt._loss_scale()))
# Test optimizer with NaN gradients
loss = lambda: var * float('NaN')
run_fn = lambda: opt.minimize(loss, var_list=[var])
run_op = strategy.experimental_run(run_fn)
self._run_if_in_graph_mode(run_op)
# Variable should not change from before, due to NaN gradients.
self.assertAllClose(self.evaluate(var), [-1.0, 0.0])
# Loss scale should half due to NaN gradients.
self.assertEqual(2., self.evaluate(opt._loss_scale()))
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def testDynamicLossScaleWithSlots(self, strategy_fn):
with strategy_fn().scope() as strategy:
var = variables.Variable([1.0, 2.0])
# An SGD optimizer with momentum has slot variables.
opt = momentum.MomentumOptimizer(1.0, momentum=1.)
initial_loss_scale = 2.
loss_scale = loss_scale_module.DynamicLossScale(
initial_loss_scale=initial_loss_scale,
increment_period=1,
multiplier=4)
opt = loss_scale_optimizer.MixedPrecisionLossScaleOptimizer(
opt, loss_scale)
loss = lambda: var / strategy.num_replicas_in_sync
run_fn = lambda: opt.minimize(loss, var_list=[var])
run_op = strategy.experimental_run(run_fn)
self.evaluate(variables.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# The momentum accumulator starts at 0 and the gradient is 1. The
# accumulator is incremented by the gradient, so it is now 1. Then the
# variable is subtracted by the accumulator, so the variable is subtracted
# by 1.
self.assertAllClose([0.0, 1.0], self.evaluate(var))
self.assertEqual(self.evaluate(opt._loss_scale()), initial_loss_scale * 4)
run_op = strategy.experimental_run(run_fn)
self._run_if_in_graph_mode(run_op)
# The momentum accumulator was 1 before this step and the gradient is 1.
# The accumulator is incremented by the gradient, so it is now 2. Then the
# variable is subtracted by the accumulator, so the variable is subtracted
# by 2.
self.assertAllClose([-2., -1.], self.evaluate(var))
self.assertEqual(
self.evaluate(opt._loss_scale()), initial_loss_scale * 16)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def testCheckpoint(self, strategy_fn):
strategy = strategy_fn()
if (isinstance(strategy, mirrored_strategy.MirroredStrategy) and
not context.executing_eagerly()):
# TODO(b/121381184): Enable running the test in this case.
return
with self.test_session(), strategy.scope():
# Build and run a simple model.
var = variables.Variable([2.0])
loss_scale = loss_scale_module.DynamicLossScale(
initial_loss_scale=1., increment_period=2., multiplier=2.)
opt = momentum.MomentumOptimizer(1.0, momentum=1.)
opt = loss_scale_optimizer.MixedPrecisionLossScaleOptimizer(
opt, loss_scale)
run_fn = lambda: opt.minimize(lambda: var + 1., var_list=[var])
opt_op = strategy.experimental_run(run_fn)
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
self.assertEqual(self.evaluate(loss_scale()), 1.)
self.assertEqual(self.evaluate(loss_scale._num_good_steps), 1)
# Save a checkpoint.
checkpoint = trackable_utils.Checkpoint(optimizer=opt)
prefix = os.path.join(self.get_temp_dir(), 'ckpt')
save_path = checkpoint.save(prefix)
# Run model again.
self.evaluate(strategy.experimental_run(run_fn))
self.assertEqual(self.evaluate(loss_scale()), 2.)
self.assertEqual(self.evaluate(loss_scale._num_good_steps), 0)
# Load checkpoint and ensure loss scale is back to it's original value.
status = checkpoint.restore(save_path)
status.assert_consumed()
status.run_restore_ops()
self.assertEqual(self.evaluate(loss_scale()), 1.)
self.assertEqual(self.evaluate(loss_scale._num_good_steps), 1)
def testPassingNoneToLossScale(self):
opt = gradient_descent.GradientDescentOptimizer(1.0)
with self.assertRaisesRegex(ValueError, r'loss_scale cannot be None'):
loss_scale_optimizer.MixedPrecisionLossScaleOptimizer(opt, None)
if __name__ == '__main__':
test.main()
| MixedPrecisionLossScaleOptimizerTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/ranges.py | {
"start": 30448,
"end": 30578
} | class ____(AbstractSingleRange[int]):
"""Represent the PostgreSQL INT8RANGE type."""
__visit_name__ = "INT8RANGE"
| INT8RANGE |
python | Pylons__pyramid | tests/test_events.py | {
"start": 4223,
"end": 4845
} | class ____(ContextFoundEventTests):
def _getTargetClass(self):
from pyramid.events import AfterTraversal
return AfterTraversal
def test_class_conforms_to_IAfterTraversal(self):
from zope.interface.verify import verifyClass
from pyramid.interfaces import IAfterTraversal
verifyClass(IAfterTraversal, self._getTargetClass())
def test_instance_conforms_to_IAfterTraversal(self):
from zope.interface.verify import verifyObject
from pyramid.interfaces import IAfterTraversal
verifyObject(IAfterTraversal, self._makeOne())
| AfterTraversalEventTests |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_benchmark.py | {
"start": 99,
"end": 5855
} | class ____:
"""
Defines a global optimization benchmark problem.
This abstract class defines the basic structure of a global
optimization problem. Subclasses should implement the ``fun`` method
for a particular optimization problem.
Attributes
----------
N : int
The dimensionality of the problem.
bounds : sequence
The lower/upper bounds to be used for minimizing the problem.
This a list of (lower, upper) tuples that contain the lower and upper
bounds for the problem. The problem should not be asked for evaluation
outside these bounds. ``len(bounds) == N``.
xmin : sequence
The lower bounds for the problem
xmax : sequence
The upper bounds for the problem
fglob : float
The global minimum of the evaluated function.
global_optimum : sequence
A list of vectors that provide the locations of the global minimum.
Note that some problems have multiple global minima, not all of which
may be listed.
nfev : int
the number of function evaluations that the object has been asked to
calculate.
change_dimensionality : bool
Whether we can change the benchmark function `x` variable length (i.e.,
the dimensionality of the problem)
custom_bounds : sequence
a list of tuples that contain lower/upper bounds for use in plotting.
"""
change_dimensionality = False
def __init__(self, dimensions):
"""
Initialises the problem
Parameters
----------
dimensions : int
The dimensionality of the problem
"""
self._dimensions = dimensions
self.nfev = 0
self.fglob = np.nan
self.global_optimum = None
self.custom_bounds = None
def __str__(self):
return f'{self.__class__.__name__} ({self.N} dimensions)'
def __repr__(self):
return self.__class__.__name__
def initial_vector(self):
"""
Random initialisation for the benchmark problem.
Returns
-------
x : sequence
a vector of length ``N`` that contains random floating point
numbers that lie between the lower and upper bounds for a given
parameter.
"""
return asarray([np.random.uniform(l, u) for l, u in self.bounds])
def success(self, x, tol=1.e-5):
"""
Tests if a candidate solution at the global minimum.
The default test is
Parameters
----------
x : sequence
The candidate vector for testing if the global minimum has been
reached. Must have ``len(x) == self.N``
tol : float
The evaluated function and known global minimum must differ by less
than this amount to be at a global minimum.
Returns
-------
bool : is the candidate vector at the global minimum?
"""
val = self.fun(asarray(x))
if abs(val - self.fglob) < tol:
return True
# the solution should still be in bounds, otherwise immediate fail.
bounds = np.asarray(self.bounds, dtype=np.float64)
if np.any(x > bounds[:, 1]):
return False
if np.any(x < bounds[:, 0]):
return False
# you found a lower global minimum. This shouldn't happen.
if val < self.fglob:
raise ValueError("Found a lower global minimum",
x,
val,
self.fglob)
return False
def fun(self, x):
"""
Evaluation of the benchmark function.
Parameters
----------
x : sequence
The candidate vector for evaluating the benchmark problem. Must
have ``len(x) == self.N``.
Returns
-------
val : float
the evaluated benchmark function
"""
raise NotImplementedError
def change_dimensions(self, ndim):
"""
Changes the dimensionality of the benchmark problem
The dimensionality will only be changed if the problem is suitable
Parameters
----------
ndim : int
The new dimensionality for the problem.
"""
if self.change_dimensionality:
self._dimensions = ndim
else:
raise ValueError('dimensionality cannot be changed for this'
'problem')
@property
def bounds(self):
"""
The lower/upper bounds to be used for minimizing the problem.
This a list of (lower, upper) tuples that contain the lower and upper
bounds for the problem. The problem should not be asked for evaluation
outside these bounds. ``len(bounds) == N``.
"""
if self.change_dimensionality:
return [self._bounds[0]] * self.N
else:
return self._bounds
@property
def N(self):
"""
The dimensionality of the problem.
Returns
-------
N : int
The dimensionality of the problem
"""
return self._dimensions
@property
def xmin(self):
"""
The lower bounds for the problem
Returns
-------
xmin : sequence
The lower bounds for the problem
"""
return asarray([b[0] for b in self.bounds])
@property
def xmax(self):
"""
The upper bounds for the problem
Returns
-------
xmax : sequence
The upper bounds for the problem
"""
return asarray([b[1] for b in self.bounds])
| Benchmark |
python | pytorch__pytorch | torch/ao/nn/quantized/dynamic/modules/rnn.py | {
"start": 45702,
"end": 48059
} | class ____(RNNCellBase):
r"""An Elman RNN cell with tanh or ReLU non-linearity.
A dynamic quantized RNNCell module with floating point tensor as inputs and outputs.
Weights are quantized to 8 bits. We adopt the same interface as `torch.nn.RNNCell`,
please see https://pytorch.org/docs/stable/nn.html#torch.nn.RNNCell for documentation.
Examples::
>>> # xdoctest: +SKIP
>>> rnn = nn.RNNCell(10, 20)
>>> input = torch.randn(6, 3, 10)
>>> hx = torch.randn(3, 20)
>>> output = []
>>> for i in range(6):
... hx = rnn(input[i], hx)
... output.append(hx)
"""
__constants__ = ["input_size", "hidden_size", "bias", "nonlinearity"]
def __init__(
self, input_size, hidden_size, bias=True, nonlinearity="tanh", dtype=torch.qint8
):
super().__init__(input_size, hidden_size, bias, num_chunks=1, dtype=dtype)
self.nonlinearity = nonlinearity
def _get_name(self):
return "DynamicQuantizedRNNCell"
def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
self.check_forward_input(input)
if hx is None:
hx = torch.zeros(
input.size(0), self.hidden_size, dtype=input.dtype, device=input.device
)
self.check_forward_hidden(input, hx, "")
if self.nonlinearity == "tanh":
ret = torch.ops.quantized.quantized_rnn_tanh_cell_dynamic(
input,
hx,
self._packed_weight_ih,
self._packed_weight_hh,
self.bias_ih,
self.bias_hh,
)
elif self.nonlinearity == "relu":
ret = torch.ops.quantized.quantized_rnn_relu_cell_dynamic(
input,
hx,
self._packed_weight_ih,
self._packed_weight_hh,
self.bias_ih,
self.bias_hh,
)
else:
ret = input # TODO: remove when jit supports exception flow
raise RuntimeError(f"Unknown nonlinearity: {self.nonlinearity}")
return ret
@classmethod
def from_float(cls, mod, use_precomputed_fake_quant=False):
return super().from_float(
mod, use_precomputed_fake_quant=use_precomputed_fake_quant
)
| RNNCell |
python | RobertCraigie__pyright-python | src/pyright/errors.py | {
"start": 499,
"end": 545
} | class ____(NodeError):
pass
| VersionCheckFailed |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/remote_representation/job_index.py | {
"start": 403,
"end": 4391
} | class ____:
job_snapshot: JobSnap
parent_job_snapshot: Optional[JobSnap]
_node_defs_snaps_index: Mapping[str, Union[OpDefSnap, GraphDefSnap]]
_dagster_type_snaps_by_name_index: Mapping[str, DagsterTypeSnap]
dep_structure_index: DependencyStructureIndex
_comp_dep_structures: Mapping[str, DependencyStructureIndex]
_job_snapshot_id: Optional[str]
def __init__(
self,
job_snapshot: JobSnap,
parent_job_snapshot: Optional[JobSnap],
):
self.job_snapshot = check.inst_param(job_snapshot, "job_snapshot", JobSnap)
self.parent_job_snapshot = check.opt_inst_param(
parent_job_snapshot, "parent_job_snapshot", JobSnap
)
node_def_snaps: Sequence[Union[OpDefSnap, GraphDefSnap]] = [
*job_snapshot.node_defs_snapshot.op_def_snaps,
*job_snapshot.node_defs_snapshot.graph_def_snaps,
]
self._node_defs_snaps_index = {sd.name: sd for sd in node_def_snaps}
self._dagster_type_snaps_by_name_index = {
dagster_type_snap.name: dagster_type_snap
for dagster_type_snap in job_snapshot.dagster_type_namespace_snapshot.all_dagster_type_snaps_by_key.values()
if dagster_type_snap.name
}
self.dep_structure_index = DependencyStructureIndex(job_snapshot.dep_structure_snapshot)
self._comp_dep_structures = {
comp_snap.name: DependencyStructureIndex(comp_snap.dep_structure_snapshot)
for comp_snap in job_snapshot.node_defs_snapshot.graph_def_snaps
}
@property
def name(self) -> str:
return self.job_snapshot.name
@property
def description(self) -> Optional[str]:
return self.job_snapshot.description
@property
def tags(self) -> Mapping[str, Any]:
return self.job_snapshot.tags
@property
def metadata(self):
return self.job_snapshot.metadata
@property
def owners(self):
return self.job_snapshot.owners
@property
def job_snapshot_id(self) -> str:
return self.job_snapshot.snapshot_id
def has_dagster_type_name(self, type_name: str) -> bool:
return type_name in self._dagster_type_snaps_by_name_index
def get_dagster_type_from_name(self, type_name: str) -> DagsterTypeSnap:
return self._dagster_type_snaps_by_name_index[type_name]
def get_node_def_snap(self, node_def_name: str) -> Union[OpDefSnap, GraphDefSnap]:
check.str_param(node_def_name, "node_def_name")
return self._node_defs_snaps_index[node_def_name]
def get_dep_structure_index(self, graph_def_name: str) -> DependencyStructureIndex:
return self._comp_dep_structures[graph_def_name]
def get_dagster_type_snaps(self) -> Sequence[DagsterTypeSnap]:
dt_namespace = self.job_snapshot.dagster_type_namespace_snapshot
return list(dt_namespace.all_dagster_type_snaps_by_key.values())
def has_node_invocation(self, node_name: str) -> bool:
return self.dep_structure_index.has_invocation(node_name)
def get_default_mode_name(self) -> str:
return self.job_snapshot.mode_def_snaps[0].name
def has_mode_def(self, name: str) -> bool:
check.str_param(name, "name")
for mode_def_snap in self.job_snapshot.mode_def_snaps:
if mode_def_snap.name == name:
return True
return False
@property
def available_modes(self) -> Sequence[str]:
return [mode_def_snap.name for mode_def_snap in self.job_snapshot.mode_def_snaps]
def get_mode_def_snap(self, name: str) -> ModeDefSnap:
check.str_param(name, "name")
for mode_def_snap in self.job_snapshot.mode_def_snaps:
if mode_def_snap.name == name:
return mode_def_snap
check.failed(f"Mode {name} not found")
@property
def config_schema_snapshot(self) -> ConfigSchemaSnapshot:
return self.job_snapshot.config_schema_snapshot
| JobIndex |
python | huggingface__transformers | tests/models/vaultgemma/test_modeling_vaultgemma.py | {
"start": 1446,
"end": 1585
} | class ____(CausalLMModelTester):
if is_torch_available():
base_model_class = VaultGemmaModel
@require_torch
| VaultGemmaModelTester |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/common/parameters.py | {
"start": 5345,
"end": 6732
} | class ____(BaseParam[str]):
"""Search on attribute."""
def __init__(self, attribute: ColumnElement, skip_none: bool = True) -> None:
super().__init__(skip_none=skip_none)
self.attribute: ColumnElement = attribute
def to_orm(self, select: Select) -> Select:
if self.value is None and self.skip_none:
return select
return select.where(self.attribute.ilike(f"%{self.value}%"))
def transform_aliases(self, value: str | None) -> str | None:
if value == "~":
value = "%"
return value
@classmethod
def depends(cls, *args: Any, **kwargs: Any) -> Self:
raise NotImplementedError("Use search_param_factory instead , depends is not implemented.")
def search_param_factory(
attribute: ColumnElement,
pattern_name: str,
skip_none: bool = True,
) -> Callable[[str | None], _SearchParam]:
DESCRIPTION = (
"SQL LIKE expression — use `%` / `_` wildcards (e.g. `%customer_%`). "
"Regular expressions are **not** supported."
)
def depends_search(
value: str | None = Query(alias=pattern_name, default=None, description=DESCRIPTION),
) -> _SearchParam:
search_parm = _SearchParam(attribute, skip_none)
value = search_parm.transform_aliases(value)
return search_parm.set_value(value)
return depends_search
| _SearchParam |
python | allegroai__clearml | clearml/backend_api/services/v2_13/events.py | {
"start": 72410,
"end": 75778
} | class ____(Response):
"""
Response of events.get_task_events endpoint.
:param events: Events list
:type events: Sequence[dict]
:param returned: Number of results returned
:type returned: int
:param total: Total number of results available for this query
:type total: float
:param scroll_id: Scroll ID for getting more results
:type scroll_id: str
"""
_service = "events"
_action = "get_task_events"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"events": {
"description": "Events list",
"items": {"type": "object"},
"type": ["array", "null"],
},
"returned": {
"description": "Number of results returned",
"type": ["integer", "null"],
},
"scroll_id": {
"description": "Scroll ID for getting more results",
"type": ["string", "null"],
},
"total": {
"description": "Total number of results available for this query",
"type": ["number", "null"],
},
},
"type": "object",
}
def __init__(
self,
events: Optional[List[dict]] = None,
returned: Optional[int] = None,
total: Optional[float] = None,
scroll_id: Optional[str] = None,
**kwargs: Any
) -> None:
super(GetTaskEventsResponse, self).__init__(**kwargs)
self.events = events
self.returned = returned
self.total = total
self.scroll_id = scroll_id
@schema_property("events")
def events(self) -> Optional[List[dict]]:
return self._property_events
@events.setter
def events(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_events = None
return
self.assert_isinstance(value, "events", (list, tuple))
self.assert_isinstance(value, "events", (dict,), is_array=True)
self._property_events = value
@schema_property("returned")
def returned(self) -> Optional[int]:
return self._property_returned
@returned.setter
def returned(self, value: Optional[int]) -> None:
if value is None:
self._property_returned = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "returned", six.integer_types)
self._property_returned = value
@schema_property("total")
def total(self) -> Optional[float]:
return self._property_total
@total.setter
def total(self, value: Optional[float]) -> None:
if value is None:
self._property_total = None
return
self.assert_isinstance(value, "total", six.integer_types + (float,))
self._property_total = value
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
| GetTaskEventsResponse |
python | huggingface__transformers | src/transformers/models/omdet_turbo/processing_omdet_turbo.py | {
"start": 1245,
"end": 1499
} | class ____(TextKwargs, total=False):
task: Optional[Union[str, list[str], TextInput, PreTokenizedInput]]
if is_torch_available():
import torch
if is_torchvision_available():
from torchvision.ops.boxes import batched_nms
| OmDetTurboTextKwargs |
python | pytorch__pytorch | torch/__init__.py | {
"start": 87574,
"end": 100754
} | class ____:
def __init__(self, backend, mode, options, dynamic):
from torch._dynamo.backends.registry import lookup_backend
if isinstance(backend, str):
self.compiler_name = backend
elif hasattr(backend, "__name__"):
self.compiler_name = backend.__name__
else:
self.compiler_name = str(backend)
self.dynamic = dynamic
self.compiler_fn = lookup_backend(backend)
self.kwargs = {}
# only pass the args if they non-empty
if mode and mode != "default":
self.kwargs["mode"] = mode
if options:
self.kwargs["options"] = options
def __eq__(self, other):
return (
isinstance(other, _TorchCompileWrapper)
and self.compiler_fn == other.compiler_fn
and self.kwargs == other.kwargs
and self.dynamic == other.dynamic
)
def __call__(self, model_, inputs_):
return self.compiler_fn(model_, inputs_, **self.kwargs)
def reset(self):
if hasattr(self.compiler_fn, "reset"):
self.compiler_fn.reset()
_InputT = _ParamSpec("_InputT")
_RetT = _TypeVar("_RetT")
@_overload
def compile(
model: _Callable[_InputT, _RetT],
*,
fullgraph: builtins.bool = False,
dynamic: _Optional[builtins.bool] = None,
backend: _Union[str, _Callable] = "inductor",
mode: _Union[str, None] = None,
options: _Optional[
dict[str, _Union[str, builtins.int, builtins.bool, _Callable]]
] = None,
disable: builtins.bool = False,
) -> _Callable[_InputT, _RetT]: ...
@_overload
def compile(
model: None = None,
*,
fullgraph: builtins.bool = False,
dynamic: _Optional[builtins.bool] = None,
backend: _Union[str, _Callable] = "inductor",
mode: _Union[str, None] = None,
options: _Optional[
dict[str, _Union[str, builtins.int, builtins.bool, _Callable]]
] = None,
disable: builtins.bool = False,
) -> _Callable[[_Callable[_InputT, _RetT]], _Callable[_InputT, _RetT]]: ...
def compile(
model: _Optional[_Callable[_InputT, _RetT]] = None,
*,
fullgraph: builtins.bool = False,
dynamic: _Optional[builtins.bool] = None,
backend: _Union[str, _Callable] = "inductor",
mode: _Union[str, None] = None,
options: _Optional[
dict[str, _Union[str, builtins.int, builtins.bool, _Callable]]
] = None,
disable: builtins.bool = False,
) -> _Union[
_Callable[[_Callable[_InputT, _RetT]], _Callable[_InputT, _RetT]],
_Callable[_InputT, _RetT],
]:
"""
Optimizes given model/function using TorchDynamo and specified backend.
If you are compiling an :class:`torch.nn.Module`, you can also use :meth:`torch.nn.Module.compile`
to compile the module inplace without changing its structure.
Concretely, for every frame executed within the compiled region, we will attempt
to compile it and cache the compiled result on the code object for future
use. A single frame may be compiled multiple times if previous compiled
results are not applicable for subsequent calls (this is called a "guard
failure"), you can use TORCH_LOGS=guards to debug these situations.
Multiple compiled results can be associated with a frame up to
``torch._dynamo.config.recompile_limit``, which defaults to 8; at which
point we will fall back to eager. Note that compile caches are per
*code object*, not frame; if you dynamically create multiple copies of a
function, they will all share the same code cache.
Args:
model (Callable or None): Module/function to optimize
fullgraph (bool): If False (default), torch.compile attempts to discover compilable regions
in the function that it will optimize. If True, then we require that the entire function be
capturable into a single graph. If this is not possible (that is, if there are graph breaks),
then this will raise an error. This also opts into unbacked semantics, notably it will turn on
capture_scalar_outputs and capture_dynamic_output_shape_ops on by default.
dynamic (bool or None): Use dynamic shape tracing. When this is True, we will up-front attempt
to generate a kernel that is as dynamic as possible to avoid recompilations when
sizes change. This may not always work as some operations/optimizations will
force specialization; use TORCH_LOGS=dynamic to debug overspecialization.
When this is False, we will NEVER generate dynamic kernels, we will always specialize.
By default (None), we automatically detect if dynamism has occurred and compile a more
dynamic kernel upon recompile.
backend (str or Callable): backend to be used
- "inductor" is the default backend, which is a good balance between performance and overhead
- Non experimental in-tree backends can be seen with `torch._dynamo.list_backends()`
- Experimental or debug in-tree backends can be seen with `torch._dynamo.list_backends(None)`
- To register an out-of-tree custom backend:
https://pytorch.org/docs/main/torch.compiler_custom_backends.html#registering-custom-backends
mode (str): Can be either "default", "reduce-overhead", "max-autotune" or "max-autotune-no-cudagraphs"
- "default" is the default mode, which is a good balance between performance and overhead
- "reduce-overhead" is a mode that reduces the overhead of python with CUDA graphs,
useful for small batches. Reduction of overhead can come at the cost of more memory
usage, as we will cache the workspace memory required for the invocation so that we
do not have to reallocate it on subsequent runs. Reduction of overhead is not guaranteed
to work; today, we only reduce overhead for CUDA only graphs which do not mutate inputs.
There are other circumstances where CUDA graphs are not applicable; use TORCH_LOG=perf_hints
to debug.
- "max-autotune" is a mode that leverages Triton or template based matrix multiplications
on supported devices and Triton based convolutions on GPU.
It enables CUDA graphs by default on GPU.
- "max-autotune-no-cudagraphs" is a mode similar to "max-autotune" but without CUDA graphs
- To see the exact configs that each mode sets you can call `torch._inductor.list_mode_options()`
options (dict): A dictionary of options to pass to the backend. Some notable ones to try out are
- `epilogue_fusion` which fuses pointwise ops into templates. Requires `max_autotune` to also be set
- `max_autotune` which will profile to pick the best matmul configuration
- `fallback_random` which is useful when debugging accuracy issues
- `shape_padding` which pads matrix shapes to better align loads on GPUs especially for tensor cores
- `triton.cudagraphs` which will reduce the overhead of python with CUDA graphs
- `trace.enabled` which is the most useful debugging flag to turn on
- `trace.graph_diagram` which will show you a picture of your graph after fusion
- `guard_filter_fn` that controls which dynamo guards are saved with compilations.
This is an unsafe feature and there is no backward compatibility guarantee provided
for dynamo guards as data types.
For stable helper functions to use, see the documentations in `torch.compiler`, for example:
- `torch.compiler.skip_guard_on_inbuilt_nn_modules_unsafe`
- `torch.compiler.skip_guard_on_all_nn_modules_unsafe`
- `torch.compiler.keep_tensor_guards_unsafe`
- For inductor you can see the full list of configs that it supports by calling `torch._inductor.list_options()`
disable (bool): Turn torch.compile() into a no-op for testing
Example::
@torch.compile(options={"triton.cudagraphs": True}, fullgraph=True)
def foo(x):
return torch.sin(x) + torch.cos(x)
"""
import sysconfig
_C._log_api_usage_once("torch.compile")
if sys.version_info >= (3, 15):
raise RuntimeError("torch.compile is not supported on Python 3.15+")
elif sysconfig.get_config_var("Py_GIL_DISABLED") == 1 and sys.version_info < (
3,
13,
3,
):
raise RuntimeError(
"torch.compile is not supported on Python < 3.13.3 built with GIL disabled. "
"Please use Python 3.13.3+."
)
# Decorator mode
if model is None:
def fn(model: _Callable[_InputT, _RetT]) -> _Callable[_InputT, _RetT]:
if model is None:
raise RuntimeError("Model can't be None")
return compile( # pyrefly: ignore # no-matching-overload
model,
fullgraph=fullgraph,
dynamic=dynamic,
backend=backend,
mode=mode,
options=options,
disable=disable,
)
return fn
if mode is not None and options is not None:
raise RuntimeError(
"Either mode or options can be specified, but both can't be specified at the same time."
)
if mode is None and options is None:
mode = "default"
from torch._inductor.compiler_bisector import CompilerBisector
if bisect_backend := CompilerBisector.get_backend():
import torch._inductor.config as inductor_config
# don't override the backend for use cases like vllm
# which leverages their custom backend.
if not (
inductor_config.test_configs.bisect_keep_custom_backend_for_inductor
and bisect_backend == "inductor"
and not isinstance(backend, str)
):
backend = bisect_backend
guard_filter_fn = None
use_aoti = False
if options and isinstance(options, dict):
guard_filter_fn = options.pop("guard_filter_fn", None)
use_aoti = options.pop("use_aoti", False)
if torch.compiler.is_exporting():
warnings.warn(
"You are calling torch.compile inside torch.export region. "
"To capture an useful graph, we will implicitly switch to torch.compile(backend=eager)",
stacklevel=2,
)
from torch._higher_order_ops.utils import setup_compilation_env
# Create wrapper that always uses eager backend during export
def export_wrapped_fn(*args, **kwargs):
with setup_compilation_env() as backend: # type: ignore[attr-defined]
# Force eager backend regardless of original backend
backend_wrapper = _TorchCompileWrapper(backend, mode, options, dynamic)
return torch._dynamo.optimize(
backend=backend_wrapper,
nopython=fullgraph,
dynamic=dynamic,
disable=disable,
guard_filter_fn=guard_filter_fn,
# pyrefly: ignore [bad-argument-type]
)(model)(*args, **kwargs)
return export_wrapped_fn
if backend == "inductor":
if use_aoti:
backend = _TorchCompileAOTInductorWrapper(mode, options, dynamic)
else:
backend = _TorchCompileInductorWrapper(mode, options, dynamic)
else:
backend = _TorchCompileWrapper(backend, mode, options, dynamic)
return torch._dynamo.optimize(
backend=backend,
nopython=fullgraph,
dynamic=dynamic,
disable=disable,
guard_filter_fn=guard_filter_fn,
)(model) # type: ignore[return-value]
def _register_device_module(device_type, module):
r"""Register an external runtime module of the specific :attr:`device_type`
supported by torch.
After the :attr:`module` is registered correctly, the user can refer
the external runtime module as part of torch with attribute torch.xxx.
"""
# Make sure the device_type represent a supported device type for torch.
device_type = torch.device(device_type).type
m = sys.modules[__name__]
if hasattr(m, device_type):
raise RuntimeError(
f"The runtime module of '{device_type}' has already "
f"been registered with '{getattr(m, device_type)}'"
)
setattr(m, device_type, module)
torch_module_name = ".".join([__name__, device_type])
sys.modules[torch_module_name] = module
from torch import (
export as export,
func as func,
library as library,
return_types as return_types,
)
from torch._higher_order_ops import cond as cond, while_loop as while_loop
from torch.func import vmap as vmap
if not TYPE_CHECKING:
from torch import _meta_registrations
# Enable CUDA Sanitizer
if "TORCH_CUDA_SANITIZER" in os.environ:
import torch.cuda._sanitizer as csan
csan.enable_cuda_sanitizer()
# Populate magic methods on SymInt and SymFloat
import torch.fx.experimental.sym_node
from torch import fx as fx
# Register MPS specific decomps
torch.backends.mps._init()
from torch import compiler as compiler
| _TorchCompileWrapper |
python | django__django | tests/queries/tests.py | {
"start": 2218,
"end": 52028
} | class ____(TestCase):
    @classmethod
    def setUpTestData(cls):
        """Build the shared fixture: a tag tree, notes, authors with extra
        info, items tagged/created at known times, reports, a ranking and
        covers. Creation order and explicit ids matter — several tests
        assert on orderings that differ between 'id' and other fields.
        """
        cls.nc1 = generic = NamedCategory.objects.create(name="Generic")
        # Tag tree: t1 is the root; t2, t3 are children; t4, t5 under t3.
        cls.t1 = Tag.objects.create(name="t1", category=generic)
        cls.t2 = Tag.objects.create(name="t2", parent=cls.t1, category=generic)
        cls.t3 = Tag.objects.create(name="t3", parent=cls.t1)
        cls.t4 = Tag.objects.create(name="t4", parent=cls.t3)
        cls.t5 = Tag.objects.create(name="t5", parent=cls.t3)
        cls.n1 = Note.objects.create(note="n1", misc="foo", id=1)
        cls.n2 = Note.objects.create(note="n2", misc="bar", id=2)
        cls.n3 = Note.objects.create(note="n3", misc="foo", id=3, negate=False)
        cls.ann1 = Annotation.objects.create(name="a1", tag=cls.t1)
        cls.ann1.notes.add(cls.n1)
        ann2 = Annotation.objects.create(name="a2", tag=cls.t4)
        ann2.notes.add(cls.n2, cls.n3)
        # Create these out of order so that sorting by 'id' will be different
        # to sorting by 'info'. Helps detect some problems later.
        cls.e2 = ExtraInfo.objects.create(
            info="e2", note=cls.n2, value=41, filterable=False
        )
        e1 = ExtraInfo.objects.create(info="e1", note=cls.n1, value=42)
        cls.a1 = Author.objects.create(name="a1", num=1001, extra=e1)
        cls.a2 = Author.objects.create(name="a2", num=2002, extra=e1)
        cls.a3 = Author.objects.create(name="a3", num=3003, extra=cls.e2)
        cls.a4 = Author.objects.create(name="a4", num=4004, extra=cls.e2)
        cls.time1 = datetime.datetime(2007, 12, 19, 22, 25, 0)
        cls.time2 = datetime.datetime(2007, 12, 19, 21, 0, 0)
        time3 = datetime.datetime(2007, 12, 20, 22, 25, 0)
        time4 = datetime.datetime(2007, 12, 20, 21, 0, 0)
        # Only i1 has 'modified' set; i3 has no tags — several isnull tests
        # rely on these gaps.
        cls.i1 = Item.objects.create(
            name="one",
            created=cls.time1,
            modified=cls.time1,
            creator=cls.a1,
            note=cls.n3,
        )
        cls.i1.tags.set([cls.t1, cls.t2])
        cls.i2 = Item.objects.create(
            name="two", created=cls.time2, creator=cls.a2, note=cls.n2
        )
        cls.i2.tags.set([cls.t1, cls.t3])
        cls.i3 = Item.objects.create(
            name="three", created=time3, creator=cls.a2, note=cls.n3
        )
        cls.i4 = Item.objects.create(
            name="four", created=time4, creator=cls.a4, note=cls.n3
        )
        cls.i4.tags.set([cls.t4])
        cls.r1 = Report.objects.create(name="r1", creator=cls.a1)
        cls.r2 = Report.objects.create(name="r2", creator=cls.a3)
        cls.r3 = Report.objects.create(name="r3")
        # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the
        # Meta.ordering will be rank3, rank2, rank1.
        cls.rank1 = Ranking.objects.create(rank=2, author=cls.a2)
        cls.c1 = Cover.objects.create(title="first", item=cls.i4)
        cls.c2 = Cover.objects.create(title="second", item=cls.i2)
    def test_subquery_condition(self):
        """Nested __in subqueries get distinct table aliases (T, U, V) and
        aliases are reused across sibling subqueries instead of growing."""
        qs1 = Tag.objects.filter(pk__lte=0)
        qs2 = Tag.objects.filter(parent__in=qs1)
        qs3 = Tag.objects.filter(parent__in=qs2)
        self.assertEqual(qs3.query.subq_aliases, {"T", "U", "V"})
        self.assertIn("v0", str(qs3.query).lower())
        qs4 = qs3.filter(parent__in=qs1)
        self.assertEqual(qs4.query.subq_aliases, {"T", "U", "V"})
        # It is possible to reuse U for the second subquery, no need to use W.
        self.assertNotIn("w0", str(qs4.query).lower())
        # So, 'U0."id"' is referenced in SELECT and WHERE twice.
        self.assertEqual(str(qs4.query).lower().count("u0."), 4)
    def test_ticket1050(self):
        """tags__isnull (and tags__id__isnull) matches items with no tags."""
        self.assertSequenceEqual(
            Item.objects.filter(tags__isnull=True),
            [self.i3],
        )
        self.assertSequenceEqual(
            Item.objects.filter(tags__id__isnull=True),
            [self.i3],
        )
    def test_ticket1801(self):
        """Filtering authors by item instance works, including when two such
        querysets are ANDed together."""
        self.assertSequenceEqual(
            Author.objects.filter(item=self.i2),
            [self.a2],
        )
        self.assertSequenceEqual(
            Author.objects.filter(item=self.i3),
            [self.a2],
        )
        self.assertSequenceEqual(
            Author.objects.filter(item=self.i2) & Author.objects.filter(item=self.i3),
            [self.a2],
        )
def test_ticket2306(self):
# Checking that no join types are "left outer" joins.
query = Item.objects.filter(tags=self.t2).query
self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1)).order_by("name"),
[self.i1, self.i2],
)
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1)).filter(Q(tags=self.t2)),
[self.i1],
)
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1)).filter(
Q(creator__name="fred") | Q(tags=self.t2)
),
[self.i1],
)
# Each filter call is processed "at once" against a single table, so
# this is different from the previous example as it tries to find tags
# that are two things at once (rather than two tags).
self.assertSequenceEqual(
Item.objects.filter(Q(tags=self.t1) & Q(tags=self.t2)), []
)
self.assertSequenceEqual(
Item.objects.filter(
Q(tags=self.t1), Q(creator__name="fred") | Q(tags=self.t2)
),
[],
)
qs = Author.objects.filter(ranking__rank=2, ranking__id=self.rank1.id)
self.assertSequenceEqual(list(qs), [self.a2])
self.assertEqual(2, qs.query.count_active_tables(), 2)
qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=self.rank1.id)
self.assertEqual(qs.query.count_active_tables(), 3)
    def test_ticket4464(self):
        """Chained m2m filters, __in lookups with distinct(), and
        distinct() combined with slicing all return the expected items."""
        self.assertSequenceEqual(
            Item.objects.filter(tags=self.t1).filter(tags=self.t2),
            [self.i1],
        )
        self.assertSequenceEqual(
            Item.objects.filter(tags__in=[self.t1, self.t2])
            .distinct()
            .order_by("name"),
            [self.i1, self.i2],
        )
        self.assertSequenceEqual(
            Item.objects.filter(tags__in=[self.t1, self.t2]).filter(tags=self.t3),
            [self.i2],
        )

        # Make sure .distinct() works with slicing (this was broken in Oracle).
        self.assertSequenceEqual(
            Item.objects.filter(tags__in=[self.t1, self.t2]).order_by("name")[:3],
            [self.i1, self.i1, self.i2],
        )
        self.assertSequenceEqual(
            Item.objects.filter(tags__in=[self.t1, self.t2])
            .distinct()
            .order_by("name")[:3],
            [self.i1, self.i2],
        )
    def test_tickets_2080_3592(self):
        """OR-ing querysets (| operator) and OR-ing Q objects across
        relations produce the same, correct results regardless of order."""
        self.assertSequenceEqual(
            Author.objects.filter(item__name="one") | Author.objects.filter(name="a3"),
            [self.a1, self.a3],
        )
        self.assertSequenceEqual(
            Author.objects.filter(Q(item__name="one") | Q(name="a3")),
            [self.a1, self.a3],
        )
        self.assertSequenceEqual(
            Author.objects.filter(Q(name="a3") | Q(item__name="one")),
            [self.a1, self.a3],
        )
        self.assertSequenceEqual(
            Author.objects.filter(Q(item__name="three") | Q(report__name="r3")),
            [self.a2],
        )
def test_ticket6074(self):
# Merging two empty result sets shouldn't leave a queryset with no
# constraints (which would match everything).
self.assertSequenceEqual(Author.objects.filter(Q(id__in=[])), [])
self.assertSequenceEqual(Author.objects.filter(Q(id__in=[]) | Q(id__in=[])), [])
    def test_tickets_1878_2939(self):
        """values(...).distinct().count() handles multi-column distinct and
        extra() select columns correctly (including duplicate names)."""
        self.assertEqual(Item.objects.values("creator").distinct().count(), 3)

        # Create something with a duplicate 'name' so that we can test
        # multi-column cases (which require some tricky SQL transformations
        # under the covers).
        xx = Item(name="four", created=self.time1, creator=self.a2, note=self.n1)
        xx.save()
        self.assertEqual(
            Item.objects.exclude(name="two")
            .values("creator", "name")
            .distinct()
            .count(),
            4,
        )
        self.assertEqual(
            (
                Item.objects.exclude(name="two")
                .extra(select={"foo": "%s"}, select_params=(1,))
                .values("creator", "name", "foo")
                .distinct()
                .count()
            ),
            4,
        )
        self.assertEqual(
            (
                Item.objects.exclude(name="two")
                .extra(select={"foo": "%s"}, select_params=(1,))
                .values("creator", "name")
                .distinct()
                .count()
            ),
            4,
        )
        # Clean up the extra row so other assertions in this class see the
        # original fixture.
        xx.delete()
def test_ticket7323(self):
self.assertEqual(Item.objects.values("creator", "name").count(), 4)
    def test_ticket2253(self):
        """Querysets combined with | and & keep correct results and respect
        a subsequent order_by()."""
        q1 = Item.objects.order_by("name")
        q2 = Item.objects.filter(id=self.i1.id)
        self.assertSequenceEqual(q1, [self.i4, self.i1, self.i3, self.i2])
        self.assertSequenceEqual(q2, [self.i1])
        self.assertSequenceEqual(
            (q1 | q2).order_by("name"),
            [self.i4, self.i1, self.i3, self.i2],
        )
        self.assertSequenceEqual((q1 & q2).order_by("name"), [self.i1])

        q1 = Item.objects.filter(tags=self.t1)
        q2 = Item.objects.filter(note=self.n3, tags=self.t2)
        q3 = Item.objects.filter(creator=self.a4)
        self.assertSequenceEqual(
            ((q1 & q2) | q3).order_by("name"),
            [self.i4, self.i1],
        )
    def test_order_by_tables(self):
        """Combining querysets with & should leave exactly one referenced
        table alias in the combined query (no stale joins)."""
        q1 = Item.objects.order_by("name")
        q2 = Item.objects.filter(id=self.i1.id)
        list(q2)
        combined_query = (q1 & q2).order_by("name").query
        self.assertEqual(
            len(
                [
                    t
                    for t in combined_query.alias_map
                    if combined_query.alias_refcount[t]
                ]
            ),
            1,
        )
    def test_order_by_join_unref(self):
        """
        This test is related to the above one, testing that there aren't
        old JOINs in the query: replacing the ordering drops the join the
        old ordering required.
        """
        qs = Celebrity.objects.order_by("greatest_fan__fan_of")
        self.assertIn("OUTER JOIN", str(qs.query))
        qs = qs.order_by("id")
        self.assertNotIn("OUTER JOIN", str(qs.query))
    def test_order_by_related_field_transform(self):
        """Ordering by a transform on a related field (date__month) spans
        the relation and applies the transform."""
        extra_12 = ExtraInfo.objects.create(
            info="extra 12",
            date=DateTimePK.objects.create(date=datetime.datetime(2021, 12, 10)),
        )
        extra_11 = ExtraInfo.objects.create(
            info="extra 11",
            date=DateTimePK.objects.create(date=datetime.datetime(2022, 11, 10)),
        )
        # Month 11 sorts before month 12, even though the years reverse it.
        self.assertSequenceEqual(
            ExtraInfo.objects.filter(date__isnull=False).order_by("date__month"),
            [extra_11, extra_12],
        )
    def test_filter_by_related_field_transform(self):
        """A lookup registered on a FK field (ExtractYear) works both
        directly and when reached through a relation."""
        extra_old = ExtraInfo.objects.create(
            info="extra 12",
            date=DateTimePK.objects.create(date=datetime.datetime(2020, 12, 10)),
        )
        ExtraInfo.objects.create(info="extra 11", date=DateTimePK.objects.create())
        a5 = Author.objects.create(name="a5", num=5005, extra=extra_old)

        fk_field = ExtraInfo._meta.get_field("date")
        with register_lookup(fk_field, ExtractYear):
            self.assertSequenceEqual(
                ExtraInfo.objects.filter(date__year=2020),
                [extra_old],
            )
            self.assertSequenceEqual(
                Author.objects.filter(extra__date__year=2020), [a5]
            )
    def test_filter_by_related_field_nested_transforms(self):
        """Chained transforms (ltrim then length) apply across a relation."""
        # " extra" has length 5 after left-trimming the leading space.
        extra = ExtraInfo.objects.create(info=" extra")
        a5 = Author.objects.create(name="a5", num=5005, extra=extra)
        info_field = ExtraInfo._meta.get_field("info")
        with register_lookup(info_field, Length), register_lookup(CharField, LTrim):
            self.assertSequenceEqual(
                Author.objects.filter(extra__info__ltrim__length=5), [a5]
            )
    def test_get_clears_ordering(self):
        """
        get() should clear ordering for optimization purposes: the emitted
        SQL must not contain an ORDER BY clause.
        """
        with CaptureQueriesContext(connection) as captured_queries:
            Author.objects.order_by("name").get(pk=self.a1.pk)
        self.assertNotIn("order by", captured_queries[0]["sql"].lower())
    def test_tickets_4088_4306(self):
        """Filtering on a FK accepts a raw pk, related-field lookups, and
        explicit __id/__name lookups; a bare value is matched against the
        related pk, not the row id of an unrelated object."""
        self.assertSequenceEqual(Report.objects.filter(creator=1001), [self.r1])
        self.assertSequenceEqual(Report.objects.filter(creator__num=1001), [self.r1])
        # creator__id compares against Author.id, which is not 1001 here.
        self.assertSequenceEqual(Report.objects.filter(creator__id=1001), [])
        self.assertSequenceEqual(
            Report.objects.filter(creator__id=self.a1.id), [self.r1]
        )
        self.assertSequenceEqual(Report.objects.filter(creator__name="a1"), [self.r1])
    def test_ticket4510(self):
        """Reverse FK traversal in a filter (author -> report) works."""
        self.assertSequenceEqual(
            Author.objects.filter(report__name="r1"),
            [self.a1],
        )
def test_ticket7378(self):
self.assertSequenceEqual(self.a1.report_set.all(), [self.r1])
    def test_tickets_5324_6704(self):
        """exclude() across m2m and nullable FK relations: correct rows,
        and joins are only promoted to LEFT OUTER when NULLs are possible."""
        self.assertSequenceEqual(
            Item.objects.filter(tags__name="t4"),
            [self.i4],
        )
        self.assertSequenceEqual(
            Item.objects.exclude(tags__name="t4").order_by("name").distinct(),
            [self.i1, self.i3, self.i2],
        )
        self.assertSequenceEqual(
            Item.objects.exclude(tags__name="t4").order_by("name").distinct().reverse(),
            [self.i2, self.i3, self.i1],
        )
        self.assertSequenceEqual(
            Author.objects.exclude(item__name="one").distinct().order_by("name"),
            [self.a2, self.a3, self.a4],
        )

        # Excluding across a m2m relation when there is more than one related
        # object associated was problematic.
        self.assertSequenceEqual(
            Item.objects.exclude(tags__name="t1").order_by("name"),
            [self.i4, self.i3],
        )
        self.assertSequenceEqual(
            Item.objects.exclude(tags__name="t1").exclude(tags__name="t4"),
            [self.i3],
        )

        # Excluding from a relation that cannot be NULL should not use outer
        # joins.
        query = Item.objects.exclude(creator__in=[self.a1, self.a2]).query
        self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()])

        # Similarly, when one of the joins cannot possibly, ever, involve NULL
        # values (Author -> ExtraInfo, in the following), it should never be
        # promoted to a left outer join. So the following query should only
        # involve one "left outer" join (Author -> Item is 0-to-many).
        qs = Author.objects.filter(id=self.a1.id).filter(
            Q(extra__note=self.n1) | Q(item__note=self.n3)
        )
        self.assertEqual(
            len(
                [
                    x
                    for x in qs.query.alias_map.values()
                    if x.join_type == LOUTER and qs.query.alias_refcount[x.table_alias]
                ]
            ),
            1,
        )

        # The previous changes shouldn't affect nullable foreign key joins.
        self.assertSequenceEqual(
            Tag.objects.filter(parent__isnull=True).order_by("name"), [self.t1]
        )
        self.assertSequenceEqual(
            Tag.objects.exclude(parent__isnull=True).order_by("name"),
            [self.t2, self.t3, self.t4, self.t5],
        )
        self.assertSequenceEqual(
            Tag.objects.exclude(Q(parent__name="t1") | Q(parent__isnull=True)).order_by(
                "name"
            ),
            [self.t4, self.t5],
        )
        self.assertSequenceEqual(
            Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name="t1")).order_by(
                "name"
            ),
            [self.t4, self.t5],
        )
        self.assertSequenceEqual(
            Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by("name"),
            [self.t4, self.t5],
        )
        self.assertSequenceEqual(
            Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by("name"),
            [self.t4, self.t5],
        )
def test_ticket2091(self):
t = Tag.objects.get(name="t4")
self.assertSequenceEqual(Item.objects.filter(tags__in=[t]), [self.i4])
    def test_avoid_infinite_loop_on_too_many_subqueries(self):
        """Deeply nested __in subqueries raise RecursionError with a clear
        message instead of looping forever while generating aliases."""
        x = Tag.objects.filter(pk=1)
        # Scale the nesting depth to the interpreter's recursion limit so the
        # test stays valid whatever limit is configured.
        local_recursion_limit = sys.getrecursionlimit() // 16
        msg = "Maximum recursion depth exceeded: too many subqueries."
        with self.assertRaisesMessage(RecursionError, msg):
            for i in range(local_recursion_limit + 2):
                x = Tag.objects.filter(pk__in=x)
    def test_reasonable_number_of_subq_aliases(self):
        """Subquery aliases progress T..Z and then roll over to two-letter
        names (AA, AB, ...) — 21 aliases for 20 nested subqueries."""
        x = Tag.objects.filter(pk=1)
        for _ in range(20):
            x = Tag.objects.filter(pk__in=x)
        self.assertEqual(
            x.query.subq_aliases,
            {
                "T",
                "U",
                "V",
                "W",
                "X",
                "Y",
                "Z",
                "AA",
                "AB",
                "AC",
                "AD",
                "AE",
                "AF",
                "AG",
                "AH",
                "AI",
                "AJ",
                "AK",
                "AL",
                "AM",
                "AN",
            },
        )
    def test_heterogeneous_qs_combination(self):
        # Combining querysets built on different models should behave in a
        # well-defined fashion. We raise an error.
        msg = "Cannot combine queries on two different base models."
        with self.assertRaisesMessage(TypeError, msg):
            Author.objects.all() & Tag.objects.all()
        with self.assertRaisesMessage(TypeError, msg):
            Author.objects.all() | Tag.objects.all()
    def test_ticket3141(self):
        """extra(select=...) columns — literal or parameterized — do not
        change the row count."""
        self.assertEqual(Author.objects.extra(select={"foo": "1"}).count(), 4)
        self.assertEqual(
            Author.objects.extra(select={"foo": "%s"}, select_params=(1,)).count(), 4
        )
    def test_ticket2400(self):
        """isnull=True on a reverse relation finds rows with no related
        objects (author without items, tag without items)."""
        self.assertSequenceEqual(
            Author.objects.filter(item__isnull=True),
            [self.a3],
        )
        self.assertSequenceEqual(
            Tag.objects.filter(item__isnull=True),
            [self.t5],
        )
    def test_ticket2496(self):
        """extra(tables=...) combined with select_related() and slicing
        still produces correct results."""
        self.assertSequenceEqual(
            Item.objects.extra(tables=["queries_author"])
            .select_related()
            .order_by("name")[:1],
            [self.i4],
        )
def test_error_raised_on_filter_with_dictionary(self):
with self.assertRaisesMessage(FieldError, "Cannot parse keyword query as dict"):
Note.objects.filter({"note": "n1", "misc": "foo"})
    def test_tickets_2076_7256(self):
        """Ordering on related tables: pulls in the needed joins, applies the
        remote model's Meta.ordering (possibly transitively), and drops
        unneeded joins when the default ordering is replaced."""
        # Ordering on related tables should be possible, even if the table is
        # not otherwise involved.
        self.assertSequenceEqual(
            Item.objects.order_by("note__note", "name"),
            [self.i2, self.i4, self.i1, self.i3],
        )

        # Ordering on a related field should use the remote model's default
        # ordering as a final step.
        self.assertSequenceEqual(
            Author.objects.order_by("extra", "-name"),
            [self.a2, self.a1, self.a4, self.a3],
        )

        # Using remote model default ordering can span multiple models (in this
        # case, Cover is ordered by Item's default, which uses Note's default).
        self.assertSequenceEqual(Cover.objects.all(), [self.c1, self.c2])

        # If the remote model does not have a default ordering, we order by its
        # 'id' field.
        self.assertSequenceEqual(
            Item.objects.order_by("creator", "name"),
            [self.i1, self.i3, self.i2, self.i4],
        )

        # Ordering by a many-valued attribute (e.g. a many-to-many or reverse
        # ForeignKey) is legal, but the results might not make sense. That
        # isn't Django's problem. Garbage in, garbage out.
        self.assertSequenceEqual(
            Item.objects.filter(tags__isnull=False).order_by("tags", "id"),
            [self.i1, self.i2, self.i1, self.i2, self.i4],
        )

        # If we replace the default ordering, Django adjusts the required
        # tables automatically. Item normally requires a join with Note to do
        # the default ordering, but that isn't needed here.
        qs = Item.objects.order_by("name")
        self.assertSequenceEqual(qs, [self.i4, self.i1, self.i3, self.i2])
        self.assertEqual(len(qs.query.alias_map), 1)
    def test_tickets_2874_3002(self):
        """select_related() with related-table ordering joins Note twice and
        keeps the two joined Note rows distinct."""
        qs = Item.objects.select_related().order_by("note__note", "name")
        self.assertQuerySetEqual(qs, [self.i2, self.i4, self.i1, self.i3])

        # This is also a good select_related() test because there are multiple
        # Note entries in the SQL. The two Note items should be different.
        self.assertEqual(repr(qs[0].note), "<Note: n2>")
        self.assertEqual(repr(qs[0].creator.extra.note), "<Note: n1>")
    def test_ticket3037(self):
        """OR-ing Q objects that each mix a related lookup with a local
        field selects the right item."""
        self.assertSequenceEqual(
            Item.objects.filter(
                Q(creator__name="a3", name="two") | Q(creator__name="a4", name="four")
            ),
            [self.i4],
        )
    def test_tickets_5321_7070(self):
        # Ordering columns must be included in the output columns. Note that
        # this means results that might otherwise be distinct are not (if there
        # are multiple values in the ordering cols), as in this example. This
        # isn't a bug; it's a warning to be careful with the selection of
        # ordering columns.
        self.assertSequenceEqual(
            Note.objects.values("misc").distinct().order_by("note", "-misc"),
            [{"misc": "foo"}, {"misc": "bar"}, {"misc": "foo"}],
        )
    def test_ticket4358(self):
        # If you don't pass any fields to values(), relation fields are
        # returned as "foo_id" keys, not "foo". For consistency, you should be
        # able to pass "foo_id" in the fields list and have it work, too. We
        # actually allow both "foo" and "foo_id".
        # The *_id version is returned by default.
        self.assertIn("note_id", ExtraInfo.objects.values()[0])
        # You can also pass it in explicitly.
        self.assertSequenceEqual(
            ExtraInfo.objects.values("note_id"), [{"note_id": 1}, {"note_id": 2}]
        )
        # ...or use the field name.
        self.assertSequenceEqual(
            ExtraInfo.objects.values("note"), [{"note": 1}, {"note": 2}]
        )
    def test_ticket6154(self):
        """Chained filter() calls AND together, in either order, including
        when one filter is a Q disjunction over relations."""
        # Multiple filter statements are joined using "AND" all the time.
        self.assertSequenceEqual(
            Author.objects.filter(id=self.a1.id).filter(
                Q(extra__note=self.n1) | Q(item__note=self.n3)
            ),
            [self.a1],
        )
        self.assertSequenceEqual(
            Author.objects.filter(
                Q(extra__note=self.n1) | Q(item__note=self.n3)
            ).filter(id=self.a1.id),
            [self.a1],
        )
    def test_ticket6981(self):
        """select_related on a nullable self-referential FK returns all rows."""
        self.assertSequenceEqual(
            Tag.objects.select_related("parent").order_by("name"),
            [self.t1, self.t2, self.t3, self.t4, self.t5],
        )
    def test_ticket9926(self):
        """select_related accepts multiple fields, including nested paths
        through a nullable parent."""
        self.assertSequenceEqual(
            Tag.objects.select_related("parent", "category").order_by("name"),
            [self.t1, self.t2, self.t3, self.t4, self.t5],
        )
        self.assertSequenceEqual(
            Tag.objects.select_related("parent", "parent__category").order_by("name"),
            [self.t1, self.t2, self.t3, self.t4, self.t5],
        )
    def test_tickets_6180_6203(self):
        # Dates with limits and/or counts
        self.assertEqual(Item.objects.count(), 4)
        self.assertEqual(Item.objects.datetimes("created", "month").count(), 1)
        self.assertEqual(Item.objects.datetimes("created", "day").count(), 2)
        self.assertEqual(len(Item.objects.datetimes("created", "day")), 2)
        self.assertEqual(
            Item.objects.datetimes("created", "day")[0],
            datetime.datetime(2007, 12, 19, 0, 0),
        )
    def test_tickets_7087_12242(self):
        """datetimes() composes with extra() select columns and extra()
        where clauses, in either call order."""
        # Dates with extra select columns
        self.assertSequenceEqual(
            Item.objects.datetimes("created", "day").extra(select={"a": 1}),
            [
                datetime.datetime(2007, 12, 19, 0, 0),
                datetime.datetime(2007, 12, 20, 0, 0),
            ],
        )
        self.assertSequenceEqual(
            Item.objects.extra(select={"a": 1}).datetimes("created", "day"),
            [
                datetime.datetime(2007, 12, 19, 0, 0),
                datetime.datetime(2007, 12, 20, 0, 0),
            ],
        )

        name = "one"
        self.assertSequenceEqual(
            Item.objects.datetimes("created", "day").extra(
                where=["name=%s"], params=[name]
            ),
            [datetime.datetime(2007, 12, 19, 0, 0)],
        )
        self.assertSequenceEqual(
            Item.objects.extra(where=["name=%s"], params=[name]).datetimes(
                "created", "day"
            ),
            [datetime.datetime(2007, 12, 19, 0, 0)],
        )
    def test_ticket7155(self):
        # Nullable dates: rows with NULL 'modified' are skipped by
        # datetimes(), leaving only i1's date.
        self.assertSequenceEqual(
            Item.objects.datetimes("modified", "day"),
            [datetime.datetime(2007, 12, 19, 0, 0)],
        )
    def test_order_by_rawsql(self):
        """order_by() accepts a RawSQL expression alongside field names."""
        self.assertSequenceEqual(
            Item.objects.values("note__note").order_by(
                RawSQL("queries_note.note", ()),
                "id",
            ),
            [
                {"note__note": "n2"},
                {"note__note": "n3"},
                {"note__note": "n3"},
                {"note__note": "n3"},
            ],
        )
    def test_ticket7096(self):
        """exclude() with multiple conditions, including nested negations
        (~Q inside filter/exclude), keeps its multi-condition semantics."""
        # Make sure exclude() with multiple conditions continues to work.
        self.assertSequenceEqual(
            Tag.objects.filter(parent=self.t1, name="t3").order_by("name"),
            [self.t3],
        )
        self.assertSequenceEqual(
            Tag.objects.exclude(parent=self.t1, name="t3").order_by("name"),
            [self.t1, self.t2, self.t4, self.t5],
        )
        self.assertSequenceEqual(
            Item.objects.exclude(tags__name="t1", name="one")
            .order_by("name")
            .distinct(),
            [self.i4, self.i3, self.i2],
        )
        self.assertSequenceEqual(
            Item.objects.filter(name__in=["three", "four"])
            .exclude(tags__name="t1")
            .order_by("name"),
            [self.i4, self.i3],
        )

        # More twisted cases, involving nested negations.
        self.assertSequenceEqual(
            Item.objects.exclude(~Q(tags__name="t1", name="one")),
            [self.i1],
        )
        self.assertSequenceEqual(
            Item.objects.filter(~Q(tags__name="t1", name="one"), name="two"),
            [self.i2],
        )
        self.assertSequenceEqual(
            Item.objects.exclude(~Q(tags__name="t1", name="one"), name="two"),
            [self.i4, self.i1, self.i3],
        )
    def test_tickets_7204_7506(self):
        # Make sure querysets with related fields can be pickled. If this
        # doesn't crash, it's a Good Thing.
        pickle.dumps(Item.objects.all())
    def test_ticket7813(self):
        # We should also be able to pickle things that use select_related().
        # The only tricky thing here is to ensure that we do the related
        # selections properly after unpickling: the round-tripped query must
        # compile to the same SQL as the original.
        qs = Item.objects.select_related()
        query = qs.query.get_compiler(qs.db).as_sql()[0]
        query2 = pickle.loads(pickle.dumps(qs.query))
        self.assertEqual(query2.get_compiler(qs.db).as_sql()[0], query)
    def test_deferred_load_qs_pickling(self):
        # Check pickling of deferred-loading querysets, with both the default
        # and the highest pickle protocol.
        qs = Item.objects.defer("name", "creator")
        q2 = pickle.loads(pickle.dumps(qs))
        self.assertEqual(list(qs), list(q2))
        q3 = pickle.loads(pickle.dumps(qs, pickle.HIGHEST_PROTOCOL))
        self.assertEqual(list(qs), list(q3))
    def test_ticket7277(self):
        """Filtering a related manager with a Q disjunction over multiple
        levels of the tag hierarchy finds the annotation."""
        self.assertSequenceEqual(
            self.n1.annotation_set.filter(
                Q(tag=self.t5)
                | Q(tag__children=self.t5)
                | Q(tag__children__children=self.t5)
            ),
            [self.ann1],
        )
    def test_tickets_7448_7707(self):
        # Complex objects should be converted to strings before being used in
        # lookups (here: datetime values in an __in lookup).
        self.assertSequenceEqual(
            Item.objects.filter(created__in=[self.time1, self.time2]),
            [self.i1, self.i2],
        )
    def test_ticket7235(self):
        """Every queryset method on an EmptyQuerySet returns empty without
        hitting the database; mutating a sliced query raises TypeError."""
        # An EmptyQuerySet should not raise exceptions if it is filtered.
        Eaten.objects.create(meal="m")
        q = Eaten.objects.none()
        with self.assertNumQueries(0):
            self.assertSequenceEqual(q.all(), [])
            self.assertSequenceEqual(q.filter(meal="m"), [])
            self.assertSequenceEqual(q.exclude(meal="m"), [])
            self.assertSequenceEqual(q.complex_filter({"pk": 1}), [])
            self.assertSequenceEqual(q.select_related("food"), [])
            self.assertSequenceEqual(q.annotate(Count("food")), [])
            self.assertSequenceEqual(q.order_by("meal", "food"), [])
            self.assertSequenceEqual(q.distinct(), [])
            self.assertSequenceEqual(q.extra(select={"foo": "1"}), [])
            self.assertSequenceEqual(q.reverse(), [])
            q.query.low_mark = 1
            msg = "Cannot change a query once a slice has been taken."
            with self.assertRaisesMessage(TypeError, msg):
                q.extra(select={"foo": "1"})
            self.assertSequenceEqual(q.defer("meal"), [])
            self.assertSequenceEqual(q.only("meal"), [])
    def test_ticket7791(self):
        # There were "issues" when ordering and distinct-ing on fields related
        # via ForeignKeys.
        self.assertEqual(len(Note.objects.order_by("extrainfo__info").distinct()), 3)

        # Pickling of QuerySets using datetimes() should work.
        qs = Item.objects.datetimes("created", "month")
        pickle.loads(pickle.dumps(qs))
    def test_ticket9997(self):
        # If a ValuesList or Values queryset is passed as an inner query, we
        # make sure it's only requesting a single value and use that as the
        # thing to select.
        self.assertSequenceEqual(
            Tag.objects.filter(
                name__in=Tag.objects.filter(parent=self.t1).values("name")
            ),
            [self.t2, self.t3],
        )
    def test_ticket9985(self):
        # qs.values_list(...).values(...) combinations should work, both
        # directly and as the inner query of an __in lookup.
        self.assertSequenceEqual(
            Note.objects.values_list("note", flat=True).values("id").order_by("id"),
            [{"id": 1}, {"id": 2}, {"id": 3}],
        )
        self.assertSequenceEqual(
            Annotation.objects.filter(
                notes__in=Note.objects.filter(note="n1")
                .values_list("note")
                .values("id")
            ),
            [self.ann1],
        )
def test_ticket10205(self):
# When bailing out early because of an empty "__in" filter, we need
# to set things up correctly internally so that subqueries can continue
# properly.
self.assertEqual(Tag.objects.filter(name__in=()).update(name="foo"), 0)
    def test_ticket10432(self):
        """__in accepts generators, both empty and non-empty."""
        # Testing an empty "__in" filter with a generator as the value.
        def f():
            return iter([])

        n_obj = Note.objects.all()[0]

        def g():
            yield n_obj.pk

        self.assertSequenceEqual(Note.objects.filter(pk__in=f()), [])
        self.assertEqual(list(Note.objects.filter(pk__in=g())), [n_obj])
    def test_ticket10742(self):
        """An __in lookup against a queryset runs as a SQL subquery — the
        inner queryset's result cache must stay unevaluated."""
        # Queries used in an __in clause don't execute subqueries
        subq = Author.objects.filter(num__lt=3000)
        qs = Author.objects.filter(pk__in=subq)
        self.assertSequenceEqual(qs, [self.a1, self.a2])

        # The subquery result cache should not be populated
        self.assertIsNone(subq._result_cache)

        subq = Author.objects.filter(num__lt=3000)
        qs = Author.objects.exclude(pk__in=subq)
        self.assertSequenceEqual(qs, [self.a3, self.a4])

        # The subquery result cache should not be populated
        self.assertIsNone(subq._result_cache)

        subq = Author.objects.filter(num__lt=3000)
        self.assertSequenceEqual(
            Author.objects.filter(Q(pk__in=subq) & Q(name="a1")),
            [self.a1],
        )

        # The subquery result cache should not be populated
        self.assertIsNone(subq._result_cache)
    def test_ticket7076(self):
        # Excluding shouldn't eliminate NULL entries: items with NULL
        # 'modified' and tags with a NULL parent must still be returned.
        self.assertSequenceEqual(
            Item.objects.exclude(modified=self.time1).order_by("name"),
            [self.i4, self.i3, self.i2],
        )
        self.assertSequenceEqual(
            Tag.objects.exclude(parent__name=self.t1.name),
            [self.t1, self.t4, self.t5],
        )
    def test_ticket7181(self):
        # Ordering by related tables should accommodate nullable fields (this
        # test is a little tricky, since NULL ordering is database dependent.
        # Instead, we just count the number of results).
        self.assertEqual(len(Tag.objects.order_by("parent__name")), 5)

        # Empty querysets can be merged with others.
        self.assertSequenceEqual(
            Note.objects.none() | Note.objects.all(),
            [self.n1, self.n2, self.n3],
        )
        self.assertSequenceEqual(
            Note.objects.all() | Note.objects.none(),
            [self.n1, self.n2, self.n3],
        )
        self.assertSequenceEqual(Note.objects.none() & Note.objects.all(), [])
        self.assertSequenceEqual(Note.objects.all() & Note.objects.none(), [])
    def test_ticket8439(self):
        # Complex combinations of conjunctions, disjunctions and nullable
        # relations.
        self.assertSequenceEqual(
            Author.objects.filter(
                Q(item__note__extrainfo=self.e2) | Q(report=self.r1, name="xyz")
            ),
            [self.a2],
        )
        self.assertSequenceEqual(
            Author.objects.filter(
                Q(report=self.r1, name="xyz") | Q(item__note__extrainfo=self.e2)
            ),
            [self.a2],
        )
        self.assertSequenceEqual(
            Annotation.objects.filter(
                Q(tag__parent=self.t1) | Q(notes__note="n1", name="a1")
            ),
            [self.ann1],
        )
        xx = ExtraInfo.objects.create(info="xx", note=self.n3)
        self.assertSequenceEqual(
            Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)),
            [self.n1, self.n3],
        )
        # Exactly one referenced LEFT OUTER join must remain in the query.
        q = Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)).query
        self.assertEqual(
            len(
                [
                    x
                    for x in q.alias_map.values()
                    if x.join_type == LOUTER and q.alias_refcount[x.table_alias]
                ]
            ),
            1,
        )
    def test_ticket17429(self):
        """
        Meta.ordering=None works the same as Meta.ordering=[].
        """
        original_ordering = Tag._meta.ordering
        Tag._meta.ordering = None
        try:
            self.assertCountEqual(
                Tag.objects.all(),
                [self.t1, self.t2, self.t3, self.t4, self.t5],
            )
        finally:
            # Always restore the class-level Meta, even if the assertion
            # fails, so other tests see the original ordering.
            Tag._meta.ordering = original_ordering
    def test_exclude(self):
        """exclude(cond) is equivalent to filter(~Q(cond)), including for
        OR'd and negated conditions."""
        self.assertQuerySetEqual(
            Item.objects.exclude(tags__name="t4"),
            Item.objects.filter(~Q(tags__name="t4")),
        )
        self.assertQuerySetEqual(
            Item.objects.exclude(Q(tags__name="t4") | Q(tags__name="t3")),
            Item.objects.filter(~(Q(tags__name="t4") | Q(tags__name="t3"))),
        )
        self.assertQuerySetEqual(
            Item.objects.exclude(Q(tags__name="t4") | ~Q(tags__name="t3")),
            Item.objects.filter(~(Q(tags__name="t4") | ~Q(tags__name="t3"))),
        )
    def test_nested_exclude(self):
        """exclude(~Q(cond)) is the double negation filter(~~Q(cond))."""
        self.assertQuerySetEqual(
            Item.objects.exclude(~Q(tags__name="t4")),
            Item.objects.filter(~~Q(tags__name="t4")),
        )
    def test_double_exclude(self):
        """Double negation of a Q (~~Q and ~Q(~Q(...))) equals the plain Q."""
        self.assertQuerySetEqual(
            Item.objects.filter(Q(tags__name="t4")),
            Item.objects.filter(~~Q(tags__name="t4")),
        )
        self.assertQuerySetEqual(
            Item.objects.filter(Q(tags__name="t4")),
            Item.objects.filter(~Q(~Q(tags__name="t4"))),
        )
    def test_exclude_in(self):
        """Negation equivalences also hold for __in lookups."""
        self.assertQuerySetEqual(
            Item.objects.exclude(Q(tags__name__in=["t4", "t3"])),
            Item.objects.filter(~Q(tags__name__in=["t4", "t3"])),
        )
        self.assertQuerySetEqual(
            Item.objects.filter(Q(tags__name__in=["t4", "t3"])),
            Item.objects.filter(~~Q(tags__name__in=["t4", "t3"])),
        )
    def test_ticket_10790_1(self):
        # Querying direct fields with isnull should trim the left outer join.
        # It also should not create INNER JOIN.
        q = Tag.objects.filter(parent__isnull=True)

        self.assertSequenceEqual(q, [self.t1])
        self.assertNotIn("JOIN", str(q.query))

        q = Tag.objects.filter(parent__isnull=False)

        self.assertSequenceEqual(q, [self.t2, self.t3, self.t4, self.t5])
        self.assertNotIn("JOIN", str(q.query))

        q = Tag.objects.exclude(parent__isnull=True)
        self.assertSequenceEqual(q, [self.t2, self.t3, self.t4, self.t5])
        self.assertNotIn("JOIN", str(q.query))

        q = Tag.objects.exclude(parent__isnull=False)
        self.assertSequenceEqual(q, [self.t1])
        self.assertNotIn("JOIN", str(q.query))

        q = Tag.objects.exclude(parent__parent__isnull=False)

        self.assertSequenceEqual(q, [self.t1, self.t2, self.t3])
        self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 1)
        self.assertNotIn("INNER JOIN", str(q.query))
    def test_ticket_10790_2(self):
        # Querying across several tables should strip only the last outer join,
        # while preserving the preceding inner joins.
        q = Tag.objects.filter(parent__parent__isnull=False)

        self.assertSequenceEqual(q, [self.t4, self.t5])
        self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
        self.assertEqual(str(q.query).count("INNER JOIN"), 1)

        # Querying without isnull should not convert anything to left outer
        # join.
        q = Tag.objects.filter(parent__parent=self.t1)
        self.assertSequenceEqual(q, [self.t4, self.t5])
        self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
        self.assertEqual(str(q.query).count("INNER JOIN"), 1)
    def test_ticket_10790_3(self):
        # Querying via indirect fields should populate the left outer join
        q = NamedCategory.objects.filter(tag__isnull=True)
        self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 1)
        # join to dumbcategory ptr_id
        self.assertEqual(str(q.query).count("INNER JOIN"), 1)
        self.assertSequenceEqual(q, [])

        # Querying across several tables should strip only the last join, while
        # preserving the preceding left outer joins.
        q = NamedCategory.objects.filter(tag__parent__isnull=True)
        self.assertEqual(str(q.query).count("INNER JOIN"), 1)
        self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 1)
        self.assertSequenceEqual(q, [self.nc1])
    def test_ticket_10790_4(self):
        # Querying across m2m field should not strip the m2m table from join.
        q = Author.objects.filter(item__tags__isnull=True)
        self.assertSequenceEqual(q, [self.a2, self.a3])
        self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 2)
        self.assertNotIn("INNER JOIN", str(q.query))

        q = Author.objects.filter(item__tags__parent__isnull=True)
        self.assertSequenceEqual(q, [self.a1, self.a2, self.a2, self.a3])
        self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 3)
        self.assertNotIn("INNER JOIN", str(q.query))
def test_ticket_10790_5(self):
# Querying with isnull=False across m2m field should not create outer
# joins
q = Author.objects.filter(item__tags__isnull=False)
self.assertSequenceEqual(q, [self.a1, self.a1, self.a2, self.a2, self.a4])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 2)
q = Author.objects.filter(item__tags__parent__isnull=False)
self.assertSequenceEqual(q, [self.a1, self.a2, self.a4])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 3)
q = Author.objects.filter(item__tags__parent__parent__isnull=False)
self.assertSequenceEqual(q, [self.a4])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 4)
def test_ticket_10790_6(self):
# Querying with isnull=True across m2m field should not create inner
# joins and strip last outer join
q = Author.objects.filter(item__tags__parent__parent__isnull=True)
self.assertSequenceEqual(
q,
[self.a1, self.a1, self.a2, self.a2, self.a2, self.a3],
)
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 4)
self.assertEqual(str(q.query).count("INNER JOIN"), 0)
q = Author.objects.filter(item__tags__parent__isnull=True)
self.assertSequenceEqual(q, [self.a1, self.a2, self.a2, self.a3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 3)
self.assertEqual(str(q.query).count("INNER JOIN"), 0)
def test_ticket_10790_7(self):
# Reverse querying with isnull should not strip the join
q = Author.objects.filter(item__isnull=True)
self.assertSequenceEqual(q, [self.a3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 1)
self.assertEqual(str(q.query).count("INNER JOIN"), 0)
q = Author.objects.filter(item__isnull=False)
self.assertSequenceEqual(q, [self.a1, self.a2, self.a2, self.a4])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 1)
def test_ticket_10790_8(self):
# Querying with combined q-objects should also strip the left outer
# join
q = Tag.objects.filter(Q(parent__isnull=True) | Q(parent=self.t1))
self.assertSequenceEqual(q, [self.t1, self.t2, self.t3])
self.assertEqual(str(q.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q.query).count("INNER JOIN"), 0)
def test_ticket_10790_combine(self):
# Combining queries should not re-populate the left outer join
q1 = Tag.objects.filter(parent__isnull=True)
q2 = Tag.objects.filter(parent__isnull=False)
q3 = q1 | q2
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3, self.t4, self.t5])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q3 = q1 & q2
self.assertSequenceEqual(q3, [])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q2 = Tag.objects.filter(parent=self.t1)
q3 = q1 | q2
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q3 = q2 | q1
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 0)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q1 = Tag.objects.filter(parent__isnull=True)
q2 = Tag.objects.filter(parent__parent__isnull=True)
q3 = q1 | q2
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 1)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
q3 = q2 | q1
self.assertSequenceEqual(q3, [self.t1, self.t2, self.t3])
self.assertEqual(str(q3.query).count("LEFT OUTER JOIN"), 1)
self.assertEqual(str(q3.query).count("INNER JOIN"), 0)
def test_ticket19672(self):
self.assertSequenceEqual(
Report.objects.filter(
Q(creator__isnull=False) & ~Q(creator__extra__value=41)
),
[self.r1],
)
def test_ticket_20250(self):
# A negated Q along with an annotated queryset failed in Django 1.4
qs = Author.objects.annotate(Count("item"))
qs = qs.filter(~Q(extra__value=0)).order_by("name")
self.assertIn("SELECT", str(qs.query))
self.assertSequenceEqual(qs, [self.a1, self.a2, self.a3, self.a4])
def test_lookup_constraint_fielderror(self):
msg = (
"Cannot resolve keyword 'unknown_field' into field. Choices are: "
"annotation, category, category_id, children, id, item, "
"managedmodel, name, note, parent, parent_id"
)
with self.assertRaisesMessage(FieldError, msg):
Tag.objects.filter(unknown_field__name="generic")
def test_common_mixed_case_foreign_keys(self):
"""
Valid query should be generated when fields fetched from joined tables
include FKs whose names only differ by case.
"""
c1 = SimpleCategory.objects.create(name="c1")
c2 = SimpleCategory.objects.create(name="c2")
c3 = SimpleCategory.objects.create(name="c3")
category = CategoryItem.objects.create(category=c1)
mixed_case_field_category = MixedCaseFieldCategoryItem.objects.create(
CaTeGoRy=c2
)
mixed_case_db_column_category = MixedCaseDbColumnCategoryItem.objects.create(
category=c3
)
CommonMixedCaseForeignKeys.objects.create(
category=category,
mixed_case_field_category=mixed_case_field_category,
mixed_case_db_column_category=mixed_case_db_column_category,
)
qs = CommonMixedCaseForeignKeys.objects.values(
"category",
"mixed_case_field_category",
"mixed_case_db_column_category",
"category__category",
"mixed_case_field_category__CaTeGoRy",
"mixed_case_db_column_category__category",
)
self.assertTrue(qs.first())
def test_excluded_intermediary_m2m_table_joined(self):
self.assertSequenceEqual(
Note.objects.filter(~Q(tag__annotation__name=F("note"))),
[self.n1, self.n2, self.n3],
)
self.assertSequenceEqual(
Note.objects.filter(tag__annotation__name="a1").filter(
~Q(tag__annotation__name=F("note"))
),
[],
)
def test_field_with_filterable(self):
self.assertSequenceEqual(
Author.objects.filter(extra=self.e2),
[self.a3, self.a4],
)
def test_negate_field(self):
self.assertSequenceEqual(
Note.objects.filter(negate=True),
[self.n1, self.n2],
)
self.assertSequenceEqual(Note.objects.exclude(negate=True), [self.n3])
def test_combining_does_not_mutate(self):
all_authors = Author.objects.all()
authors_with_report = Author.objects.filter(
Exists(Report.objects.filter(creator__pk=OuterRef("id")))
)
authors_without_report = all_authors.exclude(pk__in=authors_with_report)
items_before = Item.objects.filter(creator__in=authors_without_report)
self.assertCountEqual(items_before, [self.i2, self.i3, self.i4])
# Combining querysets doesn't mutate them.
all_authors | authors_with_report
all_authors & authors_with_report
authors_without_report = all_authors.exclude(pk__in=authors_with_report)
items_after = Item.objects.filter(creator__in=authors_without_report)
self.assertCountEqual(items_after, [self.i2, self.i3, self.i4])
self.assertCountEqual(items_before, items_after)
@skipUnlessDBFeature("supports_select_union")
def test_union_values_subquery(self):
items = Item.objects.filter(creator=OuterRef("pk"))
item_authors = Author.objects.annotate(is_creator=Exists(items)).order_by()
reports = Report.objects.filter(creator=OuterRef("pk"))
report_authors = Author.objects.annotate(is_creator=Exists(reports)).order_by()
all_authors = item_authors.union(report_authors).order_by("is_creator")
self.assertEqual(
list(all_authors.values_list("is_creator", flat=True)), [False, True]
)
| Queries1Tests |
python | coleifer__peewee | examples/anomaly_detection.py | {
"start": 68,
"end": 329
} | class ____(Model):
key = TextField()
value = IntegerField()
class Meta:
database = db
db.create_tables([Reg])
# Create a user-defined aggregate function suitable for computing the standard
# deviation of a series.
@db.aggregate('stddev')
| Reg |
python | kamyu104__LeetCode-Solutions | Python/flip-game-ii.py | {
"start": 1183,
"end": 2293
} | class ____(object):
def canWin(self, s):
"""
:type s: str
:rtype: bool
"""
lookup = {}
def canWinHelper(consecutives): # O(2^c) time
consecutives = tuple(sorted(c for c in consecutives if c >= 2)) # O(clogc) time
if consecutives not in lookup:
lookup[consecutives] = any(not canWinHelper(consecutives[:i] + (j, c-2-j) + consecutives[i+1:]) # O(c) time
for i, c in enumerate(consecutives) # O(c) time
for j in xrange(c - 1)) # O(c) time
return lookup[consecutives] # O(c) time
# re.findall: O(n) time, canWinHelper: O(c) in depth
return canWinHelper(map(len, re.findall(r'\+\++', s)))
# Time: O(c * n * c!), n is length of string, c is count of "++"
# Space: O(c * n), recursion would be called at most c in depth.
# Besides, it costs n space for modifying string at each depth.
| Solution2 |
python | tensorflow__tensorflow | tensorflow/python/ops/ragged/ragged_tensor_shape.py | {
"start": 1399,
"end": 26661
} | class ____:
"""A collection of tensors encoding the shape of a potentially ragged tensor.
Each `RaggedTensorDynamicShape` consists of an ordered list of dimension
sizes. There are two dimension types:
* "Uniform dimensions" are dimensions where all slices have the same
length. `RaggedTensorDynamicShape` records the size of each uniform
dimension using a single scalar integer.
* "Ragged dimensions" are dimensions whose slices may have different
lengths. `RaggedTensorDynamicShape` records the size of each ragged
dimension using an integer vector containing the slice lengths for all
the slices across that dimension.
Furthermore, there are two ways a dimension might be encoded:
* "Partitioned dimensions" are dimensions that are encoded using a
`RaggedTensor`'s `nested_row_splits`. The outermostmost partitioned
dimension must be uniform, and the innermost partitioned dimension must
be ragged.
* "Inner dimensions" are dimensions that are encoded using a
`RaggedTensor`'s `flat_values`. Inner dimensions are always uniform.
The sizes of partitioned dimensions are recorded using `partitioned_dim_sizes`
and `inner_dim_sizes`:
* `partitioned_dim_sizes` is a list of tensors (one for each partitioned
dimension).
* For uniform dimensions, the tensor is an integer scalar specifying the
size of all slices across that dimension.
* For ragged dimensions, the tensor is an integer vector specifying the
size of each slice across that dimension.
* `inner_dim_sizes` is a single integer vector, where each element
specifies the size of a single inner dimension.
Examples:
Tensor | Ragged | Partitioned Dim Sizes | Inner Dim
: Rank : : Sizes
------------------------------ | ------ | ---------------------- | ----------
`[[1, 2, 3], [4, 5, 6]]` | 0 | | `2, 3`
`[[1, 2], [], [3, 4, 5]]` | 1 | `3, (2, 0, 3)` |
`[[[1, 2], [3, 4]], [[5, 6]]]` | 1 | `2, (2, 1)` | 2
`[[[1, 2], [3]], [[4, 5]]]` | 2 | `2, (2, 1), (2, 1, 2)` |
"""
def __init__(self, partitioned_dim_sizes, inner_dim_sizes,
dim_size_dtype=None):
"""Creates a RaggedTensorDynamicShape.
Args:
partitioned_dim_sizes: A `list` of 0-D or 1-D integer `Tensor`, one for
each partitioned dimension. If dimension `d` is uniform, then
`partitioned_dim_sizes[d]` must be an integer scalar, specifying the
size of all slices across dimension `d`. If dimension `d` is ragged,
then `partitioned_dim_sizes[d]` must be an integer vector, specifying
the size of each slice across dimension `d`.
inner_dim_sizes: A 1-D integer `Tensor`, whose length is equal to the
number of inner dimensions. `inner_dim_sizes[n]` is the size of all
slices across the `n`th inner dimension (which is the
`(len(partitioned_dim_sizes)+n)`th dimension in the overall tensor.
dim_size_dtype: dtype for dimension sizes. If not specified, then it
is chosen based on the dtypes of `partitioned_dim_sizes` and
`inner_dim_sizes`.
"""
assert isinstance(partitioned_dim_sizes, (list, tuple))
with ops.name_scope(None, 'RaggedTensorDynamicShape',
(partitioned_dim_sizes, inner_dim_sizes)):
partitioned_dim_sizes = tuple(
ops.convert_to_tensor(size, name='partitioned_dimension_size_%d' % i)
for (i, size) in enumerate(partitioned_dim_sizes))
inner_dim_sizes = ops.convert_to_tensor(
inner_dim_sizes, name='inner_dim_sizes')
# Validate shapes.
if partitioned_dim_sizes:
for axis, dimension_size in enumerate(partitioned_dim_sizes):
if dimension_size.shape.ndims is None:
raise ValueError(
'rank of partitioned_dim_sizes[%d] is unknown' % axis)
dimension_size.shape.with_rank_at_most(1)
if partitioned_dim_sizes[0].shape.ndims == 1:
raise ValueError('outermost partitioned dimension must be uniform')
if partitioned_dim_sizes[-1].shape.ndims == 0:
raise ValueError('innermost partitioned dimension must be ragged')
inner_dim_sizes.shape.assert_has_rank(1)
# Convert dimension size tensors to a single dtype.
if dim_size_dtype is None:
dim_size_dtypes = set(
p.dtype for p in partitioned_dim_sizes if p.shape.ndims == 1)
if not dim_size_dtypes:
dim_size_dtype = dtypes.int64
elif len(dim_size_dtypes) == 1:
dim_size_dtype = dim_size_dtypes.pop()
else:
if not ragged_config.auto_cast_partition_dtype():
raise ValueError('partitioned_dim_sizes must have matching dtypes')
dim_size_dtype = dtypes.int64
partitioned_dim_sizes = tuple(math_ops.cast(p, dim_size_dtype)
for p in partitioned_dim_sizes)
inner_dim_sizes = math_ops.cast(inner_dim_sizes, dim_size_dtype)
self._partitioned_dim_sizes = partitioned_dim_sizes
self._inner_dim_sizes = inner_dim_sizes
def __repr__(self):
return ('RaggedTensorDynamicShape'
'(partitioned_dim_sizes=%r, inner_dim_sizes=%r)' %
(self._partitioned_dim_sizes, self._inner_dim_sizes))
@staticmethod
def from_dim_sizes(dim_sizes):
"""Constructs a ragged shape from a list of dimension sizes.
This list contains a single tensor for each dimension, where the tensor
is a scalar if the dimension is uniform, or a vector if the dimension is
ragged.
Args:
dim_sizes: List of int32 or int64 scalars or vectors.
Returns:
A RaggedTensorDynamicShape.
"""
with ops.name_scope(None, 'RaggedTensorDynamicShapeFromDimensionSizes',
[dim_sizes]):
dim_sizes = tuple(
ops.convert_to_tensor(size, preferred_dtype=dtypes.int64,
name='dim_sizes') for size in dim_sizes)
# Split the dimensions into partitioned & inner dimensions.
inner_split = 0
for dim, dim_size in enumerate(dim_sizes):
if dim_size.shape.ndims == 1:
inner_split = dim + 1
elif dim_size.shape.ndims != 0:
raise ValueError('Each dim_size must be a scalar or a vector')
return RaggedTensorDynamicShape(dim_sizes[:inner_split],
dim_sizes[inner_split:])
@classmethod
def from_tensor(cls, rt_input, dim_size_dtype=None):
"""Constructs a ragged shape for a potentially ragged tensor."""
with ops.name_scope(None, 'RaggedTensorDynamicShapeFromTensor', [rt_input]):
rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt_input)
if not ragged_tensor.is_ragged(rt_input):
return cls([], array_ops.shape(rt_input), dim_size_dtype=dim_size_dtype)
else:
partitioned_dim_sizes = (
(rt_input.nrows(),) + rt_input.nested_row_lengths())
return RaggedTensorDynamicShape(
partitioned_dim_sizes,
array_ops.shape(rt_input.flat_values)[1:],
dim_size_dtype=dim_size_dtype)
def dimension_size(self, axis):
"""Returns the size of slices across the specified dimension."""
if not isinstance(axis, int):
raise TypeError('axis must be an integer')
partitioned_ndims = len(self._partitioned_dim_sizes)
if axis < partitioned_ndims:
return self._partitioned_dim_sizes[axis]
else:
return self._inner_dim_sizes[axis - partitioned_ndims]
def is_ragged(self, axis):
"""Returns true if the indicated dimension is ragged."""
if not isinstance(axis, int):
raise TypeError('axis must be an integer')
rank = self.rank
if axis < 0:
raise ValueError('Negative axis values are not supported')
elif rank is not None and axis >= rank:
raise ValueError('Expected axis=%s < rank=%s' % (axis, rank))
else:
return (axis > 0 and axis < len(self._partitioned_dim_sizes) and
self._partitioned_dim_sizes[axis].shape.ndims == 1)
@property
def rank(self):
"""The number of dimensions in this shape, or None if unknown."""
inner_ndims = tensor_shape.dimension_value(self._inner_dim_sizes.shape[0])
if inner_ndims is None:
return None
else:
return len(self._partitioned_dim_sizes) + inner_ndims
@property
def partitioned_dim_sizes(self):
"""The partitioned dimension sizes for this shape.
Returns:
A `list` of 0-D or 1-D integer `Tensor`.
"""
return self._partitioned_dim_sizes
@property
def inner_dim_sizes(self):
"""The inner dimension sizes for this shape.
Returns:
A 1-D integer `Tensor`.
"""
return self._inner_dim_sizes
@property
def num_partitioned_dimensions(self):
"""The number of partitioned dimensions in this shape."""
return len(self._partitioned_dim_sizes)
@property
def num_inner_dimensions(self):
"""The number of inner dimensions, or `None` if not statically known."""
return tensor_shape.dimension_value(self._inner_dim_sizes.shape[0])
@property
def dim_size_dtype(self):
"""DType used by this shape for dimension sizes."""
return self._inner_dim_sizes.dtype
def broadcast_to_rank(self, rank):
"""Adds leading size-1 dimensions to broadcast `self` to the given rank.
E.g., if `shape1` is `[3, (D2), 4]`, then `shape1.broadcast_to_rank(5)`
is `[1, 1, 3, (D2), 4]`.
Args:
rank: The rank for the returned shape.
Returns:
A RaggedTensorDynamicShape with `rank` dimensions, whose inner dimensions
have the same size as `self` and whose outer dimensions have size `1`.
Raises:
ValueError: If `self.rank` is unknown or greater than `rank`.
"""
if self.rank is None:
raise ValueError('Unable to broadcast: self.rank is unknown')
dims_to_add = rank - self.rank
if dims_to_add < 0:
raise ValueError('Unable to broadcast: rank=%d must be greater than '
'self.rank=%d.' % (rank, self.rank))
elif dims_to_add == 0:
return self
elif self._partitioned_dim_sizes:
partitioned_dims = (1,) * dims_to_add + self._partitioned_dim_sizes
return RaggedTensorDynamicShape(partitioned_dims, self.inner_dim_sizes,
self.dim_size_dtype)
else:
inner_dims = array_ops.concat(
[array_ops.ones([dims_to_add], self.dim_size_dtype),
self.inner_dim_sizes],
axis=0)
return RaggedTensorDynamicShape([], inner_dims, self.dim_size_dtype)
def broadcast_dimension(self, axis, lengths):
"""Returns a shape that is broadcast-compatible with self & lengths.
* If dimension[axis] is uniform and lengths is a scalar, the check
that either lengths==1 or axis==1 or lengths==axis, and tile
dimension[axis] with tf.where(lengths==axis, 1, axis) repeats.
* If dimension[axis] is uniform and lengths is a vector, then check
that dimension[axis]==1, and raggedly tile dimension[axis] with
lengths repeats. (we can skip tiling if we statically know that
slice_lengths == 1??)
* If dimension[axis] is ragged and lengths is a scalar, then check
that lengths==1.
* If dimension[axis] is ragged and lengths is a vector, then check
that self.dimension_size(axis) == lengths.
Args:
axis: `int`. The dimension to broadcast.
lengths: 0-D or 1-D integer `Tensor`.
Returns:
A `RaggedTensorDynamicShape`.
"""
lengths = ragged_util.convert_to_int_tensor(
lengths, name='lengths', dtype=self.dim_size_dtype)
# Check whether lengths is a scalar (for uniform dimensions) or
# vector (for ragged dimensions).
if lengths.shape.ndims is None:
raise ValueError('lengths must have a known rank.')
elif lengths.shape.ndims > 1:
raise ValueError('lengths must be a scalar or vector')
else:
lengths_is_scalar = (lengths.shape.ndims == 0)
# Verify that the shapes are compatible.
if self.is_ragged(axis):
if lengths_is_scalar:
condition = math_ops.equal(lengths, 1)
else:
condition = math_ops.reduce_all(
math_ops.equal(lengths, self.dimension_size(axis)))
else:
axis_dim_size = self.dimension_size(axis)
if lengths_is_scalar:
condition = (
math_ops.equal(lengths, 1) | math_ops.equal(axis_dim_size, 1)
| math_ops.equal(axis_dim_size, lengths))
else:
condition = math_ops.equal(axis_dim_size, 1)
broadcast_err = [
'Unable to broadcast: dimension size mismatch in dimension', axis,
'lengths=', lengths, 'dim_size=',
self.dimension_size(axis)
]
broadcast_check = control_flow_assert.Assert(
condition, data=broadcast_err, summarize=10)
with ops.control_dependencies([broadcast_check]):
# Partitioned dimensions:
if axis < self.num_partitioned_dimensions:
if self.is_ragged(axis):
# Use an identity op to make sure the check actually gets run.
return RaggedTensorDynamicShape(
self._partitioned_dim_sizes,
array_ops.identity(self.inner_dim_sizes), self.dim_size_dtype)
else:
return self._broadcast_uniform_partitioned_dimension(axis, lengths)
# Inner dimensions:
else:
if lengths_is_scalar:
return self._broadcast_inner_dimension_to_uniform(axis, lengths)
else:
if axis == 0:
raise ValueError('Unable to broadcast: '
'outermost dimension must be uniform.')
return self._broadcast_inner_dimension_to_ragged(axis, lengths)
def num_slices_in_dimension(self, axis):
"""Returns the total number of slices across the indicated dimension."""
if axis < 0:
return constant_op.constant(1, dtype=self.dim_size_dtype)
elif self.is_ragged(axis):
return math_ops.reduce_sum(self._partitioned_dim_sizes[axis])
else:
return self.dimension_size(axis) * self.num_slices_in_dimension(axis - 1)
def _broadcast_uniform_partitioned_dimension(self, axis, lengths):
"""Broadcasts the partitioned dimension `axis` to match `lengths`."""
axis_dim_size = self.dimension_size(axis)
partitioned_sizes = list(self._partitioned_dim_sizes[:axis])
if lengths.shape.ndims == 0:
lengths = array_ops.where(
math_ops.equal(axis_dim_size, 1), lengths, axis_dim_size)
repeats = array_ops.where(math_ops.equal(axis_dim_size, 1), lengths, 1)
splits = array_ops_stack.stack([0, self.num_slices_in_dimension(axis)])
else:
splits = math_ops.range(
array_ops.size(lengths, out_type=self.dim_size_dtype) + 1)
repeats = lengths
partitioned_sizes.append(lengths)
for dim_size in self._partitioned_dim_sizes[axis + 1:]:
if dim_size.shape.ndims == 0:
partitioned_sizes.append(dim_size)
splits *= dim_size
else:
partitioned_sizes.append(
ragged_util.repeat_ranges(dim_size, splits, repeats))
splits = array_ops.gather(
ragged_util.lengths_to_splits(dim_size), splits)
inner_sizes = self._inner_dim_sizes
return RaggedTensorDynamicShape(partitioned_sizes, inner_sizes,
self.dim_size_dtype)
def _broadcast_inner_dimension_to_uniform(self, axis, length):
"""Broadcasts the inner dimension `axis` to match `lengths`."""
dim_size = self.dimension_size(axis)
axis_in_inner_dims = axis - self.num_partitioned_dimensions
partitioned_sizes = self._partitioned_dim_sizes
inner_sizes = array_ops.concat([
self._inner_dim_sizes[:axis_in_inner_dims],
[array_ops.where(math_ops.equal(dim_size, 1), length, dim_size)],
self._inner_dim_sizes[axis_in_inner_dims + 1:]
],
axis=0)
return RaggedTensorDynamicShape(partitioned_sizes, inner_sizes,
self.dim_size_dtype)
def _broadcast_inner_dimension_to_ragged(self, axis, lengths):
axis_in_inner_dims = axis - self.num_partitioned_dimensions
partitioned_sizes = (
self._partitioned_dim_sizes + tuple([
self._inner_dim_sizes[i] for i in range(axis_in_inner_dims)
]) + (lengths,))
inner_sizes = self._inner_dim_sizes[axis_in_inner_dims + 1:]
return RaggedTensorDynamicShape(partitioned_sizes, inner_sizes)
def with_dim_size_dtype(self, dtype):
if dtype not in (dtypes.int32, dtypes.int64):
raise ValueError('dtype must be int32 or int64')
if self.dim_size_dtype == dtype:
return self
return RaggedTensorDynamicShape(
[math_ops.cast(p, dtype) for p in self._partitioned_dim_sizes],
math_ops.cast(self._inner_dim_sizes, dtype))
def broadcast_dynamic_shape(shape_x, shape_y):
"""Returns the shape formed by broadcasting two shapes to be compatible.
Args:
shape_x: A `RaggedTensorDynamicShape`
shape_y: A `RaggedTensorDynamicShape`
Returns:
A `RaggedTensorDynamicShape`.
Raises:
ValueError: If `shape_x` and `shape_y` are not broadcast-compatible.
"""
if not isinstance(shape_x, RaggedTensorDynamicShape):
raise TypeError('shape_x must be a RaggedTensorDynamicShape')
if not isinstance(shape_y, RaggedTensorDynamicShape):
raise TypeError('shape_y must be a RaggedTensorDynamicShape')
# Broadcast both shapes to have the same rank.
if shape_x.rank is None or shape_y.rank is None:
raise ValueError('Unable to broadcast: unknown rank')
broadcast_rank = max(shape_x.rank, shape_y.rank)
shape_x = shape_x.broadcast_to_rank(broadcast_rank)
shape_y = shape_y.broadcast_to_rank(broadcast_rank)
# Broadcast dimensions one at a time, starting from the outermost dimension.
for axis in range(broadcast_rank):
shape_x = shape_x.broadcast_dimension(axis, shape_y.dimension_size(axis))
shape_y = shape_y.broadcast_dimension(axis, shape_x.dimension_size(axis))
return shape_x
def broadcast_to(rt_input, shape, broadcast_inner_dimensions=True):
"""Broadcasts a potentially ragged tensor to a ragged shape.
Tiles `rt_input` as necessary to match the given shape.
Behavior is undefined if `rt_input` is not broadcast-compatible with `shape`.
Args:
rt_input: The potentially ragged tensor to broadcast.
shape: A `RaggedTensorDynamicShape`
broadcast_inner_dimensions: If false, then inner dimensions will not be
tiled.
Returns:
A potentially ragged tensor whose values are taken from
`rt_input`, and whose shape matches `shape`.
"""
if not isinstance(shape, RaggedTensorDynamicShape):
raise TypeError('shape must be a RaggedTensorDynamicShape')
rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt_input)
# Broadcasting to a uniform shape.
if shape.num_partitioned_dimensions == 0:
return _broadcast_to_uniform_shape(rt_input, shape,
broadcast_inner_dimensions)
else:
return _broadcast_to_ragged_shape(rt_input, shape,
broadcast_inner_dimensions)
def _broadcast_to_uniform_shape(rt_input, shape, broadcast_inner_dimensions):
"""Broadcasts rt_input to the uniform shape `shape`."""
if isinstance(rt_input, ragged_tensor.RaggedTensor):
raise ValueError('Incompatible with shape: ragged rank mismatch')
if broadcast_inner_dimensions:
return array_ops.broadcast_to(rt_input, shape.inner_dim_sizes)
else:
return rt_input
def _broadcast_to_ragged_shape(rt_input, dst_shape, broadcast_inner_dimensions):
"""Broadcasts rt_input to the ragged shape `dst_shape`."""
# Check that rt_input and dst_shape have the same row_splits dtype.
if (isinstance(rt_input, ragged_tensor.RaggedTensor) and
rt_input.row_splits.dtype != dst_shape.dim_size_dtype):
if not ragged_config.auto_cast_partition_dtype():
raise ValueError('rt_input and dst_shape have different row_split '
'dtypes; use RaggedTensor.with_row_splits_dtype() or '
'RaggedTensorDynamicShape.with_dim_size_dtype() to '
'convert to a compatible dtype.')
rt_input = rt_input.with_row_splits_dtype(dtypes.int64)
dst_shape = dst_shape.with_dim_size_dtype(dtypes.int64)
# dst_shape's rank and ragged_rank must be greater than or equal to rt_input's
if rt_input.shape.ndims is None or dst_shape.rank is None:
raise ValueError('Unable to broadcast: unknown rank')
if rt_input.shape.ndims > dst_shape.rank:
raise ValueError('Incompatible with shape: rank mismatch')
if (isinstance(rt_input, ragged_tensor.RaggedTensor) and
rt_input.ragged_rank >= dst_shape.num_partitioned_dimensions):
raise ValueError('Incompatible with shape: ragged rank mismatch')
src_shape = RaggedTensorDynamicShape.from_tensor(rt_input)
src_shape = src_shape.broadcast_to_rank(dst_shape.rank)
# Add dimensions to rt_input so its rank and ragged_rank matches dst_shape.
if dst_shape.rank > rt_input.shape.ndims:
if rt_input.shape.ndims < dst_shape.num_inner_dimensions + 1:
rt_input = array_ops.reshape(
rt_input, array_ops.concat([[-1], dst_shape.inner_dim_sizes], axis=0))
for _ in range(dst_shape.rank - rt_input.shape.ndims):
if ragged_tensor.is_ragged(rt_input):
nrows = rt_input.nrows()
else:
nrows = array_ops.shape(rt_input,
out_type=dst_shape.dim_size_dtype)[0]
rt_input = ragged_tensor.RaggedTensor.from_row_lengths(rt_input, [nrows],
validate=False)
# Add ragged dimensions to match dst_shape.
if ragged_tensor.is_ragged(rt_input):
inner_rank_diff = (
rt_input.flat_values.shape.ndims - 1 - dst_shape.num_inner_dimensions)
if inner_rank_diff > 0:
rt_input = rt_input.with_flat_values(
ragged_tensor.RaggedTensor.from_tensor(
rt_input.flat_values, ragged_rank=inner_rank_diff,
row_splits_dtype=dst_shape.dim_size_dtype))
else:
rt_input = ragged_tensor.RaggedTensor.from_tensor(
rt_input, ragged_rank=dst_shape.num_partitioned_dimensions - 1,
row_splits_dtype=dst_shape.dim_size_dtype)
# Do broadcasting for any dimensions that will remain uniform. We can do
# these all at once, since they're independent of one another.
multiples = [1] * dst_shape.rank
for axis in range(dst_shape.num_partitioned_dimensions):
if not src_shape.is_ragged(axis) and not dst_shape.is_ragged(axis):
src_size = src_shape.dimension_size(axis)
dst_size = dst_shape.dimension_size(axis)
if ((tensor_util.constant_value(src_size) in (1, None)) and
(tensor_util.constant_value(dst_size) != 1)):
multiples[axis] = array_ops.where(
math_ops.equal(src_size, 1), dst_size, 1)
if not all(isinstance(v, int) and v == 1 for v in multiples):
multiples = array_ops_stack.stack(multiples, axis=0)
rt_input = ragged_array_ops.tile(rt_input, multiples)
if broadcast_inner_dimensions:
new_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(
rt_input.flat_values, out_type=dst_shape.dim_size_dtype),
array_ops.concat([[1], dst_shape.inner_dim_sizes], axis=0))
rt_input = rt_input.with_flat_values(
array_ops.broadcast_to(rt_input.flat_values, new_shape))
# Do broadcasting for dimensions that become ragged. We must do these from
# outermost to innermost.
for axis in range(dst_shape.num_partitioned_dimensions):
if not src_shape.is_ragged(axis) and dst_shape.is_ragged(axis):
dst_size = dst_shape.dimension_size(axis)
rt_input = _ragged_tile_axis(rt_input, axis, dst_size,
dst_shape.dim_size_dtype)
return rt_input
def _ragged_tile_axis(rt_input, axis, repeats, row_splits_dtype):
"""Tile a dimension of a RaggedTensor to match a ragged shape."""
assert axis > 0 # Outermost dimension may not be ragged.
if not ragged_tensor.is_ragged(rt_input):
rt_input = ragged_tensor.RaggedTensor.from_tensor(
rt_input, ragged_rank=1, row_splits_dtype=row_splits_dtype)
if axis > 1:
return rt_input.with_values(
_ragged_tile_axis(rt_input.values, axis - 1, repeats,
row_splits_dtype))
else:
src_row_splits = rt_input.nested_row_splits
src_row_lengths = rt_input.nested_row_lengths()
splits = src_row_splits[0]
dst_row_lengths = [repeats]
for i in range(1, len(src_row_lengths)):
dst_row_lengths.append(
ragged_util.repeat_ranges(src_row_lengths[i], splits, repeats))
splits = array_ops.gather(src_row_splits[i], splits)
dst_values = ragged_util.repeat_ranges(rt_input.flat_values, splits,
repeats)
return ragged_tensor.RaggedTensor.from_nested_row_lengths(
dst_values, dst_row_lengths, validate=False)
| RaggedTensorDynamicShape |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_vertex_ai.py | {
"start": 114637,
"end": 115706
} | class ____:
@mock.patch(VERTEX_AI_PATH.format("model_service.Model.to_dict"))
@mock.patch(VERTEX_AI_PATH.format("model_service.ModelServiceHook"))
def test_execute(self, mock_hook, to_dict_mock):
op = ListModelVersionsOperator(
task_id=TASK_ID,
model_id=TEST_MODEL_NAME,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.list_model_versions.assert_called_once_with(
region=GCP_LOCATION,
project_id=GCP_PROJECT,
model_id=TEST_MODEL_NAME,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
| TestVertexAIListModelVersionsOperator |
python | getsentry__sentry | tests/sentry/auth_v2/utils/test_session.py | {
"start": 802,
"end": 2538
} | class ____(TestCase):
def setUp(self) -> None:
self.serializer = SessionSerializer()
def test_serialize_with_full_session_data(self) -> None:
request = self.make_request(method="GET")
request.session = MockSession()
request.session.data = {
"todo_email_verification": False,
"todo_2fa_verification": True,
"todo_password_reset": False,
"todo_2fa_setup": False,
"_auth_user_id": "123",
"session_orgs": ["org1", "org2"],
}
request.META["CSRF_COOKIE"] = "csrf_token_value"
result = self.serializer.serialize(obj=Request(request), attrs={}, user=self.user)
assert result == {
"todoEmailVerification": False,
"todo2faVerification": True,
"todoPasswordReset": False,
"todo2faSetup": False,
"userId": "123",
"sessionCsrfToken": "csrf_token_value",
"sessionExpiryDate": EXPIRY_DATE,
"sessionOrgs": ["org1", "org2"],
}
def test_serialize_with_empty_session_data(self) -> None:
request = self.make_request(method="GET")
request.session = MockSession()
request.META["CSRF_COOKIE"] = "csrf_token_value"
result = self.serializer.serialize(obj=Request(request), attrs={}, user=self.user)
assert result == {
"todoEmailVerification": None,
"todo2faVerification": None,
"todoPasswordReset": None,
"todo2faSetup": None,
"userId": None,
"sessionCsrfToken": "csrf_token_value",
"sessionExpiryDate": EXPIRY_DATE,
"sessionOrgs": None,
}
| SessionSerializerTest |
python | has2k1__plotnine | plotnine/typing.py | {
"start": 3617,
"end": 3754
} | class ____(Protocol):
"""
Transform function
"""
def __call__(self, x: TFloatArrayLike) -> TFloatArrayLike: ...
| PTransform |
python | marshmallow-code__marshmallow | src/marshmallow/validate.py | {
"start": 17390,
"end": 18589
} | class ____(Validator):
"""Call the specified ``method`` of the ``value`` object. The
validator succeeds if the invoked method returns an object that
evaluates to True in a Boolean context. Any additional keyword
argument will be passed to the method.
:param method: The name of the method to invoke.
:param error: Error message to raise in case of a validation error.
Can be interpolated with `{input}` and `{method}`.
:param kwargs: Additional keyword arguments to pass to the method.
"""
default_message = "Invalid input."
def __init__(self, method: str, *, error: str | None = None, **kwargs):
self.method = method
self.error: str = error or self.default_message
self.kwargs = kwargs
def _repr_args(self) -> str:
return f"method={self.method!r}, kwargs={self.kwargs!r}"
def _format_error(self, value: typing.Any) -> str:
return self.error.format(input=value, method=self.method)
def __call__(self, value: _T) -> _T:
method = getattr(value, self.method)
if not method(**self.kwargs):
raise ValidationError(self._format_error(value))
return value
| Predicate |
python | sympy__sympy | sympy/functions/elementary/exponential.py | {
"start": 1556,
"end": 4411
} | class ____(DefinedFunction):
unbranched = True
_singularities = (S.ComplexInfinity,)
@property
def kind(self):
return self.exp.kind
def inverse(self, argindex=1):
"""
Returns the inverse function of ``exp(x)``.
"""
return log
def as_numer_denom(self):
"""
Returns this with a positive exponent as a 2-tuple (a fraction).
Examples
========
>>> from sympy import exp
>>> from sympy.abc import x
>>> exp(-x).as_numer_denom()
(1, exp(x))
>>> exp(x).as_numer_denom()
(exp(x), 1)
"""
# this should be the same as Pow.as_numer_denom wrt
# exponent handling
if not self.is_commutative:
return self, S.One
exp = self.exp
neg_exp = exp.is_negative
if not neg_exp and not (-exp).is_negative:
neg_exp = exp.could_extract_minus_sign()
if neg_exp:
return S.One, self.func(-exp)
return self, S.One
@property
def exp(self):
"""
Returns the exponent of the function.
"""
return self.args[0]
def as_base_exp(self):
"""
Returns the 2-tuple (base, exponent).
"""
return self.func(1), Mul(*self.args)
def _eval_adjoint(self):
return self.func(self.exp.adjoint())
def _eval_conjugate(self):
return self.func(self.exp.conjugate())
def _eval_transpose(self):
return self.func(self.exp.transpose())
def _eval_is_finite(self):
arg = self.exp
if arg.is_infinite:
if arg.is_extended_negative:
return True
if arg.is_extended_positive:
return False
if arg.is_finite:
return True
def _eval_is_rational(self):
s = self.func(*self.args)
if s.func == self.func:
z = s.exp.is_zero
if z:
return True
elif s.exp.is_rational and fuzzy_not(z):
return False
else:
return s.is_rational
def _eval_is_zero(self):
return self.exp is S.NegativeInfinity
def _eval_power(self, other):
"""exp(arg)**e -> exp(arg*e) if assumptions allow it.
"""
b, e = self.as_base_exp()
return Pow._eval_power(Pow(b, e, evaluate=False), other)
def _eval_expand_power_exp(self, **hints):
from sympy.concrete.products import Product
from sympy.concrete.summations import Sum
arg = self.args[0]
if arg.is_Add and arg.is_commutative:
return Mul.fromiter(self.func(x) for x in arg.args)
elif isinstance(arg, Sum) and arg.is_commutative:
return Product(self.func(arg.function), *arg.limits)
return self.func(arg)
| ExpBase |
python | pytorch__pytorch | torch/ao/nn/quantizable/modules/rnn.py | {
"start": 9863,
"end": 14636
} | class ____(torch.nn.Module):
r"""A single bi-directional LSTM layer."""
def __init__(
self,
input_dim: int,
hidden_dim: int,
bias: bool = True,
batch_first: bool = False,
bidirectional: bool = False,
device=None,
dtype=None,
*,
split_gates=False,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
self.batch_first = batch_first
self.bidirectional = bidirectional
self.layer_fw = _LSTMSingleLayer(
input_dim, hidden_dim, bias=bias, split_gates=split_gates, **factory_kwargs
)
if self.bidirectional:
self.layer_bw = _LSTMSingleLayer(
input_dim,
hidden_dim,
bias=bias,
split_gates=split_gates,
**factory_kwargs,
)
def forward(self, x: Tensor, hidden: tuple[Tensor, Tensor] | None = None):
if self.batch_first:
x = x.transpose(0, 1)
if hidden is None:
hx_fw, cx_fw = (None, None)
else:
hx_fw, cx_fw = hidden
hidden_bw: tuple[Tensor, Tensor] | None = None
if self.bidirectional:
if hx_fw is None:
hx_bw = None
else:
hx_bw = hx_fw[1]
hx_fw = hx_fw[0]
if cx_fw is None:
cx_bw = None
else:
cx_bw = cx_fw[1]
cx_fw = cx_fw[0]
if hx_bw is not None and cx_bw is not None:
hidden_bw = hx_bw, cx_bw
if hx_fw is None and cx_fw is None:
hidden_fw = None
else:
hidden_fw = (
torch.jit._unwrap_optional(hx_fw),
torch.jit._unwrap_optional(cx_fw),
)
result_fw, hidden_fw = self.layer_fw(x, hidden_fw)
if hasattr(self, "layer_bw") and self.bidirectional:
x_reversed = x.flip(0)
result_bw, hidden_bw = self.layer_bw(x_reversed, hidden_bw)
result_bw = result_bw.flip(0)
result = torch.cat([result_fw, result_bw], result_fw.dim() - 1)
if hidden_fw is None and hidden_bw is None:
h = None
c = None
elif hidden_fw is None:
(h, c) = torch.jit._unwrap_optional(hidden_bw)
elif hidden_bw is None:
(h, c) = torch.jit._unwrap_optional(hidden_fw)
else:
h = torch.stack([hidden_fw[0], hidden_bw[0]], 0) # type: ignore[list-item]
c = torch.stack([hidden_fw[1], hidden_bw[1]], 0) # type: ignore[list-item]
else:
result = result_fw
h, c = torch.jit._unwrap_optional(hidden_fw) # type: ignore[assignment]
if self.batch_first:
result.transpose_(0, 1)
return result, (h, c)
@classmethod
def from_float(cls, other, layer_idx=0, qconfig=None, **kwargs):
r"""
There is no FP equivalent of this class. This function is here just to
mimic the behavior of the `prepare` within the `torch.ao.quantization`
flow.
"""
assert hasattr(other, "qconfig") or (qconfig is not None)
input_size = kwargs.get("input_size", other.input_size)
hidden_size = kwargs.get("hidden_size", other.hidden_size)
bias = kwargs.get("bias", other.bias)
batch_first = kwargs.get("batch_first", other.batch_first)
bidirectional = kwargs.get("bidirectional", other.bidirectional)
split_gates = kwargs.get("split_gates", False)
layer = cls(
input_size,
hidden_size,
bias,
batch_first,
bidirectional,
split_gates=split_gates,
)
# pyrefly: ignore [bad-argument-type]
layer.qconfig = getattr(other, "qconfig", qconfig)
wi = getattr(other, f"weight_ih_l{layer_idx}")
wh = getattr(other, f"weight_hh_l{layer_idx}")
bi = getattr(other, f"bias_ih_l{layer_idx}", None)
bh = getattr(other, f"bias_hh_l{layer_idx}", None)
layer.layer_fw = _LSTMSingleLayer.from_params(
wi, wh, bi, bh, split_gates=split_gates
)
if other.bidirectional:
wi = getattr(other, f"weight_ih_l{layer_idx}_reverse")
wh = getattr(other, f"weight_hh_l{layer_idx}_reverse")
bi = getattr(other, f"bias_ih_l{layer_idx}_reverse", None)
bh = getattr(other, f"bias_hh_l{layer_idx}_reverse", None)
layer.layer_bw = _LSTMSingleLayer.from_params(
wi, wh, bi, bh, split_gates=split_gates
)
return layer
| _LSTMLayer |
python | huggingface__transformers | src/transformers/models/sam_hq/modeling_sam_hq.py | {
"start": 26574,
"end": 29671
} | class ____(nn.Module):
"""
SAM_HQ's attention layer that allows for downscaling the size of the embedding after projection to queries, keys, and
values.
"""
def __init__(self, config, downsample_rate=None):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
downsample_rate = config.attention_downsample_rate if downsample_rate is None else downsample_rate
self.internal_dim = config.hidden_size // downsample_rate
self.num_attention_heads = config.num_attention_heads
if self.internal_dim % config.num_attention_heads != 0:
raise ValueError("num_attention_heads must divide hidden_size.")
self.scaling = (self.internal_dim // config.num_attention_heads) ** -0.5
self.q_proj = nn.Linear(self.hidden_size, self.internal_dim)
self.k_proj = nn.Linear(self.hidden_size, self.internal_dim)
self.v_proj = nn.Linear(self.hidden_size, self.internal_dim)
self.out_proj = nn.Linear(self.internal_dim, self.hidden_size)
self.is_causal = False
def _separate_heads(self, hidden_states: Tensor, num_attention_heads: int) -> Tensor:
batch, point_batch_size, n_tokens, channel = hidden_states.shape
c_per_head = channel // num_attention_heads
hidden_states = hidden_states.reshape(batch * point_batch_size, n_tokens, num_attention_heads, c_per_head)
return hidden_states.transpose(1, 2)
def _recombine_heads(self, hidden_states: Tensor, point_batch_size: int) -> Tensor:
batch, n_tokens, n_heads, c_per_head = hidden_states.shape
return hidden_states.reshape(batch // point_batch_size, point_batch_size, n_tokens, n_heads * c_per_head)
def forward(
self,
query: Tensor,
key: Tensor,
value: Tensor,
attention_similarity: Optional[Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Tensor:
# Input projections
query = self.q_proj(query)
key = self.k_proj(key)
value = self.v_proj(value)
point_batch_size = query.shape[1]
# Separate into heads
query = self._separate_heads(query, self.num_attention_heads)
key = self._separate_heads(key, self.num_attention_heads)
value = self._separate_heads(value, self.num_attention_heads)
# SamHQAttention
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query,
key,
value,
attention_mask=attention_similarity,
dropout=0.0,
scaling=self.scaling,
is_causal=self.is_causal,
**kwargs,
)
attn_output = self._recombine_heads(attn_output, point_batch_size)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
| SamHQAttention |
python | huggingface__transformers | src/transformers/models/opt/modeling_opt.py | {
"start": 1805,
"end": 3708
} | class ____(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int):
# OPT is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 2
super().__init__(num_embeddings + self.offset, embedding_dim)
def forward(
self,
attention_mask: torch.LongTensor,
past_key_values_length: int = 0,
position_ids: Optional[torch.LongTensor] = None,
):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
if position_ids is None:
position_ids = torch.cumsum(attention_mask, dim=1)
position_ids = (position_ids * attention_mask - 1).long()
# cut positions if `past_key_values_length` is > 0
position_ids = position_ids[:, past_key_values_length:]
return super().forward(position_ids + self.offset)
# Copied from transformers.models.siglip.modeling_siglip.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| OPTLearnedPositionalEmbedding |
python | keras-team__keras | guides/training_with_built_in_methods.py | {
"start": 20330,
"end": 23512
} | class ____(keras.utils.PyDataset):
def __init__(self, x, y, batch_size, **kwargs):
super().__init__(**kwargs)
self.x = x
self.y = y
self.batch_size = batch_size
def __len__(self):
return int(np.ceil(len(self.x) / float(self.batch_size)))
def __getitem__(self, idx):
batch_x = self.x[idx * self.batch_size : (idx + 1) * self.batch_size]
batch_y = self.y[idx * self.batch_size : (idx + 1) * self.batch_size]
return batch_x, batch_y
train_py_dataset = ExamplePyDataset(x_train, y_train, batch_size=32)
val_py_dataset = ExamplePyDataset(x_val, y_val, batch_size=32)
"""
To fit the model, pass the dataset instead as the `x` argument (no need for a `y`
argument since the dataset includes the targets), and pass the validation dataset
as the `validation_data` argument. And no need for the `validation_batch_size`
argument, since the dataset is already batched!
"""
model = get_compiled_model()
model.fit(
train_py_dataset, batch_size=64, validation_data=val_py_dataset, epochs=1
)
"""
Evaluating the model is just as easy:
"""
model.evaluate(val_py_dataset)
"""
Importantly, `PyDataset` objects support three common constructor arguments
that handle the parallel processing configuration:
- `workers`: Number of workers to use in multithreading or
multiprocessing. Typically, you'd set it to the number of
cores on your CPU.
- `use_multiprocessing`: Whether to use Python multiprocessing for
parallelism. Setting this to `True` means that your
dataset will be replicated in multiple forked processes.
This is necessary to gain compute-level (rather than I/O level)
benefits from parallelism. However it can only be set to
`True` if your dataset can be safely pickled.
- `max_queue_size`: Maximum number of batches to keep in the queue
when iterating over the dataset in a multithreaded or
multiprocessed setting.
You can reduce this value to reduce the CPU memory consumption of
your dataset. It defaults to 10.
By default, multiprocessing is disabled (`use_multiprocessing=False`) and only
one thread is used. You should make sure to only turn on `use_multiprocessing` if
your code is running inside a Python `if __name__ == "__main__":` block in order
to avoid issues.
Here's a 4-thread, non-multiprocessed example:
"""
train_py_dataset = ExamplePyDataset(x_train, y_train, batch_size=32, workers=4)
val_py_dataset = ExamplePyDataset(x_val, y_val, batch_size=32, workers=4)
model = get_compiled_model()
model.fit(
train_py_dataset, batch_size=64, validation_data=val_py_dataset, epochs=1
)
"""
## Training & evaluation using PyTorch `DataLoader` objects
All built-in training and evaluation APIs are also compatible with `torch.utils.data.Dataset` and
`torch.utils.data.DataLoader` objects -- regardless of whether you're using the PyTorch backend,
or the JAX or TensorFlow backends. Let's take a look at a simple example.
Unlike `PyDataset` which are batch-centric, PyTorch `Dataset` objects are sample-centric:
the `__len__` method returns the number of samples,
and the `__getitem__` method returns a specific sample.
"""
| ExamplePyDataset |
python | pypa__warehouse | tests/unit/forklift/test_legacy.py | {
"start": 6384,
"end": 14865
} | class ____:
def test_defaults_to_true(self):
assert legacy._is_valid_dist_file("", "") == (True, None)
@pytest.mark.parametrize(
("filename", "filetype"),
[
("test.zip", "sdist"),
("test.whl", "bdist_wheel"),
],
)
def test_bails_with_invalid_zipfile(self, tmpdir, filename, filetype):
f = str(tmpdir.join(filename))
with open(f, "wb") as fp:
fp.write(b"this isn't a valid zip file")
assert legacy._is_valid_dist_file(f, filetype) == (
False,
"File is not a zipfile",
)
@pytest.mark.parametrize("filename", ["test.tar.gz"])
def test_bails_with_invalid_tarfile(self, tmpdir, filename):
fake_tar = str(tmpdir.join(filename))
with open(fake_tar, "wb") as fp:
fp.write(b"Definitely not a valid tar file.")
assert legacy._is_valid_dist_file(fake_tar, "sdist") == (
False,
"File is not a tarfile",
)
@pytest.mark.parametrize("filename", ["test.tar.gz"])
def test_bails_with_valid_tarfile_that_raises_exception(self, tmpdir, filename):
fake_tar = str(tmpdir.join(filename))
# Create a tarfile in memory
buffer = io.BytesIO()
with tarfile.open(fileobj=buffer, mode="w") as tar:
# Add a file with valid content
file_content = b"Hello, World!"
tarinfo = tarfile.TarInfo(name="example.txt")
tarinfo.size = len(file_content)
tar.addfile(tarinfo, io.BytesIO(file_content))
# Get the tar data
tar_data = buffer.getvalue()
# Corrupt the tar file by truncating it
corrupted_tar_data = tar_data[:-10] # Remove last 10 bytes
# Save the corrupted tar data to a file
with open(fake_tar, "wb") as f:
f.write(corrupted_tar_data)
# This should pass
assert tarfile.is_tarfile(fake_tar)
# This should fail
assert legacy._is_valid_dist_file(fake_tar, "sdist") == (False, None)
@pytest.mark.parametrize("compression", ["gz"])
def test_tarfile_validation_invalid(self, tmpdir, compression):
file_extension = f".{compression}" if compression else ""
tar_fn = str(tmpdir.join(f"test.tar{file_extension}"))
data_file = str(tmpdir.join("dummy_data"))
with open(data_file, "wb") as fp:
fp.write(b"Dummy data file.")
with tarfile.open(tar_fn, f"w:{compression}") as tar:
tar.add(data_file, arcname="package/__init__.py")
tar.add(data_file, arcname="package/module.py")
assert legacy._is_valid_dist_file(tar_fn, "sdist") == (
False,
"PKG-INFO not found at package/PKG-INFO",
)
@pytest.mark.parametrize("compression", ["gz"])
def test_tarfile_validation_valid(self, tmpdir, compression):
file_extension = f".{compression}" if compression else ""
tar_fn = str(tmpdir.join(f"test.tar{file_extension}"))
data_file = str(tmpdir.join("dummy_data"))
with open(data_file, "wb") as fp:
fp.write(b"Dummy data file.")
with tarfile.open(tar_fn, f"w:{compression}") as tar:
tar.add(data_file, arcname="package/module.py")
tar.add(data_file, arcname="package/PKG-INFO")
tar.add(data_file, arcname="package/data_file.txt")
assert legacy._is_valid_dist_file(tar_fn, "sdist") == (True, None)
def test_zip_no_pkg_info(self, tmpdir):
f = str(tmpdir.join("test.zip"))
with zipfile.ZipFile(f, "w") as zfp:
zfp.writestr("package/something.txt", b"Just a placeholder file")
zfp.writestr("package/else.txt", b"Just a placeholder file")
assert legacy._is_valid_dist_file(f, "sdist") == (
False,
"PKG-INFO not found at package/PKG-INFO",
)
def test_zip_has_pkg_info(self, tmpdir):
f = str(tmpdir.join("test.zip"))
with zipfile.ZipFile(f, "w") as zfp:
zfp.writestr("package/something.txt", b"Just a placeholder file")
zfp.writestr("package/PKG-INFO", b"this is the package info")
assert legacy._is_valid_dist_file(f, "sdist") == (True, None)
def test_zipfile_supported_compression(self, tmpdir):
f = str(tmpdir.join("test.zip"))
with zipfile.ZipFile(f, "w") as zfp:
zfp.writestr("test-1.0/src/__init__.py", b"this is the module")
zfp.writestr("test-1.0/PKG-INFO", b"this is the package info")
zfp.writestr("test-1.0/1.txt", b"1", zipfile.ZIP_STORED)
zfp.writestr("test-1.0/2.txt", b"2", zipfile.ZIP_DEFLATED)
assert legacy._is_valid_dist_file(f, "") == (True, None)
@pytest.mark.parametrize("method", [zipfile.ZIP_BZIP2, zipfile.ZIP_LZMA])
def test_zipfile_unsupported_compression(self, tmpdir, method):
f = str(tmpdir.join("test.zip"))
with zipfile.ZipFile(f, "w") as zfp:
zfp.writestr("test-1.0/1.txt", b"1", zipfile.ZIP_STORED)
zfp.writestr("test-1.0/2.txt", b"2", zipfile.ZIP_DEFLATED)
zfp.writestr("test-1.0/3.txt", b"3", method)
assert legacy._is_valid_dist_file(f, "") == (
False,
"File does not use a supported compression type",
)
def test_zipfile_exceeds_compression_threshold(self, tmpdir):
f = str(tmpdir.join("test.zip"))
with zipfile.ZipFile(f, "w") as zfp:
zfp.writestr("PKG-INFO", b"this is the package info")
zfp.writestr(
"1.dat", b"0" * 65 * warehouse.constants.ONE_MIB, zipfile.ZIP_DEFLATED
)
assert legacy._is_valid_dist_file(f, "") == (
False,
"File exceeds compression ratio of 50",
)
def test_wheel_no_wheel_file(self, tmpdir):
f = str(tmpdir.join("test-1.0-py3-none-any.whl"))
with zipfile.ZipFile(f, "w") as zfp:
zfp.writestr("something.txt", b"Just a placeholder file")
assert legacy._is_valid_dist_file(f, "bdist_wheel") == (
False,
"WHEEL not found at test-1.0.dist-info/WHEEL",
)
def test_wheel_has_wheel_file(self, tmpdir):
f = str(tmpdir.join("test-1.0-py3-none-any.whl"))
with zipfile.ZipFile(f, "w") as zfp:
zfp.writestr("something.txt", b"Just a placeholder file")
zfp.writestr("test-1.0.dist-info/WHEEL", b"this is the package info")
assert legacy._is_valid_dist_file(f, "bdist_wheel") == (True, None)
def test_invalid_wheel_filename(self, tmpdir):
f = str(tmpdir.join("cheese.whl"))
with zipfile.ZipFile(f, "w") as zfp:
zfp.writestr("something.txt", b"Just a placeholder file")
zfp.writestr("test-1.0.dist-info/WHEEL", b"this is the package info")
assert legacy._is_valid_dist_file(f, "bdist_wheel") == (
False,
"Unable to parse name and version from wheel filename",
)
def test_incorrect_number_of_top_level_directories_sdist_tar(self, tmpdir):
tar_fn = str(tmpdir.join("test.tar.gz"))
data_file = str(tmpdir.join("dummy_data"))
with open(data_file, "wb") as fp:
fp.write(b"Dummy data file.")
with tarfile.open(tar_fn, "w:gz") as tar:
tar.add(data_file, arcname="package/module.py")
tar.add(data_file, arcname="package/PKG-INFO")
tar.add(data_file, arcname="package/data_file.txt")
tar.add(data_file, arcname="notpackage/test.txt")
assert legacy._is_valid_dist_file(tar_fn, "sdist") == (
False,
"Incorrect number of top-level directories in sdist",
)
def test_incorrect_number_of_top_level_directories_sdist_zip(self, tmpdir):
f = str(tmpdir.join("test.zip"))
with zipfile.ZipFile(f, "w") as zfp:
zfp.writestr("test-1.0/src/__init__.py", b"this is the module")
zfp.writestr("test-1.0/PKG-INFO", b"this is the package info")
zfp.writestr("test-1.0/1.txt", b"1", zipfile.ZIP_STORED)
zfp.writestr("test-1.0/2.txt", b"2", zipfile.ZIP_DEFLATED)
zfp.writestr("notpackage/test.txt", b"2", zipfile.ZIP_DEFLATED)
assert legacy._is_valid_dist_file(f, "") == (
False,
"Incorrect number of top-level directories in sdist",
)
| TestFileValidation |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.