language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/sensors/test_kinesis_analytics.py | {
"start": 1181,
"end": 3807
} | class ____:
SENSOR = KinesisAnalyticsV2StartApplicationCompletedSensor
APPLICATION_ARN = "arn:aws:kinesisanalytics:us-east-1:123456789012:application/demo"
def setup_method(self):
self.default_op_kwargs = dict(
task_id="start_application_sensor",
application_name="demo",
poke_interval=5,
max_retries=1,
)
self.sensor = self.SENSOR(**self.default_op_kwargs, aws_conn_id=None)
def test_base_aws_op_attributes(self):
op = self.SENSOR(**self.default_op_kwargs)
assert op.hook.aws_conn_id == "aws_default"
assert op.hook._region_name is None
assert op.hook._verify is None
assert op.hook._config is None
op = self.SENSOR(
**self.default_op_kwargs,
aws_conn_id="aws-test-custom-conn",
region_name="eu-west-1",
verify=False,
botocore_config={"read_timeout": 42},
)
assert op.hook.aws_conn_id == "aws-test-custom-conn"
assert op.hook._region_name == "eu-west-1"
assert op.hook._verify is False
assert op.hook._config is not None
assert op.hook._config.read_timeout == 42
@pytest.mark.parametrize("state", SENSOR.SUCCESS_STATES)
@mock.patch.object(KinesisAnalyticsV2Hook, "conn")
def test_poke_success_state(self, mock_conn, state):
mock_conn.describe_application.return_value = {
"ApplicationDetail": {"ApplicationARN": self.APPLICATION_ARN, "ApplicationStatus": state}
}
assert self.sensor.poke({}) is True
@pytest.mark.parametrize("state", SENSOR.INTERMEDIATE_STATES)
@mock.patch.object(KinesisAnalyticsV2Hook, "conn")
def test_intermediate_state(self, mock_conn, state):
mock_conn.describe_application.return_value = {
"ApplicationDetail": {"ApplicationARN": self.APPLICATION_ARN, "ApplicationStatus": state}
}
assert self.sensor.poke({}) is False
@pytest.mark.parametrize("state", SENSOR.FAILURE_STATES)
@mock.patch.object(KinesisAnalyticsV2Hook, "conn")
def test_poke_failure_states(self, mock_conn, state):
mock_conn.describe_application.return_value = {
"ApplicationDetail": {"ApplicationARN": self.APPLICATION_ARN, "ApplicationStatus": state}
}
sensor = self.SENSOR(**self.default_op_kwargs, aws_conn_id=None)
with pytest.raises(
AirflowException, match="AWS Managed Service for Apache Flink application start failed"
):
sensor.poke({})
| TestKinesisAnalyticsV2StartApplicationCompletedSensor |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 77184,
"end": 81268
} | class ____(system_info):
section = 'blas'
dir_env_var = 'BLAS'
_lib_names = ['blas']
notfounderror = BlasNotFoundError
def calc_info(self):
lib_dirs = self.get_lib_dirs()
opt = self.get_option_single('blas_libs', 'libraries')
blas_libs = self.get_libs(opt, self._lib_names)
info = self.check_libs(lib_dirs, blas_libs, [])
if info is None:
return
else:
info['include_dirs'] = self.get_include_dirs()
if platform.system() == 'Windows':
# The check for windows is needed because get_cblas_libs uses the
# same compiler that was used to compile Python and msvc is
# often not installed when mingw is being used. This rough
# treatment is not desirable, but windows is tricky.
info['language'] = 'f77' # XXX: is it generally true?
# If cblas is given as an option, use those
cblas_info_obj = cblas_info()
cblas_opt = cblas_info_obj.get_option_single('cblas_libs', 'libraries')
cblas_libs = cblas_info_obj.get_libs(cblas_opt, None)
if cblas_libs:
info['libraries'] = cblas_libs + blas_libs
info['define_macros'] = [('HAVE_CBLAS', None)]
else:
lib = self.get_cblas_libs(info)
if lib is not None:
info['language'] = 'c'
info['libraries'] = lib
info['define_macros'] = [('HAVE_CBLAS', None)]
self.set_info(**info)
def get_cblas_libs(self, info):
""" Check whether we can link with CBLAS interface
This method will search through several combinations of libraries
to check whether CBLAS is present:
1. Libraries in ``info['libraries']``, as is
2. As 1. but also explicitly adding ``'cblas'`` as a library
3. As 1. but also explicitly adding ``'blas'`` as a library
4. Check only library ``'cblas'``
5. Check only library ``'blas'``
Parameters
----------
info : dict
system information dictionary for compilation and linking
Returns
-------
libraries : list of str or None
a list of libraries that enables the use of CBLAS interface.
Returns None if not found or a compilation error occurs.
Since 1.17 returns a list.
"""
# primitive cblas check by looking for the header and trying to link
# cblas or blas
c = customized_ccompiler()
tmpdir = tempfile.mkdtemp()
s = textwrap.dedent("""\
#include <cblas.h>
int main(int argc, const char *argv[])
{
double a[4] = {1,2,3,4};
double b[4] = {5,6,7,8};
return cblas_ddot(4, a, 1, b, 1) > 10;
}""")
src = os.path.join(tmpdir, 'source.c')
try:
with open(src, 'w') as f:
f.write(s)
try:
# check we can compile (find headers)
obj = c.compile([src], output_dir=tmpdir,
include_dirs=self.get_include_dirs())
except (distutils.ccompiler.CompileError, distutils.ccompiler.LinkError):
return None
# check we can link (find library)
# some systems have separate cblas and blas libs.
for libs in [info['libraries'], ['cblas'] + info['libraries'],
['blas'] + info['libraries'], ['cblas'], ['blas']]:
try:
c.link_executable(obj, os.path.join(tmpdir, "a.out"),
libraries=libs,
library_dirs=info['library_dirs'],
extra_postargs=info.get('extra_link_args', []))
return libs
except distutils.ccompiler.LinkError:
pass
finally:
shutil.rmtree(tmpdir)
return None
| blas_info |
python | pypa__setuptools | setuptools/_distutils/compilers/C/errors.py | {
"start": 251,
"end": 362
} | class ____(Error):
"""Failure to create a static library from one or more C/C++ object
files."""
| LibError |
python | getsentry__sentry | tests/sentry/conduit/test_tasks.py | {
"start": 2759,
"end": 7881
} | class ____(TestCase):
@responses.activate
@override_settings(
CONDUIT_PUBLISH_SECRET="test-secret",
CONDUIT_PUBLISH_JWT_ISSUER="sentry",
CONDUIT_PUBLISH_JWT_AUDIENCE="conduit",
CONDUIT_PUBLISH_URL="http://localhost:9093",
)
def test_publish_data_success(self):
"""Test successful publish request."""
org_id = 123
channel_id = str(uuid4())
token = generate_jwt(subject="test")
publish_request = PublishRequest(
channel_id=channel_id,
message_id=str(uuid4()),
sequence=0,
client_timestamp=get_timestamp(),
phase=Phase.PHASE_START,
)
responses.add(
responses.POST,
f"http://localhost:9093/publish/{org_id}/{channel_id}",
status=200,
)
response = publish_data(
org_id=org_id,
publish_request=publish_request,
token=token,
publish_url="http://localhost:9093",
)
assert response.status_code == 200
assert len(responses.calls) == 1
request = responses.calls[0].request
assert request.headers["Authorization"] == f"Bearer {token}"
assert request.headers["Content-Type"] == "application/x-protobuf"
@responses.activate
@override_settings(
CONDUIT_PUBLISH_SECRET="test-secret",
CONDUIT_PUBLISH_JWT_ISSUER="sentry",
CONDUIT_PUBLISH_JWT_AUDIENCE="conduit",
CONDUIT_PUBLISH_URL="http://localhost:9093",
)
def test_publish_data_retry_on_failure(self):
"""Test that publish_data retries on RequestException."""
org_id = 123
channel_id = str(uuid4())
token = generate_jwt(subject="test")
publish_request = PublishRequest(
channel_id=channel_id,
message_id=str(uuid4()),
sequence=0,
client_timestamp=get_timestamp(),
phase=Phase.PHASE_START,
)
# Fails twice, then succeeds
responses.add(
responses.POST,
f"http://localhost:9093/publish/{org_id}/{channel_id}",
status=500,
)
responses.add(
responses.POST,
f"http://localhost:9093/publish/{org_id}/{channel_id}",
status=500,
)
responses.add(
responses.POST,
f"http://localhost:9093/publish/{org_id}/{channel_id}",
status=200,
)
response = publish_data(
org_id=org_id,
publish_request=publish_request,
token=token,
publish_url="http://localhost:9093",
)
assert response.status_code == 200
assert len(responses.calls) == 3
@responses.activate
@override_settings(
CONDUIT_PUBLISH_SECRET="test-secret",
CONDUIT_PUBLISH_JWT_ISSUER="sentry",
CONDUIT_PUBLISH_JWT_AUDIENCE="conduit",
CONDUIT_PUBLISH_URL="http://localhost:9093",
)
def test_publish_data_max_retries_exceeded(self):
"""Test that publish_data raises after max retries."""
org_id = 123
channel_id = str(uuid4())
token = generate_jwt(subject="test")
publish_request = PublishRequest(
channel_id=channel_id,
message_id=str(uuid4()),
sequence=0,
client_timestamp=get_timestamp(),
phase=Phase.PHASE_START,
)
for _ in range(PUBLISH_REQUEST_MAX_RETRIES):
responses.add(
responses.POST,
f"http://localhost:9093/publish/{org_id}/{channel_id}",
status=500,
)
with pytest.raises(RequestException):
publish_data(
org_id=org_id,
publish_request=publish_request,
token=token,
)
assert len(responses.calls) == PUBLISH_REQUEST_MAX_RETRIES
@responses.activate
@override_settings(
CONDUIT_PUBLISH_SECRET="test-secret",
CONDUIT_PUBLISH_JWT_ISSUER="sentry",
CONDUIT_PUBLISH_JWT_AUDIENCE="conduit",
CONDUIT_PUBLISH_URL="http://localhost:9093",
)
def test_publish_data_uses_custom_url(self):
"""Test that publish_data uses provided publish_url."""
org_id = 123
channel_id = str(uuid4())
token = generate_jwt(subject="test")
custom_url = "http://custom.example.com"
publish_request = PublishRequest(
channel_id=channel_id,
message_id=str(uuid4()),
sequence=0,
client_timestamp=get_timestamp(),
phase=Phase.PHASE_START,
)
responses.add(
responses.POST,
f"{custom_url}/publish/{org_id}/{channel_id}",
status=200,
)
response = publish_data(
org_id=org_id,
publish_request=publish_request,
token=token,
publish_url=custom_url,
)
assert response.status_code == 200
assert len(responses.calls) == 1
| PublishDataTest |
python | huggingface__transformers | src/transformers/models/llama/modeling_llama.py | {
"start": 2002,
"end": 2725
} | class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
LlamaRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
| LlamaRMSNorm |
python | scipy__scipy | scipy/stats/tests/test_stats.py | {
"start": 227845,
"end": 232592
} | class ____:
N = 20
# data for most tests
rng = np.random.default_rng(169708062)
a = np.vstack((np.arange(3*N//4), rng.random(3*N//4)))
b = np.vstack((np.arange(N//4) + 100, rng.random(N//4)))
# data for equal variance tests
a2 = np.arange(10)
b2 = np.arange(10) + 100
# data for exact test
a3 = [1, 2]
b3 = [3, 4]
# data for bigger test
rvs1 = stats.norm.rvs(loc=5, scale=10, # type: ignore
size=500, random_state=rng).reshape(100, 5).T
rvs2 = stats.norm.rvs(loc=8, scale=20, size=100, random_state=rng) # type: ignore
p_d = [1/1001, (676+1)/1001] # desired pvalues
p_d_gen = [1/1001, (672 + 1)/1001] # desired pvalues for Generator seed
p_d_big = [(993+1)/1001, (685+1)/1001, (840+1)/1001,
(955+1)/1001, (255+1)/1001]
params = [
(a, b, {"axis": 1}, p_d), # basic test
(a.T, b.T, {'axis': 0}, p_d), # along axis 0
(a[0, :], b[0, :], {'axis': None}, p_d[0]), # 1d data
(a[0, :].tolist(), b[0, :].tolist(), {'axis': None}, p_d[0]),
# different seeds
(a, b, {'random_state': 0, "axis": 1}, p_d),
(a, b, {'random_state': np.random.RandomState(0), "axis": 1}, p_d),
(a2, b2, {'equal_var': True}, 1/1001), # equal variances
(rvs1, rvs2, {'axis': -1, 'random_state': 0}, p_d_big), # bigger test
(a3, b3, {}, 1/3), # exact test
(a, b, {'random_state': np.random.default_rng(0), "axis": 1}, p_d_gen),
]
@pytest.mark.parametrize("alternative", ['less', 'greater', 'two-sided'])
@pytest.mark.parametrize("shape", [(12,), (2, 12)])
def test_permutation_method(self, alternative, shape):
rng = np.random.default_rng(2348934579834565)
x = rng.random(size=shape)
y = rng.random(size=13)
kwargs = dict(n_resamples=999)
# Use ttest_ind with `method`
rng = np.random.default_rng(348934579834565)
method = stats.PermutationMethod(rng=rng, **kwargs)
res = stats.ttest_ind(x, y, axis=-1, alternative=alternative, method=method)
# Use `permutation_test` directly
def statistic(x, y, axis): return stats.ttest_ind(x, y, axis=axis).statistic
rng = np.random.default_rng(348934579834565)
ref = stats.permutation_test((x, y), statistic, axis=-1, rng=rng,
alternative=alternative, **kwargs)
assert_equal(res.statistic, ref.statistic)
assert_equal(res.pvalue, ref.pvalue)
# Sanity check against theoretical t-test
ref = stats.ttest_ind(x, y, axis=-1, alternative=alternative)
assert_equal(res.statistic, ref.statistic)
assert_allclose(res.pvalue, ref.pvalue, rtol=3e-2)
@pytest.mark.parametrize("alternative", ['less', 'greater', 'two-sided'])
@pytest.mark.parametrize("shape", [(12,), (2, 12)])
def test_monte_carlo_method(self, alternative, shape):
rng = np.random.default_rng(2348934579834565)
x = rng.random(size=shape)
y = rng.random(size=13)
kwargs = dict(n_resamples=999)
# Use `monte_carlo` directly
def statistic(x, y, axis): return stats.ttest_ind(x, y, axis=axis).statistic
rng = np.random.default_rng(348934579834565)
rvs = [rng.standard_normal, rng.standard_normal]
ref = stats.monte_carlo_test((x, y), rvs=rvs, statistic=statistic, axis=-1,
alternative=alternative, **kwargs)
# Use ttest_ind with `method`
rng = np.random.default_rng(348934579834565)
rvs = [rng.standard_normal, rng.standard_normal]
method = stats.MonteCarloMethod(rvs=rvs, **kwargs)
res = stats.ttest_ind(x, y, axis=-1, alternative=alternative, method=method)
assert_equal(res.statistic, ref.statistic)
assert_equal(res.pvalue, ref.pvalue)
# Passing `rng` instead of `rvs`
method = stats.MonteCarloMethod(rng=348934579834565, **kwargs)
res = stats.ttest_ind(x, y, axis=-1, alternative=alternative, method=method)
assert_equal(res.statistic, ref.statistic)
assert_equal(res.pvalue, ref.pvalue)
# Sanity check against theoretical t-test
ref = stats.ttest_ind(x, y, axis=-1, alternative=alternative)
assert_equal(res.statistic, ref.statistic)
assert_allclose(res.pvalue, ref.pvalue, rtol=6e-2)
def test_resampling_input_validation(self):
message = "`method` must be an instance of `PermutationMethod`, an instance..."
with pytest.raises(ValueError, match=message):
stats.ttest_ind([1, 2, 3], [4, 5, 6], method='migratory')
| Test_ttest_ind_permutations |
python | apache__airflow | providers/grpc/tests/unit/grpc/operators/test_grpc.py | {
"start": 913,
"end": 1025
} | class ____:
def __init__(self, channel):
pass
def stream_call(self, data):
pass
| StubClass |
python | streamlit__streamlit | lib/streamlit/errors.py | {
"start": 1915,
"end": 1968
} | class ____(Error): # noqa: N818
pass
| NoStaticFiles |
python | graphql-python__graphene | graphene/validation/tests/test_depth_limit_validator.py | {
"start": 345,
"end": 443
} | class ____(ObjectType):
class meta:
name = "Cat"
interfaces = (PetType,)
| CatType |
python | python-openxml__python-docx | src/docx/oxml/text/parfmt.py | {
"start": 10347,
"end": 10747
} | class ____(BaseOxmlElement):
"""``<w:spacing>`` element, specifying paragraph spacing attributes such as space
before and line spacing."""
after = OptionalAttribute("w:after", ST_TwipsMeasure)
before = OptionalAttribute("w:before", ST_TwipsMeasure)
line = OptionalAttribute("w:line", ST_SignedTwipsMeasure)
lineRule = OptionalAttribute("w:lineRule", WD_LINE_SPACING)
| CT_Spacing |
python | facebookresearch__faiss | tests/test_index.py | {
"start": 17671,
"end": 18180
} | class ____(unittest.TestCase):
def test_l2_pos(self):
"""
roundoff errors occur only with the L2 decomposition used
with BLAS, ie. in IndexFlatL2 and with
n > distance_compute_blas_threshold = 20
"""
d = 128
n = 100
rs = np.random.RandomState(1234)
x = rs.rand(n, d).astype('float32')
index = faiss.IndexFlatL2(d)
index.add(x)
D, I = index.search(x, 10)
assert np.all(D >= 0)
| TestDistancesPositive |
python | MongoEngine__mongoengine | tests/fields/test_decimal128_field.py | {
"start": 617,
"end": 4527
} | class ____(MongoDBTestCase):
def test_decimal128_validation_good(self):
doc = Decimal128Document()
doc.dec128_fld = Decimal(0)
doc.validate()
doc.dec128_fld = Decimal(50)
doc.validate()
doc.dec128_fld = Decimal(110)
doc.validate()
doc.dec128_fld = Decimal("110")
doc.validate()
def test_decimal128_validation_invalid(self):
"""Ensure that invalid values cannot be assigned."""
doc = Decimal128Document()
doc.dec128_fld = "ten"
with pytest.raises(ValidationError):
doc.validate()
def test_decimal128_validation_min(self):
"""Ensure that out of bounds values cannot be assigned."""
doc = Decimal128Document()
doc.dec128_min_0 = Decimal(50)
doc.validate()
doc.dec128_min_0 = Decimal(-1)
with pytest.raises(ValidationError):
doc.validate()
def test_decimal128_validation_max(self):
"""Ensure that out of bounds values cannot be assigned."""
doc = Decimal128Document()
doc.dec128_max_100 = Decimal(50)
doc.validate()
doc.dec128_max_100 = Decimal(101)
with pytest.raises(ValidationError):
doc.validate()
def test_eq_operator(self):
cls = generate_test_cls()
assert cls.objects(dec128_fld=1.0).count() == 1
assert cls.objects(dec128_fld=2.0).count() == 0
def test_ne_operator(self):
cls = generate_test_cls()
assert cls.objects(dec128_fld__ne=None).count() == 1
assert cls.objects(dec128_fld__ne=1).count() == 1
assert cls.objects(dec128_fld__ne=1.0).count() == 1
def test_gt_operator(self):
cls = generate_test_cls()
assert cls.objects(dec128_fld__gt=0.5).count() == 1
def test_lt_operator(self):
cls = generate_test_cls()
assert cls.objects(dec128_fld__lt=1.5).count() == 1
def test_field_exposed_as_python_Decimal(self):
# from int
model = Decimal128Document(dec128_fld=100).save()
assert isinstance(model.dec128_fld, Decimal)
model = Decimal128Document.objects.get(id=model.id)
assert isinstance(model.dec128_fld, Decimal)
assert model.dec128_fld == Decimal("100")
def test_storage(self):
# from int
model = Decimal128Document(dec128_fld=100).save()
assert get_as_pymongo(model) == {
"_id": model.id,
"dec128_fld": Decimal128("100"),
}
# from str
model = Decimal128Document(dec128_fld="100.0").save()
assert get_as_pymongo(model) == {
"_id": model.id,
"dec128_fld": Decimal128("100.0"),
}
# from float
model = Decimal128Document(dec128_fld=100.0).save()
assert get_as_pymongo(model) == {
"_id": model.id,
"dec128_fld": Decimal128("100"),
}
# from Decimal
model = Decimal128Document(dec128_fld=Decimal(100)).save()
assert get_as_pymongo(model) == {
"_id": model.id,
"dec128_fld": Decimal128("100"),
}
model = Decimal128Document(dec128_fld=Decimal("100.0")).save()
assert get_as_pymongo(model) == {
"_id": model.id,
"dec128_fld": Decimal128("100.0"),
}
# from Decimal128
model = Decimal128Document(dec128_fld=Decimal128("100")).save()
assert get_as_pymongo(model) == {
"_id": model.id,
"dec128_fld": Decimal128("100"),
}
def test_json(self):
Decimal128Document.drop_collection()
f = str(random.random())
Decimal128Document(dec128_fld=f).save()
json_str = Decimal128Document.objects.to_json()
array = json.loads(json_str)
assert array[0]["dec128_fld"] == {"$numberDecimal": str(f)}
| TestDecimal128Field |
python | pandas-dev__pandas | pandas/tests/indexes/datetimes/test_setops.py | {
"start": 19877,
"end": 24407
} | class ____:
def test_union(self, sort):
rng = bdate_range(START, END)
# overlapping
left = rng[:10]
right = rng[5:10]
the_union = left.union(right, sort=sort)
assert isinstance(the_union, DatetimeIndex)
# non-overlapping, gap in middle
left = rng[:5]
right = rng[10:]
the_union = left.union(right, sort=sort)
assert isinstance(the_union, Index)
# non-overlapping, no gap
left = rng[:5]
right = rng[5:10]
the_union = left.union(right, sort=sort)
assert isinstance(the_union, DatetimeIndex)
# order does not matter
if sort is None:
tm.assert_index_equal(right.union(left, sort=sort), the_union)
else:
expected = DatetimeIndex(list(right) + list(left))
tm.assert_index_equal(right.union(left, sort=sort), expected)
# overlapping, but different offset
rng = date_range(START, END, freq=BMonthEnd())
the_union = rng.union(rng, sort=sort)
assert isinstance(the_union, DatetimeIndex)
def test_union_not_cacheable(self, sort):
rng = date_range("1/1/2000", periods=50, freq=Minute())
rng1 = rng[10:]
rng2 = rng[:25]
the_union = rng1.union(rng2, sort=sort)
if sort is None:
tm.assert_index_equal(the_union, rng)
else:
expected = DatetimeIndex(list(rng[10:]) + list(rng[:10]))
tm.assert_index_equal(the_union, expected)
rng1 = rng[10:]
rng2 = rng[15:35]
the_union = rng1.union(rng2, sort=sort)
expected = rng[10:]
tm.assert_index_equal(the_union, expected)
def test_intersection(self):
rng = date_range("1/1/2000", periods=50, freq=Minute(), unit="ns")
rng1 = rng[10:]
rng2 = rng[:25]
the_int = rng1.intersection(rng2)
expected = rng[10:25]
tm.assert_index_equal(the_int, expected)
assert isinstance(the_int, DatetimeIndex)
assert the_int.freq == rng.freq
the_int = rng1.intersection(rng2)
tm.assert_index_equal(the_int, expected)
# non-overlapping
the_int = rng[:10].intersection(rng[10:])
expected = DatetimeIndex([]).as_unit("ns")
tm.assert_index_equal(the_int, expected)
def test_intersection_bug(self):
# GH #771
a = bdate_range("11/30/2011", "12/31/2011")
b = bdate_range("12/10/2011", "12/20/2011")
result = a.intersection(b)
tm.assert_index_equal(result, b)
assert result.freq == b.freq
def test_intersection_list(self):
# GH#35876
# values is not an Index -> no name -> retain "a"
values = [Timestamp("2020-01-01"), Timestamp("2020-02-01")]
idx = DatetimeIndex(values, name="a")
res = idx.intersection(values)
tm.assert_index_equal(res, idx)
def test_month_range_union_tz_pytz(self, sort):
pytz = pytest.importorskip("pytz")
tz = pytz.timezone("US/Eastern")
early_start = datetime(2011, 1, 1)
early_end = datetime(2011, 3, 1)
late_start = datetime(2011, 3, 1)
late_end = datetime(2011, 5, 1)
early_dr = date_range(start=early_start, end=early_end, tz=tz, freq=MonthEnd())
late_dr = date_range(start=late_start, end=late_end, tz=tz, freq=MonthEnd())
early_dr.union(late_dr, sort=sort)
@td.skip_if_windows
def test_month_range_union_tz_dateutil(self, sort):
from pandas._libs.tslibs.timezones import dateutil_gettz
tz = dateutil_gettz("US/Eastern")
early_start = datetime(2011, 1, 1)
early_end = datetime(2011, 3, 1)
late_start = datetime(2011, 3, 1)
late_end = datetime(2011, 5, 1)
early_dr = date_range(start=early_start, end=early_end, tz=tz, freq=MonthEnd())
late_dr = date_range(start=late_start, end=late_end, tz=tz, freq=MonthEnd())
early_dr.union(late_dr, sort=sort)
@pytest.mark.parametrize("sort", [False, None])
def test_intersection_duplicates(self, sort):
# GH#38196
idx1 = Index(
[
Timestamp("2019-12-13"),
Timestamp("2019-12-12"),
Timestamp("2019-12-12"),
]
)
result = idx1.intersection(idx1, sort=sort)
expected = Index([Timestamp("2019-12-13"), Timestamp("2019-12-12")])
tm.assert_index_equal(result, expected)
| TestBusinessDatetimeIndex |
python | SmileyChris__easy-thumbnails | easy_thumbnails/files.py | {
"start": 5028,
"end": 10036
} | class ____(ImageFieldFile):
"""
A thumbnailed file.
This can be used just like a Django model instance's property for a file
field (i.e. an ``ImageFieldFile`` object).
"""
def __init__(self, name, file=None, storage=None, thumbnail_options=None,
*args, **kwargs):
fake_field = FakeField(storage=storage)
super().__init__(FakeInstance(), fake_field, name, *args, **kwargs)
del self.field
if file:
self.file = file
if thumbnail_options is None:
thumbnail_options = ThumbnailOptions()
elif not isinstance(thumbnail_options, ThumbnailOptions):
thumbnail_options = ThumbnailOptions(thumbnail_options)
self.thumbnail_options = thumbnail_options
def save(self, *args, **kwargs):
# Can't save a ``ThumbnailFile`` directly.
raise NotImplementedError()
def delete(self, *args, **kwargs):
# Can't delete a ``ThumbnailFile`` directly, it doesn't have a
# reference to the source image, so it can't update the cache. If you
# really need to do this, do it with ``self.storage.delete`` directly.
raise NotImplementedError()
# Be consistant with standard behaviour, even though these methods don't
# actually alter data any more.
save.alters_data = True
delete.alters_data = True
def _get_image(self):
"""
Get a PIL Image instance of this file.
The image is cached to avoid the file needing to be read again if the
function is called again.
"""
if not hasattr(self, '_image_cache'):
from easy_thumbnails.source_generators import pil_image
self.image = pil_image(self)
return self._image_cache
def _set_image(self, image):
"""
Set the image for this file.
This also caches the dimensions of the image.
"""
if image:
self._image_cache = image
self._dimensions_cache = image.size
else:
if hasattr(self, '_image_cache'):
del self._cached_image
if hasattr(self, '_dimensions_cache'):
del self._dimensions_cache
image = property(_get_image, _set_image)
def tag(self, alt='', use_size=None, **attrs):
"""
Return a standard XHTML ``<img ... />`` tag for this field.
:param alt: The ``alt=""`` text for the tag. Defaults to ``''``.
:param use_size: Whether to get the size of the thumbnail image for use
in the tag attributes. If ``None`` (default), the size will only
be used it if won't result in a remote file retrieval.
All other keyword parameters are added as (properly escaped) extra
attributes to the `img` tag.
"""
if use_size is None:
if getattr(self, '_dimensions_cache', None):
use_size = True
else:
try:
self.storage.path(self.name)
use_size = True
except NotImplementedError:
use_size = False
attrs['alt'] = alt
attrs['src'] = self.url
if use_size:
attrs.update(dict(width=self.width, height=self.height))
attrs = ' '.join(['%s="%s"' % (key, escape(value))
for key, value in sorted(attrs.items())])
return mark_safe('<img %s />' % attrs)
def _get_file(self):
self._require_file()
if not hasattr(self, '_file') or self._file is None:
self._file = self.storage.open(self.name, 'rb')
return self._file
def _set_file(self, value):
if value is not None and not isinstance(value, File):
value = File(value)
self._file = value
self._committed = False
def _del_file(self):
del self._file
file = property(_get_file, _set_file, _del_file)
def open(self, mode=None, *args, **kwargs):
if self.closed and self.name:
mode = mode or getattr(self, 'mode', None) or 'rb'
self.file = self.storage.open(self.name, mode)
else:
return super().open(mode, *args, **kwargs)
def _get_image_dimensions(self):
if not hasattr(self, '_dimensions_cache'):
close = self.closed
self.open()
self._dimensions_cache = database_get_image_dimensions(
self, close=close)
return self._dimensions_cache
def set_image_dimensions(self, thumbnail):
"""
Set image dimensions from the cached dimensions of a ``Thumbnail``
model instance.
"""
try:
dimensions = getattr(thumbnail, 'dimensions', None)
except models.ThumbnailDimensions.DoesNotExist:
dimensions = None
if not dimensions:
return False
self._dimensions_cache = dimensions.size
return self._dimensions_cache
| ThumbnailFile |
python | aio-libs__aiohttp | examples/fake_server.py | {
"start": 1262,
"end": 3957
} | class ____:
def __init__(self) -> None:
self.app = web.Application()
self.app.router.add_routes(
[
web.get("/v2.7/me", self.on_me),
web.get("/v2.7/me/friends", self.on_my_friends),
]
)
self.runner = web.AppRunner(self.app)
here = pathlib.Path(__file__)
ssl_cert = here.parent / "server.crt"
ssl_key = here.parent / "server.key"
self.ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.ssl_context.load_cert_chain(str(ssl_cert), str(ssl_key))
async def start(self) -> dict[str, int]:
port = test_utils.unused_port()
await self.runner.setup()
site = web.TCPSite(self.runner, "127.0.0.1", port, ssl_context=self.ssl_context)
await site.start()
return {"graph.facebook.com": port}
async def stop(self) -> None:
await self.runner.cleanup()
async def on_me(self, request: web.Request) -> web.StreamResponse:
return web.json_response({"name": "John Doe", "id": "12345678901234567"})
async def on_my_friends(self, request: web.Request) -> web.StreamResponse:
return web.json_response(
{
"data": [
{"name": "Bill Doe", "id": "233242342342"},
{"name": "Mary Doe", "id": "2342342343222"},
{"name": "Alex Smith", "id": "234234234344"},
],
"paging": {
"cursors": {
"before": "QVFIUjRtc2c5NEl0ajN",
"after": "QVFIUlpFQWM0TmVuaDRad0dt",
},
"next": (
"https://graph.facebook.com/v2.7/12345678901234567/"
"friends?access_token=EAACEdEose0cB"
),
},
"summary": {"total_count": 3},
}
)
async def main() -> None:
token = "ER34gsSGGS34XCBKd7u"
fake_facebook = FakeFacebook()
info = await fake_facebook.start()
resolver = FakeResolver(info)
connector = TCPConnector(resolver=resolver, ssl=False)
async with ClientSession(connector=connector) as session:
async with session.get(
"https://graph.facebook.com/v2.7/me", params={"access_token": token}
) as resp:
print(await resp.json())
async with session.get(
"https://graph.facebook.com/v2.7/me/friends", params={"access_token": token}
) as resp:
print(await resp.json())
await fake_facebook.stop()
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| FakeFacebook |
python | langchain-ai__langchain | libs/core/langchain_core/runnables/graph.py | {
"start": 907,
"end": 1410
} | class ____(TypedDict):
"""Dictionary of labels for nodes and edges in a graph."""
nodes: dict[str, str]
"""Labels for nodes."""
edges: dict[str, str]
"""Labels for edges."""
def is_uuid(value: str) -> bool:
"""Check if a string is a valid UUID.
Args:
value: The string to check.
Returns:
`True` if the string is a valid UUID, `False` otherwise.
"""
try:
UUID(value)
except ValueError:
return False
return True
| LabelsDict |
python | sympy__sympy | sympy/assumptions/predicates/matrices.py | {
"start": 9316,
"end": 9910
} | class ____(Predicate):
"""
Complex elements matrix predicate.
Explanation
===========
``Q.complex_elements(x)`` is true iff all the elements of ``x``
are complex numbers.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol
>>> X = MatrixSymbol('X', 4, 4)
>>> ask(Q.complex(X[1, 2]), Q.complex_elements(X))
True
>>> ask(Q.complex_elements(X), Q.integer_elements(X))
True
"""
name = "complex_elements"
handler = Dispatcher("ComplexElementsHandler", doc="Handler for key 'complex_elements'.")
| ComplexElementsPredicate |
python | huggingface__transformers | src/transformers/models/qwen3_next/modular_qwen3_next.py | {
"start": 31813,
"end": 35501
} | class ____(Qwen3NextPreTrainedModel):
def __init__(self, config: Qwen3NextConfig):
super().__init__(config)
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
self.layers = nn.ModuleList(
[Qwen3NextDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = Qwen3NextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = Qwen3NextRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> MoeModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = Qwen3NextDynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
linear_attn_mask = self._update_linear_attn_mask(attention_mask, cache_position)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
layer_mask = linear_attn_mask if decoder_layer.layer_type == "linear_attention" else causal_mask
hidden_states = decoder_layer(
hidden_states,
position_embeddings=position_embeddings,
attention_mask=layer_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return MoeModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
def _update_linear_attn_mask(self, attention_mask, cache_position):
"""
NOTE: Left-padding is used for linear attention mask.
No need for zeroing states when
1. Cached forward
2. Attending to all inputs
"""
linear_attn_mask = attention_mask
if cache_position[0] > 0 or (attention_mask is not None and torch.all(attention_mask == 1)):
linear_attn_mask = None
return linear_attn_mask
| Qwen3NextModel |
python | pytorch__pytorch | tools/linter/adapters/codespell_linter.py | {
"start": 661,
"end": 5824
} | class ____(NamedTuple):
path: str | None
line: int | None
char: int | None
code: str
severity: LintSeverity
name: str
original: str | None
replacement: str | None
description: str | None
def format_error_message(
filename: str,
error: Exception | None = None,
*,
message: str | None = None,
) -> LintMessage:
if message is None and error is not None:
message = (
f"Failed due to {error.__class__.__name__}:\n{error}\n"
"Please either fix the error or add the word(s) to the dictionary file.\n"
"HINT: all-lowercase words in the dictionary can cover all case variations."
)
return LintMessage(
path=filename,
line=None,
char=None,
code="CODESPELL",
severity=LintSeverity.ERROR,
name="spelling error",
original=None,
replacement=None,
description=message,
)
def run_codespell(path: Path) -> str:
try:
return subprocess.check_output(
[
sys.executable,
"-m",
"codespell_lib",
"--toml",
str(PYPROJECT),
str(path),
],
stderr=subprocess.STDOUT,
text=True,
encoding="utf-8",
)
except subprocess.CalledProcessError as exc:
raise ValueError(exc.output) from exc
def check_file(filename: str) -> list[LintMessage]:
path = Path(filename).absolute()
# Check if file is too large
try:
file_size = os.path.getsize(path)
if file_size > MAX_FILE_SIZE:
return [
LintMessage(
path=filename,
line=None,
char=None,
code="CODESPELL",
severity=LintSeverity.WARNING,
name="file-too-large",
original=None,
replacement=None,
description=f"File size ({file_size} bytes) exceeds {MAX_FILE_SIZE} bytes limit, skipping",
)
]
except OSError as err:
return [
LintMessage(
path=filename,
line=None,
char=None,
code="CODESPELL",
severity=LintSeverity.ERROR,
name="file-access-error",
original=None,
replacement=None,
description=f"Failed to get file size: {err}",
)
]
try:
run_codespell(path)
except Exception as err:
return [format_error_message(filename, err)]
return []
def check_dictionary(filename: str) -> list[LintMessage]:
"""Check the dictionary file for duplicates."""
path = Path(filename).absolute()
try:
words = path.read_text(encoding="utf-8").splitlines()
words_set = set(words)
if len(words) != len(words_set):
raise ValueError("The dictionary file contains duplicate entries.")
# pyrefly: ignore [no-matching-overload]
uncased_words = list(map(str.lower, words))
if uncased_words != sorted(uncased_words):
raise ValueError(
"The dictionary file is not sorted alphabetically (case-insensitive)."
)
for forbidden_word in sorted(
FORBIDDEN_WORDS & (words_set | set(uncased_words))
):
raise ValueError(
f"The dictionary file contains a forbidden word: {forbidden_word!r}. "
"Please remove it from the dictionary file and use 'codespell:ignore' "
"inline comment instead."
)
except Exception as err:
return [format_error_message(str(filename), err)]
return []
def main() -> None:
parser = argparse.ArgumentParser(
description="Check files for spelling mistakes using codespell.",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(processName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
with concurrent.futures.ProcessPoolExecutor(
max_workers=os.cpu_count(),
) as executor:
futures = {executor.submit(check_file, x): x for x in args.filenames}
futures[executor.submit(check_dictionary, str(DICTIONARY))] = str(DICTIONARY)
for future in concurrent.futures.as_completed(futures):
try:
for lint_message in future.result():
print(json.dumps(lint_message._asdict()), flush=True)
except Exception:
logging.critical('Failed at "%s".', futures[future])
raise
if __name__ == "__main__":
main()
| LintMessage |
python | ray-project__ray | python/ray/data/expressions.py | {
"start": 21946,
"end": 30077
} | class ____(Expr):
"""Expression that represents a user-defined function call.
This expression type wraps a UDF with schema inference capabilities,
allowing UDFs to be used seamlessly within the expression system.
UDFs operate on batches of data, where each column argument is passed
as a PyArrow Array containing multiple values from that column across the batch.
Args:
fn: The user-defined function to call
args: List of argument expressions (positional arguments)
kwargs: Dictionary of keyword argument expressions
function_name: Optional name for the function (for debugging)
Example:
>>> from ray.data.expressions import col, udf
>>> import pyarrow as pa
>>> import pyarrow.compute as pc
>>>
>>> @udf(return_dtype=DataType.int32())
... def add_one(x: pa.Array) -> pa.Array:
... return pc.add(x, 1)
>>>
>>> # Use in expressions
>>> expr = add_one(col("value"))
"""
fn: Callable[..., BatchColumn]
args: List[Expr]
kwargs: Dict[str, Expr]
def structurally_equals(self, other: Any) -> bool:
return (
isinstance(other, UDFExpr)
and self.fn == other.fn
and len(self.args) == len(other.args)
and all(a.structurally_equals(b) for a, b in zip(self.args, other.args))
and self.kwargs.keys() == other.kwargs.keys()
and all(
self.kwargs[k].structurally_equals(other.kwargs[k])
for k in self.kwargs.keys()
)
)
def _create_udf_callable(
fn: Callable[..., BatchColumn], return_dtype: DataType
) -> Callable[..., UDFExpr]:
"""Create a callable that generates UDFExpr when called with expressions."""
def udf_callable(*args, **kwargs) -> UDFExpr:
# Convert arguments to expressions if they aren't already
expr_args = []
for arg in args:
if isinstance(arg, Expr):
expr_args.append(arg)
else:
expr_args.append(LiteralExpr(arg))
expr_kwargs = {}
for k, v in kwargs.items():
if isinstance(v, Expr):
expr_kwargs[k] = v
else:
expr_kwargs[k] = LiteralExpr(v)
return UDFExpr(
fn=fn,
args=expr_args,
kwargs=expr_kwargs,
data_type=return_dtype,
)
# Preserve original function metadata
functools.update_wrapper(udf_callable, fn)
# Store the original function for access if needed
udf_callable._original_fn = fn
return udf_callable
@PublicAPI(stability="alpha")
def udf(return_dtype: DataType) -> Callable[..., UDFExpr]:
"""
Decorator to convert a UDF into an expression-compatible function.
This decorator allows UDFs to be used seamlessly within the expression system,
enabling schema inference and integration with other expressions.
IMPORTANT: UDFs operate on batches of data, not individual rows. When your UDF
is called, each column argument will be passed as a PyArrow Array containing
multiple values from that column across the batch. Under the hood, when working
with multiple columns, they get translated to PyArrow arrays (one array per column).
Args:
return_dtype: The data type of the return value of the UDF
Returns:
A callable that creates UDFExpr instances when called with expressions
Example:
>>> from ray.data.expressions import col, udf
>>> import pyarrow as pa
>>> import pyarrow.compute as pc
>>> import ray
>>>
>>> # UDF that operates on a batch of values (PyArrow Array)
>>> @udf(return_dtype=DataType.int32())
... def add_one(x: pa.Array) -> pa.Array:
... return pc.add(x, 1) # Vectorized operation on the entire Array
>>>
>>> # UDF that combines multiple columns (each as a PyArrow Array)
>>> @udf(return_dtype=DataType.string())
... def format_name(first: pa.Array, last: pa.Array) -> pa.Array:
... return pc.binary_join_element_wise(first, last, " ") # Vectorized string concatenation
>>>
>>> # Use in dataset operations
>>> ds = ray.data.from_items([
... {"value": 5, "first": "John", "last": "Doe"},
... {"value": 10, "first": "Jane", "last": "Smith"}
... ])
>>>
>>> # Single column transformation (operates on batches)
>>> ds_incremented = ds.with_column("value_plus_one", add_one(col("value")))
>>>
>>> # Multi-column transformation (each column becomes a PyArrow Array)
>>> ds_formatted = ds.with_column("full_name", format_name(col("first"), col("last")))
>>>
>>> # Can also be used in complex expressions
>>> ds_complex = ds.with_column("doubled_plus_one", add_one(col("value")) * 2)
"""
def decorator(func: Callable[..., BatchColumn]) -> Callable[..., UDFExpr]:
return _create_udf_callable(func, return_dtype)
return decorator
def _create_pyarrow_wrapper(
fn: Callable[..., BatchColumn]
) -> Callable[..., BatchColumn]:
"""Wrap a PyArrow compute function to auto-convert inputs to PyArrow format.
This wrapper ensures that pandas Series and numpy arrays are converted to
PyArrow Arrays before being passed to the function, enabling PyArrow compute
functions to work seamlessly with any block format.
Args:
fn: The PyArrow compute function to wrap
Returns:
A wrapped function that handles format conversion
"""
@functools.wraps(fn)
def arrow_wrapper(*args, **kwargs):
import numpy as np
import pandas as pd
import pyarrow as pa
def to_arrow(val):
"""Convert a value to PyArrow Array if needed."""
if isinstance(val, (pa.Array, pa.ChunkedArray)):
return val, False
elif isinstance(val, pd.Series):
return pa.Array.from_pandas(val), True
elif isinstance(val, np.ndarray):
return pa.array(val), False
else:
return val, False
# Convert inputs to PyArrow and track pandas flags
args_results = [to_arrow(arg) for arg in args]
kwargs_results = {k: to_arrow(v) for k, v in kwargs.items()}
converted_args = [v[0] for v in args_results]
converted_kwargs = {k: v[0] for k, v in kwargs_results.items()}
input_was_pandas = any(v[1] for v in args_results) or any(
v[1] for v in kwargs_results.values()
)
# Call function with converted inputs
result = fn(*converted_args, **converted_kwargs)
# Convert result back to pandas if input was pandas
if input_was_pandas and isinstance(result, (pa.Array, pa.ChunkedArray)):
result = result.to_pandas()
return result
return arrow_wrapper
@PublicAPI(stability="alpha")
def pyarrow_udf(return_dtype: DataType) -> Callable[..., UDFExpr]:
"""Decorator for PyArrow compute functions with automatic format conversion.
This decorator wraps PyArrow compute functions to automatically convert pandas
Series and numpy arrays to PyArrow Arrays, ensuring the function works seamlessly
regardless of the underlying block format (pandas, arrow, or items).
Used internally by namespace methods (list, str, struct) that wrap PyArrow
compute functions.
Args:
return_dtype: The data type of the return value
Returns:
A callable that creates UDFExpr instances with automatic conversion
"""
def decorator(func: Callable[..., BatchColumn]) -> Callable[..., UDFExpr]:
# Wrap the function with PyArrow conversion logic
wrapped_fn = _create_pyarrow_wrapper(func)
# Create UDFExpr callable using the wrapped function
return _create_udf_callable(wrapped_fn, return_dtype)
return decorator
@DeveloperAPI(stability="alpha")
@dataclass(frozen=True, eq=False, repr=False)
| UDFExpr |
python | plotly__plotly.py | plotly/graph_objs/violin/_stream.py | {
"start": 233,
"end": 3494
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "violin"
_path_str = "violin.stream"
_valid_props = {"maxpoints", "token"}
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.violin.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super().__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.violin.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.violin.Stream`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("maxpoints", arg, maxpoints)
self._set_property("token", arg, token)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Stream |
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/model.py | {
"start": 649,
"end": 3754
} | class ____(BaseChatModel, Generic[StructuredResponseT]):
tool_calls: Union[list[list[ToolCall]], list[list[dict]]] | None = None
structured_response: StructuredResponseT | None = None
index: int = 0
tool_style: Literal["openai", "anthropic"] = "openai"
def _generate(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> ChatResult:
"""Top Level call"""
is_native = kwargs.get("response_format")
if self.tool_calls:
if is_native:
tool_calls = (
self.tool_calls[self.index] if self.index < len(self.tool_calls) else []
)
else:
tool_calls = self.tool_calls[self.index % len(self.tool_calls)]
else:
tool_calls = []
if is_native and not tool_calls:
if isinstance(self.structured_response, BaseModel):
content_obj = self.structured_response.model_dump()
elif is_dataclass(self.structured_response):
content_obj = asdict(self.structured_response)
elif isinstance(self.structured_response, dict):
content_obj = self.structured_response
message = AIMessage(content=json.dumps(content_obj), id=str(self.index))
else:
messages_string = "-".join([m.text for m in messages])
message = AIMessage(
content=messages_string,
id=str(self.index),
tool_calls=tool_calls.copy(),
)
self.index += 1
return ChatResult(generations=[ChatGeneration(message=message)])
@property
def _llm_type(self) -> str:
return "fake-tool-call-model"
def bind_tools(
self,
tools: Sequence[Union[dict[str, Any], type[BaseModel], Callable, BaseTool]],
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
if len(tools) == 0:
msg = "Must provide at least one tool"
raise ValueError(msg)
tool_dicts = []
for tool in tools:
if isinstance(tool, dict):
tool_dicts.append(tool)
continue
if not isinstance(tool, BaseTool):
msg = "Only BaseTool and dict is supported by FakeToolCallingModel.bind_tools"
raise TypeError(msg)
# NOTE: this is a simplified tool spec for testing purposes only
if self.tool_style == "openai":
tool_dicts.append(
{
"type": "function",
"function": {
"name": tool.name,
},
}
)
elif self.tool_style == "anthropic":
tool_dicts.append(
{
"name": tool.name,
}
)
return self.bind(tools=tool_dicts, **kwargs)
| FakeToolCallingModel |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_pubsub.py | {
"start": 11017,
"end": 11643
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.pubsub.PubSubHook")
def test_execute(self, mock_hook):
operator = PubSubDeleteSubscriptionOperator(
task_id=TASK_ID, project_id=TEST_PROJECT, subscription=TEST_SUBSCRIPTION
)
operator.execute(None)
mock_hook.return_value.delete_subscription.assert_called_once_with(
project_id=TEST_PROJECT,
subscription=TEST_SUBSCRIPTION,
fail_if_not_exists=False,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestPubSubSubscriptionDeleteOperator |
python | pytorch__pytorch | test/distributed/algorithms/ddp_comm_hooks/test_ddp_hooks.py | {
"start": 1117,
"end": 1334
} | class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
torch.manual_seed(0)
self.p = nn.Parameter(torch.randn(40, 20))
def forward(self, x):
return self.p * x
| Task |
python | python-pillow__Pillow | Tests/test_nanoarrow.py | {
"start": 4540,
"end": 8765
} | class ____(NamedTuple):
dtype: nanoarrow
# Strictly speaking, elt should be a pixel or pixel component, so
# list[uint8][4], float, int, uint32, uint8, etc. But more
# correctly, it should be exactly the dtype from the line above.
elt: Any
elts_per_pixel: int
UINT_ARR = DataShape(
dtype=fl_uint8_4_type,
elt=[1, 2, 3, 4], # array of 4 uint8 per pixel
elts_per_pixel=1, # only one array per pixel
)
UINT = DataShape(
dtype=nanoarrow.uint8(),
elt=3, # one uint8,
elts_per_pixel=4, # but repeated 4x per pixel
)
UINT32 = DataShape(
dtype=nanoarrow.uint32(),
elt=0xABCDEF45, # one packed int, doesn't fit in a int32 > 0x80000000
elts_per_pixel=1, # one per pixel
)
INT32 = DataShape(
dtype=nanoarrow.uint32(),
elt=0x12CDEF45, # one packed int
elts_per_pixel=1, # one per pixel
)
@pytest.mark.parametrize(
"mode, data_tp, mask",
(
("L", DataShape(nanoarrow.uint8(), 3, 1), None),
("I", DataShape(nanoarrow.int32(), 1 << 24, 1), None),
("F", DataShape(nanoarrow.float32(), 3.14159, 1), None),
("LA", UINT_ARR, [0, 3]),
("LA", UINT, [0, 3]),
("RGB", UINT_ARR, [0, 1, 2]),
("RGBA", UINT_ARR, None),
("CMYK", UINT_ARR, None),
("YCbCr", UINT_ARR, [0, 1, 2]),
("HSV", UINT_ARR, [0, 1, 2]),
("RGB", UINT, [0, 1, 2]),
("RGBA", UINT, None),
("CMYK", UINT, None),
("YCbCr", UINT, [0, 1, 2]),
("HSV", UINT, [0, 1, 2]),
),
)
def test_fromarray(mode: str, data_tp: DataShape, mask: list[int] | None) -> None:
(dtype, elt, elts_per_pixel) = data_tp
ct_pixels = TEST_IMAGE_SIZE[0] * TEST_IMAGE_SIZE[1]
if dtype == fl_uint8_4_type:
tmp_arr = nanoarrow.Array(
elt * (ct_pixels * elts_per_pixel), schema=nanoarrow.uint8()
)
c_array = nanoarrow.c_array_from_buffers(
dtype, ct_pixels, buffers=[], children=[tmp_arr]
)
arr = nanoarrow.Array(c_array)
else:
arr = nanoarrow.Array(
nanoarrow.c_array([elt] * (ct_pixels * elts_per_pixel), schema=dtype)
)
img = Image.fromarrow(arr, mode, TEST_IMAGE_SIZE)
_test_img_equals_pyarray(img, arr, mask, elts_per_pixel)
@pytest.mark.parametrize(
"mode, mask",
(
("LA", [0, 3]),
("RGB", [0, 1, 2]),
("RGBA", None),
("CMYK", None),
("YCbCr", [0, 1, 2]),
("HSV", [0, 1, 2]),
),
)
@pytest.mark.parametrize("data_tp", (UINT32, INT32))
def test_from_int32array(mode: str, mask: list[int] | None, data_tp: DataShape) -> None:
(dtype, elt, elts_per_pixel) = data_tp
ct_pixels = TEST_IMAGE_SIZE[0] * TEST_IMAGE_SIZE[1]
arr = nanoarrow.Array(
nanoarrow.c_array([elt] * (ct_pixels * elts_per_pixel), schema=dtype)
)
img = Image.fromarrow(arr, mode, TEST_IMAGE_SIZE)
_test_img_equals_int32_pyarray(img, arr, mask, elts_per_pixel)
@pytest.mark.parametrize(
"mode, metadata",
(
("LA", ["L", "X", "X", "A"]),
("RGB", ["R", "G", "B", "X"]),
("RGBX", ["R", "G", "B", "X"]),
("RGBA", ["R", "G", "B", "A"]),
("CMYK", ["C", "M", "Y", "K"]),
("YCbCr", ["Y", "Cb", "Cr", "X"]),
("HSV", ["H", "S", "V", "X"]),
),
)
def test_image_nested_metadata(mode: str, metadata: list[str]) -> None:
img = hopper(mode)
arr = nanoarrow.Array(img)
assert arr.schema.value_type.metadata
assert arr.schema.value_type.metadata[b"image"]
parsed_metadata = json.loads(
arr.schema.value_type.metadata[b"image"].decode("utf8")
)
assert "bands" in parsed_metadata
assert parsed_metadata["bands"] == metadata
@pytest.mark.parametrize(
"mode, metadata",
(
("L", ["L"]),
("I", ["I"]),
("F", ["F"]),
),
)
def test_image_flat_metadata(mode: str, metadata: list[str]) -> None:
img = hopper(mode)
arr = nanoarrow.Array(img)
assert arr.schema.metadata
assert arr.schema.metadata[b"image"]
parsed_metadata = json.loads(arr.schema.metadata[b"image"].decode("utf8"))
assert "bands" in parsed_metadata
assert parsed_metadata["bands"] == metadata
| DataShape |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingNoneMember1.py | {
"start": 1105,
"end": 2012
} | class ____:
@property
def prop1(self) -> int: ...
member1: int
member2: int | None
member3: None
member4: int
def func3(c: Union[A, B]):
if c.prop1 is None:
reveal_type(c, expected_text="A | B")
else:
reveal_type(c, expected_text="A | B")
def func4(c: Union[A, B]):
if c.member1 is None:
reveal_type(c, expected_text="A")
else:
reveal_type(c, expected_text="B")
def func5(c: Union[A, B]):
if c.member2 is None:
reveal_type(c, expected_text="A | B")
else:
reveal_type(c, expected_text="A | B")
def func6(c: Union[A, B]):
if c.member3 is not None:
reveal_type(c, expected_text="A")
else:
reveal_type(c, expected_text="A | B")
def func7(c: Union[A, B]):
if c.member4 is not None:
reveal_type(c, expected_text="A | B")
else:
reveal_type(c, expected_text="A")
| B |
python | neetcode-gh__leetcode | python/1423-maximum-points-you-can-obtain-from-cards.py | {
"start": 0,
"end": 314
} | class ____:
def maxScore(self, cardPoints: List[int], k: int) -> int:
n = len(cardPoints)
score = maxScore = sum(cardPoints[:k])
for i in range(1, k + 1):
score += cardPoints[-i] - cardPoints[k - i]
maxScore = max(maxScore, score)
return maxScore
| Solution |
python | PrefectHQ__prefect | src/prefect/server/schemas/core.py | {
"start": 45493,
"end": 45837
} | class ____(ORMBaseModel):
token: str = Field(
default=...,
description="The CSRF token",
)
client: str = Field(
default=..., description="The client id associated with the CSRF token"
)
expiration: DateTime = Field(
default=..., description="The expiration time of the CSRF token"
)
| CsrfToken |
python | kamyu104__LeetCode-Solutions | Python/count-different-palindromic-subsequences.py | {
"start": 33,
"end": 1204
} | class ____(object):
def countPalindromicSubsequences(self, S):
"""
:type S: str
:rtype: int
"""
def dp(i, j, prv, nxt, lookup):
if lookup[i][j] is not None:
return lookup[i][j]
result = 1
if i <= j:
for x in xrange(4):
i0 = nxt[i][x]
j0 = prv[j][x]
if i <= i0 <= j:
result = (result + 1) % P
if None < i0 < j0:
result = (result + dp(i0+1, j0-1, prv, nxt, lookup)) % P
result %= P
lookup[i][j] = result
return result
prv = [None] * len(S)
nxt = [None] * len(S)
last = [None] * 4
for i in xrange(len(S)):
last[ord(S[i])-ord('a')] = i
prv[i] = tuple(last)
last = [None] * 4
for i in reversed(xrange(len(S))):
last[ord(S[i])-ord('a')] = i
nxt[i] = tuple(last)
P = 10**9 + 7
lookup = [[None] * len(S) for _ in xrange(len(S))]
return dp(0, len(S)-1, prv, nxt, lookup) - 1
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/super6.py | {
"start": 410,
"end": 637
} | class ____(type):
def __new__(cls, name: str, bases, dct):
new_class = super().__new__(cls, name, bases, dct)
reveal_type(new_class, expected_text="Self@FirstLevelMeta")
return new_class
| FirstLevelMeta |
python | coleifer__peewee | tests/sqlite.py | {
"start": 67589,
"end": 68824
} | class ____(BaseTestCase):
def test_model_factory(self):
class Category(TestModel):
name = CharField()
parent = ForeignKeyField('self', null=True)
Closure = ClosureTable(Category)
self.assertEqual(Closure._meta.extension_module, 'transitive_closure')
self.assertEqual(Closure._meta.columns, {})
self.assertEqual(Closure._meta.fields, {})
self.assertFalse(Closure._meta.primary_key)
self.assertEqual(Closure._meta.options, {
'idcolumn': 'id',
'parentcolumn': 'parent_id',
'tablename': 'category',
})
class Alt(TestModel):
pk = AutoField()
ref = ForeignKeyField('self', null=True)
Closure = ClosureTable(Alt)
self.assertEqual(Closure._meta.columns, {})
self.assertEqual(Closure._meta.fields, {})
self.assertFalse(Closure._meta.primary_key)
self.assertEqual(Closure._meta.options, {
'idcolumn': 'pk',
'parentcolumn': 'ref_id',
'tablename': 'alt',
})
class NoForeignKey(TestModel):
pass
self.assertRaises(ValueError, ClosureTable, NoForeignKey)
| TestTransitiveClosure |
python | sqlalchemy__sqlalchemy | test/ext/test_extendedattr.py | {
"start": 18598,
"end": 21466
} | class ____(_ExtBase, fixtures.ORMTest):
def test_none(self):
class A:
pass
register_class(A)
def mgr_factory(cls):
return instrumentation.ClassManager(cls)
class B:
__sa_instrumentation_manager__ = staticmethod(mgr_factory)
register_class(B)
class C:
__sa_instrumentation_manager__ = instrumentation.ClassManager
register_class(C)
def test_single_down(self):
class A:
pass
register_class(A)
def mgr_factory(cls):
return instrumentation.ClassManager(cls)
class B(A):
__sa_instrumentation_manager__ = staticmethod(mgr_factory)
assert_raises_message(
TypeError,
"multiple instrumentation implementations",
register_class,
B,
)
def test_single_up(self):
class A:
pass
# delay registration
def mgr_factory(cls):
return instrumentation.ClassManager(cls)
class B(A):
__sa_instrumentation_manager__ = staticmethod(mgr_factory)
register_class(B)
assert_raises_message(
TypeError,
"multiple instrumentation implementations",
register_class,
A,
)
def test_diamond_b1(self):
def mgr_factory(cls):
return instrumentation.ClassManager(cls)
class A:
pass
class B1(A):
pass
class B2(A):
__sa_instrumentation_manager__ = staticmethod(mgr_factory)
class C:
pass
assert_raises_message(
TypeError,
"multiple instrumentation implementations",
register_class,
B1,
)
def test_diamond_b2(self):
def mgr_factory(cls):
return instrumentation.ClassManager(cls)
class A:
pass
class B1(A):
pass
class B2(A):
__sa_instrumentation_manager__ = staticmethod(mgr_factory)
class C:
pass
register_class(B2)
assert_raises_message(
TypeError,
"multiple instrumentation implementations",
register_class,
B1,
)
def test_diamond_c_b(self):
def mgr_factory(cls):
return instrumentation.ClassManager(cls)
class A:
pass
class B1(A):
pass
class B2(A):
__sa_instrumentation_manager__ = staticmethod(mgr_factory)
class C:
pass
register_class(C)
assert_raises_message(
TypeError,
"multiple instrumentation implementations",
register_class,
B1,
)
| InstrumentationCollisionTest |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py | {
"start": 26220,
"end": 26287
} | class ____(BaseModel):
type: Literal["NoPagination"]
| NoPagination |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/roles.py | {
"start": 3764,
"end": 3878
} | class ____(OnClauseRole):
__slots__ = ()
_role_name = "SQL expression for WHERE/HAVING role"
| WhereHavingRole |
python | rapidsai__cudf | python/dask_cudf/dask_cudf/io/parquet.py | {
"start": 3098,
"end": 7583
} | class ____(ReadParquetFSSpec):
_STATS_CACHE: MutableMapping[str, Any] = {}
def approx_statistics(self):
# Use a few files to approximate column-size statistics
key = tokenize(self._dataset_info["ds"].files[:10], self.filters)
try:
return self._STATS_CACHE[key]
except KeyError:
# Account for filters
ds_filters = None
if self.filters is not None:
ds_filters = _filters_to_expression(self.filters)
# Use average total_uncompressed_size of three files
n_sample = 3
column_sizes = {}
for i, frag in enumerate(
self._dataset_info["ds"].get_fragments(ds_filters)
):
md = frag.metadata
for rg in range(md.num_row_groups):
row_group = md.row_group(rg)
for col in range(row_group.num_columns):
column = row_group.column(col)
name = column.path_in_schema
if name not in column_sizes:
column_sizes[name] = np.zeros(
n_sample, dtype="int64"
)
column_sizes[name][i] += column.total_uncompressed_size
if (i + 1) >= n_sample:
break
# Reorganize stats to look like arrow-fs version
self._STATS_CACHE[key] = {
"columns": [
{
"path_in_schema": name,
"total_uncompressed_size": np.mean(sizes),
}
for name, sizes in column_sizes.items()
]
}
return self._STATS_CACHE[key]
@functools.cached_property
def _fusion_compression_factor(self):
# Disable fusion when blocksize=None
if self.blocksize is None:
return 1
# At this point, we *may* have used `blockwise`
# already to split or aggregate files. We don't
# *know* if the current partitions correspond to
# individual/full files, multiple/aggregated files
# or partial/split files.
#
# Therefore, we need to use the statistics from
# a few files to estimate the current partition
# size. This size should be similar to `blocksize`
# *if* aggregate_files is True or if the files
# are *smaller* than `blocksize`.
# Step 1: Sample statistics
approx_stats = self.approx_statistics()
projected_size, original_size = 0, 0
col_op = self.operand("columns") or self.columns
for col in approx_stats["columns"]:
original_size += col["total_uncompressed_size"]
if col["path_in_schema"] in col_op or (
(split_name := col["path_in_schema"].split("."))
and split_name[0] in col_op
):
projected_size += col["total_uncompressed_size"]
if original_size < 1 or projected_size < 1:
return 1
# Step 2: Estimate the correction factor
# (Correct for possible pre-optimization fusion/splitting)
blocksize = parse_bytes(self.blocksize)
if original_size > blocksize:
# Input files are bigger than blocksize
# and we already split these large files.
# (correction_factor > 1)
correction_factor = original_size / blocksize
elif self.aggregate_files:
# Input files are smaller than blocksize
# and we already aggregate small files.
# (correction_factor == 1)
correction_factor = 1
else:
# Input files are smaller than blocksize
# but we haven't aggregate small files yet.
# (correction_factor < 1)
correction_factor = original_size / blocksize
# Step 3. Estimate column-projection factor
if self.operand("columns") is None:
projection_factor = 1
else:
projection_factor = projected_size / original_size
return max(projection_factor * correction_factor, 0.001)
def _tune_up(self, parent):
if self._fusion_compression_factor >= 1:
return
if isinstance(parent, FusedIO):
return
return parent.substitute(self, CudfFusedIO(self))
| CudfReadParquetFSSpec |
python | davidhalter__jedi | jedi/inference/star_args.py | {
"start": 7679,
"end": 7895
} | class ____(ParamNameWrapper):
def __init__(self, param_name, new_kind):
super().__init__(param_name)
self._new_kind = new_kind
def get_kind(self):
return self._new_kind
| ParamNameFixedKind |
python | great-expectations__great_expectations | great_expectations/core/yaml_handler.py | {
"start": 185,
"end": 3758
} | class ____:
"""Facade class designed to be a lightweight wrapper around YAML serialization.
For all YAML-related activities in Great Expectations, this is the entry point.
Note that this is meant to be library agnostic - the underlying implementation does not
matter as long as we fulfill the following contract:
* load
* dump
Typical usage example:
```python
simple_yaml: str = '''
name: test
class_name: test_class
module_name: test.test_class
'''
yaml_handler = YAMLHandler()
res: dict = yaml_handler.load(simple_yaml)
example_dict: dict = dict(abc=1)
yaml_handler.dump(example_dict)
```
"""
def __init__(self) -> None:
self._handler = YAML(typ="safe")
# TODO: ensure this does not break all usage of ruamel in GX codebase.
self._handler.indent(mapping=2, sequence=4, offset=2)
self._handler.default_flow_style = False
def load(self, stream: io.TextIOWrapper | str) -> dict[str, JSONValues]:
"""Converts a YAML input stream into a Python dictionary.
Example:
```python
import pathlib
yaml_handler = YAMLHandler()
my_file_str = pathlib.Path("my_file.yaml").read_text()
dict_from_yaml = yaml_handler.load(my_file_str)
```
Args:
stream: The input stream to read in. Although this function calls ruamel's load(), we
use a slightly more restrictive type-hint than ruamel (which uses Any). This is in order to tightly
bind the behavior of the YamlHandler class with expected YAML-related activities of Great Expectations.
Returns:
The deserialized dictionary form of the input stream.
""" # noqa: E501 # FIXME CoP
return self._handler.load(stream=stream)
def dump(
self,
data: dict,
stream: io.TextIOWrapper | io.StringIO | Path | None = None,
**kwargs,
) -> str | None:
"""Converts a Python dictionary into a YAML string.
Dump code has been adopted from:
https://yaml.readthedocs.io/en/latest/example.html#output-of-dump-as-a-string
```python
>>> data = {'foo': 'bar'}
>>> yaml_str = yaml_handler.dump(data)
>>> print(yaml_str)
foo:
bar:
```
Args:
data: The dictionary to serialize into a Python object.
stream: The output stream to modify. If not provided, we default to io.StringIO.
kwargs: Additional key-word arguments to pass to underlying yaml dump method.
Returns:
If no stream argument is provided, the str that results from ``_handler.dump()``.
Otherwise, None as the ``_handler.dump()`` works in place and will exercise the handler accordingly.
""" # noqa: E501 # FIXME CoP
if stream:
return self._dump(data=data, stream=stream, **kwargs) # type: ignore[func-returns-value] # FIXME CoP
return self._dump_and_return_value(data=data, **kwargs)
def _dump(self, data: dict, stream, **kwargs) -> None:
"""If an input stream has been provided, modify it in place."""
self._handler.dump(data=data, stream=stream, **kwargs)
def _dump_and_return_value(self, data: dict, **kwargs) -> str:
"""If an input stream hasn't been provided, generate one and return the value."""
stream = io.StringIO()
self._handler.dump(data=data, stream=stream, **kwargs)
return stream.getvalue()
| YAMLHandler |
python | django__django | tests/staticfiles_tests/test_management.py | {
"start": 17349,
"end": 19912
} | class ____(CollectionTestCase):
"""
Test overriding duplicated files by ``collectstatic`` management command.
Check for proper handling of apps order in installed apps even if file
modification dates are in different order:
'staticfiles_test_app',
'staticfiles_tests.apps.no_label',
"""
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.temp_dir)
# get modification and access times for no_label/static/file2.txt
self.orig_path = os.path.join(
TEST_ROOT, "apps", "no_label", "static", "file2.txt"
)
self.orig_mtime = os.path.getmtime(self.orig_path)
self.orig_atime = os.path.getatime(self.orig_path)
# prepare duplicate of file2.txt from a temporary app
# this file will have modification time older than
# no_label/static/file2.txt anyway it should be taken to STATIC_ROOT
# because the temporary app is before 'no_label' app in installed apps
self.temp_app_path = os.path.join(self.temp_dir, "staticfiles_test_app")
self.testfile_path = os.path.join(self.temp_app_path, "static", "file2.txt")
os.makedirs(self.temp_app_path)
with open(os.path.join(self.temp_app_path, "__init__.py"), "w+"):
pass
os.makedirs(os.path.dirname(self.testfile_path))
with open(self.testfile_path, "w+") as f:
f.write("duplicate of file2.txt")
os.utime(self.testfile_path, (self.orig_atime - 1, self.orig_mtime - 1))
settings_with_test_app = self.modify_settings(
INSTALLED_APPS={"prepend": "staticfiles_test_app"},
)
with extend_sys_path(self.temp_dir):
settings_with_test_app.enable()
self.addCleanup(settings_with_test_app.disable)
super().setUp()
def test_ordering_override(self):
"""
Test if collectstatic takes files in proper order
"""
self.assertFileContains("file2.txt", "duplicate of file2.txt")
# run collectstatic again
self.run_collectstatic()
self.assertFileContains("file2.txt", "duplicate of file2.txt")
# The collectstatic test suite already has conflicting files since both
# project/test/file.txt and apps/test/static/test/file.txt are collected. To
# properly test for the warning not happening unless we tell it to explicitly,
# we remove the project directory and will add back a conflicting file later.
@override_settings(STATICFILES_DIRS=[])
| TestCollectionFilesOverride |
python | walkccc__LeetCode | solutions/2312. Selling Pieces of Wood/2312.py | {
"start": 0,
"end": 540
} | class ____:
def sellingWood(self, m: int, n: int, prices: list[list[int]]) -> int:
# dp[i][j] := the maximum money of cutting i x j piece of wood
dp = [[0] * (n + 1) for _ in range(m + 1)]
for h, w, price in prices:
dp[h][w] = price
for i in range(1, m + 1):
for j in range(1, n + 1):
for h in range(1, i // 2 + 1):
dp[i][j] = max(dp[i][j], dp[h][j] + dp[i - h][j])
for w in range(1, j // 2 + 1):
dp[i][j] = max(dp[i][j], dp[i][w] + dp[i][j - w])
return dp[m][n]
| Solution |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/pyutils/pair_set.py | {
"start": 0,
"end": 1168
} | class ____(object):
__slots__ = '_data',
def __init__(self):
self._data = {}
def __contains__(self, item):
return self.has(item[0], item[1], item[2])
def __str__(self):
return str(self._data)
def __repr__(self):
return str(self._data)
def has(self, a, b, are_mutually_exclusive):
first = self._data.get(a)
result = first and first.get(b)
if result is None:
return False
# are_mutually_exclusive being false is a superset of being true,
# hence if we want to know if this PairSet "has" these two with no
# exclusivity, we have to ensure it was added as such.
if not are_mutually_exclusive:
return not result
return True
def add(self, a, b, are_mutually_exclusive):
_pair_set_add(self._data, a, b, are_mutually_exclusive)
_pair_set_add(self._data, b, a, are_mutually_exclusive)
return self
def _pair_set_add(data, a, b, are_mutually_exclusive):
sub_dict = data.get(a)
if not sub_dict:
sub_dict = {}
data[a] = sub_dict
sub_dict[b] = are_mutually_exclusive
| PairSet |
python | conda__conda | conda/common/configuration.py | {
"start": 35992,
"end": 37053
} | class ____(Parameter):
"""
Parameter type for a Configuration class that holds a single python primitive value.
The python primitive types are str, int, float, complex, bool, and NoneType. In addition,
python 2 has long and unicode types.
"""
def __init__(self, default, element_type=None, validation=None):
"""
Args:
default (primitive value): default value if the Parameter is not found.
element_type (type or tuple[type]): Type-validation of parameter's value. If None,
type(default) is used.
"""
self._type = type(default) if element_type is None else element_type
self._element_type = self._type
super().__init__(default, validation)
def load(self, name, match):
return PrimitiveLoadedParameter(
name,
self._type,
match.value(self._element_type),
match.keyflag(),
match.valueflags(self._element_type),
validation=self._validation,
)
| PrimitiveParameter |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 64159,
"end": 64836
} | class ____(themeable):
"""
How to renderer fonts for svg images
Parameters
----------
theme_element : bool
If `True`, assume fonts are installed on the machine where
the SVG will be viewed.
If `False`, embed characters as paths; this is supported by
most SVG renderers.
You should probably set this to `True` if you intend to edit
the svg file.
"""
@property
def rcParams(self) -> dict[str, Any]:
rcParams = super().rcParams
rcParams["svg.fonttype"] = (
"none" if self.properties.get("value") else "path"
)
return rcParams
# Deprecated
| svg_usefonts |
python | jazzband__django-redis | django_redis/compressors/lz4.py | {
"start": 204,
"end": 580
} | class ____(BaseCompressor):
min_length = 15
def compress(self, value: bytes) -> bytes:
if len(value) > self.min_length:
return _compress(value)
return value
def decompress(self, value: bytes) -> bytes:
try:
return _decompress(value)
except Exception as e:
raise CompressorError from e
| Lz4Compressor |
python | google__jax | jax/_src/debugger/colab_lib.py | {
"start": 1524,
"end": 2504
} | class ____(DynamicDOMElement):
"""A `div` that can be edited."""
_uuid: str = dataclasses.field(init=False)
_root_elem: DOMElement = dataclasses.field(init=False)
elem: DOMElement | str
def __post_init__(self):
self._uuid = str(uuid.uuid4())
self._rendered = False
self._root_elem = div(id=self.tag)
@property
def tag(self):
return f"tag-{self._uuid}"
def render(self):
if self._rendered:
raise ValueError("Can't call `render` twice.")
self._root_elem.render()
self._rendered = True
self.append(self.elem)
def append(self, child: DOMElement):
if not self._rendered:
self.render()
with output.use_tags([self.tag]):
with output.redirect_to_element(f"#{self.tag}"):
child.render()
def update(self, elem: DOMElement):
self.clear()
self.elem = elem
self.render()
def clear(self):
output.clear(output_tags=[self.tag])
self._rendered = False
@dataclasses.dataclass
| DynamicDiv |
python | django__django | tests/null_fk_ordering/models.py | {
"start": 992,
"end": 1230
} | class ____(models.Model):
post = models.ForeignKey(Post, models.SET_NULL, null=True)
comment_text = models.CharField(max_length=250)
class Meta:
ordering = ["post__forum__system_info__system_name", "comment_text"]
| Comment |
python | huggingface__transformers | src/transformers/models/distilbert/modeling_distilbert.py | {
"start": 11023,
"end": 11999
} | class ____(PreTrainedModel):
config: DistilBertConfig
base_model_prefix = "distilbert"
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": TransformerBlock,
"attentions": DistilBertSelfAttention,
}
@torch.no_grad()
def _init_weights(self, module: nn.Module):
"""Initialize the weights."""
super()._init_weights(module)
if isinstance(module, Embeddings) and self.config.sinusoidal_pos_embds:
init.copy_(
module.position_embeddings.weight,
create_sinusoidal_embeddings(
self.config.max_position_embeddings,
self.config.dim,
torch.empty_like(module.position_embeddings.weight),
),
)
@auto_docstring
| DistilBertPreTrainedModel |
python | realpython__materials | python-type-checking/game_003.py | {
"start": 284,
"end": 806
} | class ____:
def __init__(self, cards):
self.cards = cards
@classmethod
def create(cls, shuffle=False):
"""Create a new deck of 52 cards"""
cards = [Card(s, r) for r in Card.RANKS for s in Card.SUITS]
if shuffle:
random.shuffle(cards)
return cls(cards)
def deal(self, num_hands):
"""Deal the cards in the deck into a number of hands"""
cls = self.__class__
return tuple(cls(self.cards[i::num_hands]) for i in range(num_hands))
| Deck |
python | ansible__ansible | lib/ansible/parsing/vault/__init__.py | {
"start": 2829,
"end": 9769
} | class ____(AnsibleError):
pass
def is_encrypted(data):
""" Test if this is vault encrypted data blob
:arg data: a byte or text string to test whether it is recognized as vault
encrypted data
:returns: True if it is recognized. Otherwise, False.
"""
try:
# Make sure we have a byte string and that it only contains ascii
# bytes.
b_data = to_bytes(to_text(data, encoding='ascii', errors='strict', nonstring='strict'), encoding='ascii', errors='strict')
except (UnicodeError, TypeError):
# The vault format is pure ascii so if we failed to encode to bytes
# via ascii we know that this is not vault data.
# Similarly, if it's not a string, it's not vault data
return False
if b_data.startswith(b_HEADER):
return True
return False
def is_encrypted_file(file_obj, start_pos=0, count=len(b_HEADER)):
"""Test if the contents of a file obj are a vault encrypted data blob.
:arg file_obj: A file object that will be read from.
:kwarg start_pos: A byte offset in the file to start reading the header
from. Defaults to 0, the beginning of the file.
:kwarg count: Read up to this number of bytes from the file to determine
if it looks like encrypted vault data. The default is the size of the
the vault header, which is what is needed most times.
For some IO classes, or files that don't begin with the vault itself,
set to -1 to read to the end of file.
:returns: True if the file looks like a vault file. Otherwise, False.
"""
# read the header and reset the file stream to where it started
current_position = file_obj.tell()
try:
file_obj.seek(start_pos)
return is_encrypted(file_obj.read(count))
finally:
file_obj.seek(current_position)
def _parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id=None):
b_tmpdata = b_vaulttext_envelope.splitlines()
b_tmpheader = b_tmpdata[0].strip().split(b';')
b_version = b_tmpheader[1].strip()
cipher_name = to_text(b_tmpheader[2].strip())
vault_id = default_vault_id
# Only attempt to find vault_id if the vault file is version 1.2 or newer
# if self.b_version == b'1.2':
if len(b_tmpheader) >= 4:
vault_id = to_text(b_tmpheader[3].strip())
b_ciphertext = b''.join(b_tmpdata[1:])
# DTFIX7: possible candidate for propagate_origin
b_ciphertext = AnsibleTagHelper.tag_copy(b_vaulttext_envelope, b_ciphertext)
return b_ciphertext, b_version, cipher_name, vault_id
def parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id=None):
"""Parse the vaulttext envelope
When data is saved, it has a header prepended and is formatted into 80
character lines. This method extracts the information from the header
and then removes the header and the inserted newlines. The string returned
is suitable for processing by the Cipher classes.
:arg b_vaulttext_envelope: byte str containing the data from a save file
:arg default_vault_id: The vault_id name to use if the vaulttext does not provide one.
:returns: A tuple of byte str of the vaulttext suitable to pass to parse_vaultext,
a byte str of the vault format version,
the name of the cipher used, and the vault_id.
:raises: AnsibleVaultFormatError: if the vaulttext_envelope format is invalid
"""
# used by decrypt
default_vault_id = default_vault_id or C.DEFAULT_VAULT_IDENTITY
try:
return _parse_vaulttext_envelope(b_vaulttext_envelope, default_vault_id)
except Exception as ex:
raise AnsibleVaultFormatError("Vault envelope format error.", obj=b_vaulttext_envelope) from ex
def format_vaulttext_envelope(b_ciphertext, cipher_name, version=None, vault_id=None):
""" Add header and format to 80 columns
:arg b_ciphertext: the encrypted and hexlified data as a byte string
:arg cipher_name: unicode cipher name (for ex, u'AES256')
:arg version: unicode vault version (for ex, '1.2'). Optional ('1.1' is default)
:arg vault_id: unicode vault identifier. If provided, the version will be bumped to 1.2.
:returns: a byte str that should be dumped into a file. It's
formatted to 80 char columns and has the header prepended
"""
if not cipher_name:
raise AnsibleError("the cipher must be set before adding a header")
version = version or '1.1'
# If we specify a vault_id, use format version 1.2. For no vault_id, stick to 1.1
if vault_id and vault_id != u'default':
version = '1.2'
b_version = to_bytes(version, 'utf-8', errors='strict')
b_vault_id = to_bytes(vault_id, 'utf-8', errors='strict')
b_cipher_name = to_bytes(cipher_name, 'utf-8', errors='strict')
header_parts = [b_HEADER,
b_version,
b_cipher_name]
if b_version == b'1.2' and b_vault_id:
header_parts.append(b_vault_id)
header = b';'.join(header_parts)
b_vaulttext = [header]
b_vaulttext += [b_ciphertext[i:i + 80] for i in range(0, len(b_ciphertext), 80)]
b_vaulttext += [b'']
b_vaulttext = b'\n'.join(b_vaulttext)
return b_vaulttext
def _unhexlify(b_data):
try:
# DTFIX7: possible candidate for propagate_origin
return AnsibleTagHelper.tag_copy(b_data, unhexlify(b_data))
except (BinasciiError, TypeError) as ex:
raise AnsibleVaultFormatError('Vault format unhexlify error.', obj=b_data) from ex
def _parse_vaulttext(b_vaulttext):
b_vaulttext = _unhexlify(b_vaulttext)
b_salt, b_crypted_hmac, b_ciphertext = b_vaulttext.split(b"\n", 2)
b_salt = _unhexlify(b_salt)
b_ciphertext = _unhexlify(b_ciphertext)
return b_ciphertext, b_salt, b_crypted_hmac
def parse_vaulttext(b_vaulttext):
"""Parse the vaulttext
:arg b_vaulttext: byte str containing the vaulttext (ciphertext, salt, crypted_hmac)
:returns: A tuple of byte str of the ciphertext suitable for passing to a
Cipher class's decrypt() function, a byte str of the salt,
and a byte str of the crypted_hmac
:raises: AnsibleVaultFormatError: if the vaulttext format is invalid
"""
# SPLIT SALT, DIGEST, AND DATA
try:
return _parse_vaulttext(b_vaulttext)
except AnsibleVaultFormatError:
raise
except Exception as ex:
raise AnsibleVaultFormatError("Vault vaulttext format error.", obj=b_vaulttext) from ex
def verify_secret_is_not_empty(secret, msg=None):
"""Check the secret against minimal requirements.
Raises: AnsibleVaultPasswordError if the password does not meet requirements.
Currently, only requirement is that the password is not None or an empty string.
"""
msg = msg or 'Invalid vault password was provided'
if not secret:
raise AnsibleVaultPasswordError(msg)
| AnsibleVaultFormatError |
python | pytorch__pytorch | torch/_dynamo/variables/functions.py | {
"start": 86733,
"end": 90075
} | class ____(VariableTracker):
def __init__(
self,
func: VariableTracker,
args: Sequence[VariableTracker],
keywords: dict[str, VariableTracker],
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.func = func
assert isinstance(args, list)
self.args = args
assert isinstance(keywords, dict)
self.keywords = keywords
# fake_value is used for id calculation. Creating this value and id'ng
# on it is sufficient for the tracing purposes.
self.fake_value = functools.partial(identity)
def python_type(self) -> type:
return functools.partial
def reconstruct(self, codegen: "PyCodegen") -> None:
codegen.add_push_null(lambda: codegen.load_import_from("functools", "partial"))
codegen(self.func)
if self.args:
codegen.foreach(self.args)
if not self.keywords:
codegen.extend_output(create_call_function(len(self.args) + 1, False))
return
codegen.foreach(self.keywords.values())
keys = tuple(self.keywords.keys())
codegen.extend_output(
codegen.create_call_function_kw(len(keys) + len(self.args) + 1, keys, False)
)
def get_function(self) -> Any:
return self.as_python_constant()
def call_function(
self,
tx: "InstructionTranslator",
args: Sequence[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
merged_args = self.args + list(args)
merged_kwargs = {**self.keywords, **kwargs}
return self.func.call_function(tx, merged_args, merged_kwargs)
def call_obj_hasattr(
self, tx: "InstructionTranslator", name: str
) -> ConstantVariable:
# functools.partial uses slots, so attributes are constant
return variables.ConstantVariable.create(
hasattr(functools.partial(identity), name)
)
def var_getattr(self, tx: "InstructionTranslator", name: str) -> VariableTracker:
source = self.source and AttrSource(self.source, name)
# Handle __slots__
if name == "func":
return self.func
if name == "args":
return variables.ListVariable(self.args, source=source)
if name == "keywords":
items = {ConstantVariable.create(k): v for k, v in self.keywords.items()}
return variables.ConstDictVariable(items, source=source)
if name in cmp_name_to_op_mapping:
return variables.GetAttrVariable(self, name)
raise_observed_exception(AttributeError, tx)
def as_python_constant(self) -> Any:
return functools.partial(
self.func.as_python_constant(),
*[arg.as_python_constant() for arg in self.args],
**{k: v.as_python_constant() for k, v in self.keywords.items()},
)
def guard_as_python_constant(self) -> Any:
"""Similar to as_python_constant(), but add ID_MATCH guards to try to force things to become constants"""
return functools.partial(
self.func.guard_as_python_constant(),
*[v.guard_as_python_constant() for v in self.args],
**{k: v.guard_as_python_constant() for k, v in self.keywords.items()},
)
| FunctoolsPartialVariable |
python | fsspec__filesystem_spec | fsspec/caching.py | {
"start": 737,
"end": 2795
} | class ____:
"""Pass-though cache: doesn't keep anything, calls every time
Acts as base class for other cachers
Parameters
----------
blocksize: int
How far to read ahead in numbers of bytes
fetcher: func
Function of the form f(start, end) which gets bytes from remote as
specified
size: int
How big this file is
"""
name: ClassVar[str] = "none"
def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None:
self.blocksize = blocksize
self.nblocks = 0
self.fetcher = fetcher
self.size = size
self.hit_count = 0
self.miss_count = 0
# the bytes that we actually requested
self.total_requested_bytes = 0
def _fetch(self, start: int | None, stop: int | None) -> bytes:
if start is None:
start = 0
if stop is None:
stop = self.size
if start >= self.size or start >= stop:
return b""
return self.fetcher(start, stop)
def _reset_stats(self) -> None:
"""Reset hit and miss counts for a more ganular report e.g. by file."""
self.hit_count = 0
self.miss_count = 0
self.total_requested_bytes = 0
def _log_stats(self) -> str:
"""Return a formatted string of the cache statistics."""
if self.hit_count == 0 and self.miss_count == 0:
# a cache that does nothing, this is for logs only
return ""
return f" , {self.name}: {self.hit_count} hits, {self.miss_count} misses, {self.total_requested_bytes} total requested bytes"
def __repr__(self) -> str:
# TODO: use rich for better formatting
return f"""
<{self.__class__.__name__}:
block size : {self.blocksize}
block count : {self.nblocks}
file size : {self.size}
cache hits : {self.hit_count}
cache misses: {self.miss_count}
total requested bytes: {self.total_requested_bytes}>
"""
| BaseCache |
python | ansible__ansible | test/integration/targets/ansible-inventory/filter_plugins/toml.py | {
"start": 129,
"end": 256
} | class ____:
def filters(self) -> dict[str, t.Any]:
return dict(
from_toml=from_toml,
)
| FilterModule |
python | encode__django-rest-framework | rest_framework/throttling.py | {
"start": 1426,
"end": 5234
} | class ____(BaseThrottle):
"""
A simple cache implementation, that only requires `.get_cache_key()`
to be overridden.
The rate (requests / seconds) is set by a `rate` attribute on the Throttle
class. The attribute is a string of the form 'number_of_requests/period'.
Period should be one of: ('s', 'sec', 'm', 'min', 'h', 'hour', 'd', 'day')
Previous request information used for throttling is stored in the cache.
"""
cache = default_cache
timer = time.time
cache_format = 'throttle_%(scope)s_%(ident)s'
scope = None
THROTTLE_RATES = api_settings.DEFAULT_THROTTLE_RATES
def __init__(self):
if not getattr(self, 'rate', None):
self.rate = self.get_rate()
self.num_requests, self.duration = self.parse_rate(self.rate)
def get_cache_key(self, request, view):
"""
Should return a unique cache-key which can be used for throttling.
Must be overridden.
May return `None` if the request should not be throttled.
"""
raise NotImplementedError('.get_cache_key() must be overridden')
def get_rate(self):
"""
Determine the string representation of the allowed request rate.
"""
if not getattr(self, 'scope', None):
msg = ("You must set either `.scope` or `.rate` for '%s' throttle" %
self.__class__.__name__)
raise ImproperlyConfigured(msg)
try:
return self.THROTTLE_RATES[self.scope]
except KeyError:
msg = "No default throttle rate set for '%s' scope" % self.scope
raise ImproperlyConfigured(msg)
def parse_rate(self, rate):
"""
Given the request rate string, return a two tuple of:
<allowed number of requests>, <period of time in seconds>
"""
if rate is None:
return (None, None)
num, period = rate.split('/')
num_requests = int(num)
duration = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}[period[0]]
return (num_requests, duration)
def allow_request(self, request, view):
"""
Implement the check to see if the request should be throttled.
On success calls `throttle_success`.
On failure calls `throttle_failure`.
"""
if self.rate is None:
return True
self.key = self.get_cache_key(request, view)
if self.key is None:
return True
self.history = self.cache.get(self.key, [])
self.now = self.timer()
# Drop any requests from the history which have now passed the
# throttle duration
while self.history and self.history[-1] <= self.now - self.duration:
self.history.pop()
if len(self.history) >= self.num_requests:
return self.throttle_failure()
return self.throttle_success()
def throttle_success(self):
"""
Inserts the current request's timestamp along with the key
into the cache.
"""
self.history.insert(0, self.now)
self.cache.set(self.key, self.history, self.duration)
return True
def throttle_failure(self):
"""
Called when a request to the API has failed due to throttling.
"""
return False
def wait(self):
"""
Returns the recommended next request time in seconds.
"""
if self.history:
remaining_duration = self.duration - (self.now - self.history[-1])
else:
remaining_duration = self.duration
available_requests = self.num_requests - len(self.history) + 1
if available_requests <= 0:
return None
return remaining_duration / float(available_requests)
| SimpleRateThrottle |
python | dagster-io__dagster | python_modules/dagster-pipes/dagster_pipes/__init__.py | {
"start": 16342,
"end": 16733
} | class ____(ABC, Generic[T_LogChannel]):
LOG_WRITER_KEY = "log_writer"
@abstractmethod
@contextmanager
def open(self, params: PipesParams) -> Iterator[T_LogChannel]: ...
@final
def get_opened_payload(self) -> PipesLogWriterOpenedData:
return {"extras": self.get_opened_extras()}
def get_opened_extras(self) -> PipesExtras:
return {}
| PipesLogWriter |
python | pypa__pip | src/pip/_vendor/rich/abc.py | {
"start": 22,
"end": 890
} | class ____(ABC):
"""An abstract base class for Rich renderables.
Note that there is no need to extend this class, the intended use is to check if an
object supports the Rich renderable protocol. For example::
if isinstance(my_object, RichRenderable):
console.print(my_object)
"""
@classmethod
def __subclasshook__(cls, other: type) -> bool:
"""Check if this class supports the rich render protocol."""
return hasattr(other, "__rich_console__") or hasattr(other, "__rich__")
if __name__ == "__main__": # pragma: no cover
from pip._vendor.rich.text import Text
t = Text()
print(isinstance(Text, RichRenderable))
print(isinstance(t, RichRenderable))
class Foo:
pass
f = Foo()
print(isinstance(f, RichRenderable))
print(isinstance("", RichRenderable))
| RichRenderable |
python | Netflix__metaflow | test/unit/test_secrets_decorator.py | {
"start": 363,
"end": 4569
} | class ____(unittest.TestCase):
@patch(
"metaflow.metaflow_config.DEFAULT_SECRETS_BACKEND_TYPE",
None,
)
def test_missing_default_secrets_backend_type(self):
self.assertIsNone(metaflow.metaflow_config.DEFAULT_SECRETS_BACKEND_TYPE)
# assumes DEFAULT_SECRETS_BACKEND_TYPE is None when we run this test
with self.assertRaises(MetaflowException):
SecretSpec.secret_spec_from_str("secret_id", None)
@patch(
"metaflow.metaflow_config.DEFAULT_SECRETS_BACKEND_TYPE",
"some-default-backend-type",
)
def test_constructors(self):
# from str
# explicit type
self.assertEqual(
{
"options": {},
"secret_id": "the_id",
"secrets_backend_type": "explicit-type",
"role": None,
},
SecretSpec.secret_spec_from_str("explicit-type.the_id", None).to_json(),
)
# implicit type
self.assertEqual(
{
"options": {},
"secret_id": "the_id",
"secrets_backend_type": "some-default-backend-type",
"role": None,
},
SecretSpec.secret_spec_from_str("the_id", None).to_json(),
)
# from dict
# explicit type, no options
self.assertEqual(
{
"options": {},
"secret_id": "the_id",
"secrets_backend_type": "explicit-type",
"role": None,
},
SecretSpec.secret_spec_from_dict(
{
"type": "explicit-type",
"id": "the_id",
},
None,
).to_json(),
)
# implicit type, with options
self.assertEqual(
{
"options": {"a": "b"},
"secret_id": "the_id",
"secrets_backend_type": "some-default-backend-type",
"role": None,
},
SecretSpec.secret_spec_from_dict(
{"id": "the_id", "options": {"a": "b"}}, None
).to_json(),
)
# test role resolution - source level wins
self.assertDictEqual(
{
"secret_id": "the_id",
"secrets_backend_type": "some-default-backend-type",
"role": "source-level-role",
"options": {},
},
SecretSpec.secret_spec_from_dict(
{"id": "the_id", "role": "source-level-role"},
"decorator-level-role",
).to_json(),
)
# test role resolution - default to decorator level if source level unset
self.assertDictEqual(
{
"secret_id": "the_id",
"secrets_backend_type": "some-default-backend-type",
"role": "decorator-level-role",
"options": {},
},
SecretSpec.secret_spec_from_dict(
{"id": "the_id"},
role="decorator-level-role",
).to_json(),
)
# check raise on bad type field
with self.assertRaises(MetaflowException):
SecretSpec.secret_spec_from_dict(
{
"type": 42,
"id": "the_id",
},
None,
)
# check raise on bad id field
with self.assertRaises(MetaflowException):
SecretSpec.secret_spec_from_dict(
{
"id": 42,
},
None,
)
# check raise on bad options field
with self.assertRaises(MetaflowException):
SecretSpec.secret_spec_from_dict({"id": "the_id", "options": []}, None)
# check raise on bad role field
with self.assertRaises(MetaflowException):
SecretSpec.secret_spec_from_dict({"id": "the_id", "role": 42}, None)
def test_secrets_provider_resolution(self):
with self.assertRaises(MetaflowException):
get_secrets_backend_provider(str(time.time()))
| TestSecretsDecorator |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0107_alter_project_language.py | {
"start": 148,
"end": 14519
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0106_add_addons_config"),
]
operations = [
migrations.AlterField(
model_name="historicalproject",
name="language",
field=models.CharField(
choices=[
("aa", "Afar"),
("ab", "Abkhaz"),
("acr", "Achi"),
("af", "Afrikaans"),
("agu", "Awakateko"),
("am", "Amharic"),
("ar", "Arabic"),
("as", "Assamese"),
("ay", "Aymara"),
("az", "Azerbaijani"),
("ba", "Bashkir"),
("be", "Belarusian"),
("bg", "Bulgarian"),
("bh", "Bihari"),
("bi", "Bislama"),
("bn", "Bengali"),
("bo", "Tibetan"),
("br", "Breton"),
("ca", "Catalan"),
("caa", "Ch'orti'"),
("cac", "Chuj"),
("cab", "Garífuna"),
("cak", "Kaqchikel"),
("co", "Corsican"),
("cs", "Czech"),
("cy", "Welsh"),
("da", "Danish"),
("de", "German"),
("dz", "Dzongkha"),
("el", "Greek"),
("en", "English"),
("eo", "Esperanto"),
("es", "Spanish"),
("et", "Estonian"),
("eu", "Basque"),
("fa", "Iranian"),
("fi", "Finnish"),
("fj", "Fijian"),
("fo", "Faroese"),
("fr", "French"),
("fy", "Western Frisian"),
("ga", "Irish"),
("gd", "Scottish Gaelic"),
("gl", "Galician"),
("gn", "Guarani"),
("gu", "Gujarati"),
("ha", "Hausa"),
("hi", "Hindi"),
("he", "Hebrew"),
("hr", "Croatian"),
("hu", "Hungarian"),
("hy", "Armenian"),
("ia", "Interlingua"),
("id", "Indonesian"),
("ie", "Interlingue"),
("ik", "Inupiaq"),
("is", "Icelandic"),
("it", "Italian"),
("itz", "Itza'"),
("iu", "Inuktitut"),
("ixl", "Ixil"),
("ja", "Japanese"),
("jac", "Popti'"),
("jv", "Javanese"),
("ka", "Georgian"),
("kjb", "Q'anjob'al"),
("kek", "Q'eqchi'"),
("kk", "Kazakh"),
("kl", "Kalaallisut"),
("km", "Khmer"),
("kn", "Kannada"),
("knj", "Akateko"),
("ko", "Korean"),
("ks", "Kashmiri"),
("ku", "Kurdish"),
("ky", "Kyrgyz"),
("la", "Latin"),
("ln", "Lingala"),
("lo", "Lao"),
("lt", "Lithuanian"),
("lv", "Latvian"),
("mam", "Mam"),
("mg", "Malagasy"),
("mi", "Maori"),
("mk", "Macedonian"),
("ml", "Malayalam"),
("mn", "Mongolian"),
("mop", "Mopan"),
("mr", "Marathi"),
("ms", "Malay"),
("mt", "Maltese"),
("my", "Burmese"),
("na", "Nauru"),
("ne", "Nepali"),
("nl", "Dutch"),
("no", "Norwegian"),
("oc", "Occitan"),
("om", "Oromo"),
("or", "Oriya"),
("pa", "Panjabi"),
("pl", "Polish"),
("pnb", "Western Punjabi"),
("poc", "Poqomam"),
("poh", "Poqomchi"),
("ps", "Pashto"),
("pt", "Portuguese"),
("qu", "Quechua"),
("quc", "K'iche'"),
("qum", "Sipakapense"),
("quv", "Sakapulteko"),
("rm", "Romansh"),
("rn", "Kirundi"),
("ro", "Romanian"),
("ru", "Russian"),
("rw", "Kinyarwanda"),
("sa", "Sanskrit"),
("sd", "Sindhi"),
("sg", "Sango"),
("si", "Sinhala"),
("sk", "Slovak"),
("skr", "Saraiki"),
("sl", "Slovenian"),
("sm", "Samoan"),
("sn", "Shona"),
("so", "Somali"),
("sq", "Albanian"),
("sr", "Serbian"),
("ss", "Swati"),
("st", "Southern Sotho"),
("su", "Sudanese"),
("sv", "Swedish"),
("sw", "Swahili"),
("ta", "Tamil"),
("te", "Telugu"),
("tg", "Tajik"),
("th", "Thai"),
("ti", "Tigrinya"),
("tk", "Turkmen"),
("tl", "Tagalog"),
("tn", "Tswana"),
("to", "Tonga"),
("tr", "Turkish"),
("ts", "Tsonga"),
("tt", "Tatar"),
("ttc", "Tektiteko"),
("tzj", "Tz'utujil"),
("tw", "Twi"),
("ug", "Uyghur"),
("uk", "Ukrainian"),
("ur", "Urdu"),
("usp", "Uspanteko"),
("uz", "Uzbek"),
("vi", "Vietnamese"),
("vo", "Volapuk"),
("wo", "Wolof"),
("xh", "Xhosa"),
("xin", "Xinka"),
("yi", "Yiddish"),
("yo", "Yoruba"),
("za", "Zhuang"),
("zh", "Chinese"),
("zu", "Zulu"),
("nb-no", "Norwegian Bokmal"),
("pt-br", "Brazilian Portuguese"),
("es-mx", "Mexican Spanish"),
("uk-ua", "Ukrainian"),
("zh-cn", "Simplified Chinese"),
("zh-tw", "Traditional Chinese"),
],
default="en",
help_text="The language the project documentation is rendered in. Note: this affects your project's URL.",
max_length=20,
verbose_name="Language",
),
),
migrations.AlterField(
model_name="project",
name="language",
field=models.CharField(
choices=[
("aa", "Afar"),
("ab", "Abkhaz"),
("acr", "Achi"),
("af", "Afrikaans"),
("agu", "Awakateko"),
("am", "Amharic"),
("ar", "Arabic"),
("as", "Assamese"),
("ay", "Aymara"),
("az", "Azerbaijani"),
("ba", "Bashkir"),
("be", "Belarusian"),
("bg", "Bulgarian"),
("bh", "Bihari"),
("bi", "Bislama"),
("bn", "Bengali"),
("bo", "Tibetan"),
("br", "Breton"),
("ca", "Catalan"),
("caa", "Ch'orti'"),
("cac", "Chuj"),
("cab", "Garífuna"),
("cak", "Kaqchikel"),
("co", "Corsican"),
("cs", "Czech"),
("cy", "Welsh"),
("da", "Danish"),
("de", "German"),
("dz", "Dzongkha"),
("el", "Greek"),
("en", "English"),
("eo", "Esperanto"),
("es", "Spanish"),
("et", "Estonian"),
("eu", "Basque"),
("fa", "Iranian"),
("fi", "Finnish"),
("fj", "Fijian"),
("fo", "Faroese"),
("fr", "French"),
("fy", "Western Frisian"),
("ga", "Irish"),
("gd", "Scottish Gaelic"),
("gl", "Galician"),
("gn", "Guarani"),
("gu", "Gujarati"),
("ha", "Hausa"),
("hi", "Hindi"),
("he", "Hebrew"),
("hr", "Croatian"),
("hu", "Hungarian"),
("hy", "Armenian"),
("ia", "Interlingua"),
("id", "Indonesian"),
("ie", "Interlingue"),
("ik", "Inupiaq"),
("is", "Icelandic"),
("it", "Italian"),
("itz", "Itza'"),
("iu", "Inuktitut"),
("ixl", "Ixil"),
("ja", "Japanese"),
("jac", "Popti'"),
("jv", "Javanese"),
("ka", "Georgian"),
("kjb", "Q'anjob'al"),
("kek", "Q'eqchi'"),
("kk", "Kazakh"),
("kl", "Kalaallisut"),
("km", "Khmer"),
("kn", "Kannada"),
("knj", "Akateko"),
("ko", "Korean"),
("ks", "Kashmiri"),
("ku", "Kurdish"),
("ky", "Kyrgyz"),
("la", "Latin"),
("ln", "Lingala"),
("lo", "Lao"),
("lt", "Lithuanian"),
("lv", "Latvian"),
("mam", "Mam"),
("mg", "Malagasy"),
("mi", "Maori"),
("mk", "Macedonian"),
("ml", "Malayalam"),
("mn", "Mongolian"),
("mop", "Mopan"),
("mr", "Marathi"),
("ms", "Malay"),
("mt", "Maltese"),
("my", "Burmese"),
("na", "Nauru"),
("ne", "Nepali"),
("nl", "Dutch"),
("no", "Norwegian"),
("oc", "Occitan"),
("om", "Oromo"),
("or", "Oriya"),
("pa", "Panjabi"),
("pl", "Polish"),
("pnb", "Western Punjabi"),
("poc", "Poqomam"),
("poh", "Poqomchi"),
("ps", "Pashto"),
("pt", "Portuguese"),
("qu", "Quechua"),
("quc", "K'iche'"),
("qum", "Sipakapense"),
("quv", "Sakapulteko"),
("rm", "Romansh"),
("rn", "Kirundi"),
("ro", "Romanian"),
("ru", "Russian"),
("rw", "Kinyarwanda"),
("sa", "Sanskrit"),
("sd", "Sindhi"),
("sg", "Sango"),
("si", "Sinhala"),
("sk", "Slovak"),
("skr", "Saraiki"),
("sl", "Slovenian"),
("sm", "Samoan"),
("sn", "Shona"),
("so", "Somali"),
("sq", "Albanian"),
("sr", "Serbian"),
("ss", "Swati"),
("st", "Southern Sotho"),
("su", "Sudanese"),
("sv", "Swedish"),
("sw", "Swahili"),
("ta", "Tamil"),
("te", "Telugu"),
("tg", "Tajik"),
("th", "Thai"),
("ti", "Tigrinya"),
("tk", "Turkmen"),
("tl", "Tagalog"),
("tn", "Tswana"),
("to", "Tonga"),
("tr", "Turkish"),
("ts", "Tsonga"),
("tt", "Tatar"),
("ttc", "Tektiteko"),
("tzj", "Tz'utujil"),
("tw", "Twi"),
("ug", "Uyghur"),
("uk", "Ukrainian"),
("ur", "Urdu"),
("usp", "Uspanteko"),
("uz", "Uzbek"),
("vi", "Vietnamese"),
("vo", "Volapuk"),
("wo", "Wolof"),
("xh", "Xhosa"),
("xin", "Xinka"),
("yi", "Yiddish"),
("yo", "Yoruba"),
("za", "Zhuang"),
("zh", "Chinese"),
("zu", "Zulu"),
("nb-no", "Norwegian Bokmal"),
("pt-br", "Brazilian Portuguese"),
("es-mx", "Mexican Spanish"),
("uk-ua", "Ukrainian"),
("zh-cn", "Simplified Chinese"),
("zh-tw", "Traditional Chinese"),
],
default="en",
help_text="The language the project documentation is rendered in. Note: this affects your project's URL.",
max_length=20,
verbose_name="Language",
),
),
]
| Migration |
python | tensorflow__tensorflow | tensorflow/python/data/ops/range_op.py | {
"start": 1099,
"end": 2799
} | class ____(dataset_ops.DatasetSource):
"""A `Dataset` of a step separated range of values."""
def __init__(self, *args, **kwargs):
"""See `Dataset.range()` for details."""
self._parse_args(*args, **kwargs)
self._structure = tensor_spec.TensorSpec([], self._output_type)
variant_tensor = gen_dataset_ops.range_dataset(
start=self._start,
stop=self._stop,
step=self._step,
**self._common_args)
super().__init__(variant_tensor)
def _parse_args(self, *args, **kwargs):
"""Parses arguments according to the same rules as the `range()` builtin."""
if len(args) == 1:
self._start = self._build_tensor(0, "start")
self._stop = self._build_tensor(args[0], "stop")
self._step = self._build_tensor(1, "step")
elif len(args) == 2:
self._start = self._build_tensor(args[0], "start")
self._stop = self._build_tensor(args[1], "stop")
self._step = self._build_tensor(1, "step")
elif len(args) == 3:
self._start = self._build_tensor(args[0], "start")
self._stop = self._build_tensor(args[1], "stop")
self._step = self._build_tensor(args[2], "step")
else:
raise ValueError(f"Invalid `args`. The length of `args` should be "
f"between 1 and 3 but was {len(args)}.")
if "output_type" in kwargs:
self._output_type = kwargs["output_type"]
else:
self._output_type = dtypes.int64
self._name = kwargs["name"] if "name" in kwargs else None
def _build_tensor(self, int64_value, name):
return ops.convert_to_tensor(int64_value, dtype=dtypes.int64, name=name)
@property
def element_spec(self):
return self._structure
| _RangeDataset |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/cx_oracle.py | {
"start": 23710,
"end": 23815
} | class ____(sqltypes.CHAR):
def get_dbapi_type(self, dbapi):
return dbapi.FIXED_CHAR
| _OracleChar |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeParams1.py | {
"start": 158,
"end": 263
} | class ____[T2]: ...
def func2[T2](): ...
# This should generate an error because T3 is duplicated.
| ClassB |
python | numpy__numpy | numpy/_core/tests/test_strings.py | {
"start": 52409,
"end": 54251
} | class ____:
def test_center(self):
buf = np.array("😊", dtype="U")
fill = np.array("*", dtype="S")
res = np.array("*😊*", dtype="U")
assert_array_equal(np.strings.center(buf, 3, fill), res)
buf = np.array("s", dtype="S")
fill = np.array("*", dtype="U")
res = np.array("*s*", dtype="S")
assert_array_equal(np.strings.center(buf, 3, fill), res)
with pytest.raises(ValueError, match="'ascii' codec can't encode"):
buf = np.array("s", dtype="S")
fill = np.array("😊", dtype="U")
np.strings.center(buf, 3, fill)
def test_ljust(self):
buf = np.array("😊", dtype="U")
fill = np.array("*", dtype="S")
res = np.array("😊**", dtype="U")
assert_array_equal(np.strings.ljust(buf, 3, fill), res)
buf = np.array("s", dtype="S")
fill = np.array("*", dtype="U")
res = np.array("s**", dtype="S")
assert_array_equal(np.strings.ljust(buf, 3, fill), res)
with pytest.raises(ValueError, match="'ascii' codec can't encode"):
buf = np.array("s", dtype="S")
fill = np.array("😊", dtype="U")
np.strings.ljust(buf, 3, fill)
def test_rjust(self):
buf = np.array("😊", dtype="U")
fill = np.array("*", dtype="S")
res = np.array("**😊", dtype="U")
assert_array_equal(np.strings.rjust(buf, 3, fill), res)
buf = np.array("s", dtype="S")
fill = np.array("*", dtype="U")
res = np.array("**s", dtype="S")
assert_array_equal(np.strings.rjust(buf, 3, fill), res)
with pytest.raises(ValueError, match="'ascii' codec can't encode"):
buf = np.array("s", dtype="S")
fill = np.array("😊", dtype="U")
np.strings.rjust(buf, 3, fill)
| TestMixedTypeMethods |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI033.py | {
"start": 1797,
"end": 1928
} | class ____:
N: TypeAlias = None # type: can't parse me either!
# This whole line is commented out and indented # type: str
| Bar |
python | numba__numba | numba/tests/test_listobject.py | {
"start": 32004,
"end": 32888
} | class ____(MemoryLeakMixin, TestCase):
"""Test list copy. """
def test_list_copy_empty(self):
@njit
def foo():
l = listobject.new_list(int32)
n = l.copy()
return len(l), len(n)
self.assertEqual(foo(), (0, 0))
def test_list_copy_singleton(self):
@njit
def foo():
l = listobject.new_list(int32)
l.append(0)
n = l.copy()
return len(l), len(n), l[0], n[0]
self.assertEqual(foo(), (1, 1, 0, 0))
def test_list_copy_multiple(self):
@njit
def foo():
l = listobject.new_list(int32)
for j in range(10, 13):
l.append(j)
n = l.copy()
return len(l), len(n), l[0], l[1], l[2], l[0], l[1], l[2]
self.assertEqual(foo(), (3, 3, 10, 11, 12, 10, 11, 12))
| TestCopy |
python | doocs__leetcode | solution/1600-1699/1641.Count Sorted Vowel Strings/Solution.py | {
"start": 0,
"end": 204
} | class ____:
def countVowelStrings(self, n: int) -> int:
@cache
def dfs(i, j):
return 1 if i >= n else sum(dfs(i + 1, k) for k in range(j, 5))
return dfs(0, 0)
| Solution |
python | PyCQA__pylint | tests/functional/s/slots_checks.py | {
"start": 3675,
"end": 3738
} | class ____:
__slots__ = ["a", "b", "c"]
| SlotsManipulationTest |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-github/llama_index/readers/github/repository/github_client.py | {
"start": 544,
"end": 1709
} | class ____(DataClassJsonMixin):
"""
Dataclass for the response from the Github API's getTree endpoint.
Attributes:
- sha (str): SHA1 checksum ID of the tree.
- url (str): URL for the tree.
- tree (List[GitTreeObject]): List of objects in the tree.
- truncated (bool): Whether the tree is truncated.
Examples:
>>> tree = client.get_tree("owner", "repo", "branch")
>>> tree.sha
"""
@dataclass
class GitTreeObject(DataClassJsonMixin):
"""
Dataclass for the objects in the tree.
Attributes:
- path (str): Path to the object.
- mode (str): Mode of the object.
- type (str): Type of the object.
- sha (str): SHA1 checksum ID of the object.
- url (str): URL for the object.
- size (Optional[int]): Size of the object (only for blobs).
"""
path: str
mode: str
type: str
sha: str
url: Optional[str] = None
size: Optional[int] = None
sha: str
url: str
tree: List[GitTreeObject]
truncated: bool
@dataclass
| GitTreeResponseModel |
python | scipy__scipy | scipy/interpolate/tests/test_rgi.py | {
"start": 34177,
"end": 49988
} | class ____:
def _sample_2d_data(self):
x = np.array([.5, 2., 3., 4., 5.5, 6.])
y = np.array([.5, 2., 3., 4., 5.5, 6.])
z = np.array(
[
[1, 2, 1, 2, 1, 1],
[1, 2, 1, 2, 1, 1],
[1, 2, 3, 2, 1, 1],
[1, 2, 2, 2, 1, 1],
[1, 2, 1, 2, 1, 1],
[1, 2, 2, 2, 1, 1],
]
)
return x, y, z
def test_spline_2d(self):
x, y, z = self._sample_2d_data()
lut = RectBivariateSpline(x, y, z)
xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
assert_array_almost_equal(interpn((x, y), z, xi, method="splinef2d"),
lut.ev(xi[:, 0], xi[:, 1]))
@parametrize_rgi_interp_methods
def test_list_input(self, method):
x, y, z = self._sample_2d_data()
xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
v1 = interpn((x, y), z, xi, method=method)
v2 = interpn(
(x.tolist(), y.tolist()), z.tolist(), xi.tolist(), method=method
)
xp_assert_close(v1, v2, err_msg=method)
def test_spline_2d_outofbounds(self):
x = np.array([.5, 2., 3., 4., 5.5])
y = np.array([.5, 2., 3., 4., 5.5])
z = np.array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
lut = RectBivariateSpline(x, y, z)
xi = np.array([[1, 2.3, 6.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, -4.0, 5.0, 1.0, 3]]).T
actual = interpn((x, y), z, xi, method="splinef2d",
bounds_error=False, fill_value=999.99)
expected = lut.ev(xi[:, 0], xi[:, 1])
expected[2:4] = 999.99
assert_array_almost_equal(actual, expected)
# no extrapolation for splinef2d
assert_raises(ValueError, interpn, (x, y), z, xi, method="splinef2d",
bounds_error=False, fill_value=None)
def _sample_4d_data(self):
points = [(0., .5, 1.)] * 2 + [(0., 5., 10.)] * 2
values = np.asarray([0., .5, 1.])
values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
return points, values
def test_linear_4d(self):
# create a 4-D grid of 3 points in each dimension
points, values = self._sample_4d_data()
interp_rg = RegularGridInterpolator(points, values)
sample = np.asarray([[0.1, 0.1, 10., 9.]])
wanted = interpn(points, values, sample, method="linear")
assert_array_almost_equal(interp_rg(sample), wanted)
def test_4d_linear_outofbounds(self):
# create a 4-D grid of 3 points in each dimension
points, values = self._sample_4d_data()
sample = np.asarray([[0.1, -0.1, 10.1, 9.]])
wanted = np.asarray([999.99])
actual = interpn(points, values, sample, method="linear",
bounds_error=False, fill_value=999.99)
assert_array_almost_equal(actual, wanted)
def test_nearest_4d(self):
# create a 4-D grid of 3 points in each dimension
points, values = self._sample_4d_data()
interp_rg = RegularGridInterpolator(points, values, method="nearest")
sample = np.asarray([[0.1, 0.1, 10., 9.]])
wanted = interpn(points, values, sample, method="nearest")
assert_array_almost_equal(interp_rg(sample), wanted)
def test_4d_nearest_outofbounds(self):
# create a 4-D grid of 3 points in each dimension
points, values = self._sample_4d_data()
sample = np.asarray([[0.1, -0.1, 10.1, 9.]])
wanted = np.asarray([999.99])
actual = interpn(points, values, sample, method="nearest",
bounds_error=False, fill_value=999.99)
assert_array_almost_equal(actual, wanted)
def test_xi_1d(self):
# verify that 1-D xi works as expected
points, values = self._sample_4d_data()
sample = np.asarray([0.1, 0.1, 10., 9.])
v1 = interpn(points, values, sample, bounds_error=False)
v2 = interpn(points, values, sample[None,:], bounds_error=False)
xp_assert_close(v1, v2)
def test_xi_nd(self):
# verify that higher-d xi works as expected
points, values = self._sample_4d_data()
np.random.seed(1234)
sample = np.random.rand(2, 3, 4)
v1 = interpn(points, values, sample, method='nearest',
bounds_error=False)
assert v1.shape == (2, 3)
v2 = interpn(points, values, sample.reshape(-1, 4),
method='nearest', bounds_error=False)
xp_assert_close(v1, v2.reshape(v1.shape))
@parametrize_rgi_interp_methods
def test_xi_broadcast(self, method):
# verify that the interpolators broadcast xi
x, y, values = self._sample_2d_data()
points = (x, y)
xi = np.linspace(0, 1, 2)
yi = np.linspace(0, 3, 3)
sample = (xi[:, None], yi[None, :])
v1 = interpn(points, values, sample, method=method, bounds_error=False)
assert v1.shape == (2, 3)
xx, yy = np.meshgrid(xi, yi)
sample = np.c_[xx.T.ravel(), yy.T.ravel()]
v2 = interpn(points, values, sample,
method=method, bounds_error=False)
xp_assert_close(v1, v2.reshape(v1.shape))
@pytest.mark.fail_slow(5)
@parametrize_rgi_interp_methods
def test_nonscalar_values(self, method):
if method == "quintic":
pytest.skip("Way too slow.")
# Verify that non-scalar valued values also works
points = [(0.0, 0.5, 1.0, 1.5, 2.0, 2.5)] * 2 + [
(0.0, 5.0, 10.0, 15.0, 20, 25.0)
] * 2
rng = np.random.default_rng(1234)
values = rng.random((6, 6, 6, 6, 8))
sample = rng.random((7, 3, 4))
v = interpn(points, values, sample, method=method,
bounds_error=False)
assert v.shape == (7, 3, 8), method
vs = [interpn(points, values[..., j], sample, method=method,
bounds_error=False) for j in range(8)]
v2 = np.array(vs).transpose(1, 2, 0)
xp_assert_close(v, v2, atol=1e-14, err_msg=method)
@parametrize_rgi_interp_methods
def test_nonscalar_values_2(self, method):
if method in {"cubic", "quintic"}:
pytest.skip("Way too slow.")
# Verify that non-scalar valued values also work : use different
# lengths of axes to simplify tracing the internals
points = [(0.0, 0.5, 1.0, 1.5, 2.0, 2.5),
(0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0),
(0.0, 5.0, 10.0, 15.0, 20, 25.0, 35.0, 36.0),
(0.0, 5.0, 10.0, 15.0, 20, 25.0, 35.0, 36.0, 47)]
rng = np.random.default_rng(1234)
trailing_points = (3, 2)
# NB: values has a `num_trailing_dims` trailing dimension
values = rng.random((6, 7, 8, 9, *trailing_points))
sample = rng.random(4) # a single sample point !
v = interpn(points, values, sample, method=method, bounds_error=False)
# v has a single sample point *per entry in the trailing dimensions*
assert v.shape == (1, *trailing_points)
# check the values, too : manually loop over the trailing dimensions
vs = [[
interpn(points, values[..., i, j], sample, method=method,
bounds_error=False) for i in range(values.shape[-2])
] for j in range(values.shape[-1])]
xp_assert_close(v, np.asarray(vs).T, atol=1e-14, err_msg=method)
def test_non_scalar_values_splinef2d(self):
# Vector-valued splines supported with fitpack
points, values = self._sample_4d_data()
np.random.seed(1234)
values = np.random.rand(3, 3, 3, 3, 6)
sample = np.random.rand(7, 11, 4)
assert_raises(ValueError, interpn, points, values, sample,
method='splinef2d')
@parametrize_rgi_interp_methods
def test_complex(self, method):
if method == "pchip":
pytest.skip("pchip does not make sense for complex data")
x, y, values = self._sample_2d_data()
points = (x, y)
values = values - 2j*values
sample = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
v1 = interpn(points, values, sample, method=method)
v2r = interpn(points, values.real, sample, method=method)
v2i = interpn(points, values.imag, sample, method=method)
v2 = v2r + 1j*v2i
xp_assert_close(v1, v2)
def test_complex_pchip(self):
# Complex-valued data deprecated for pchip
x, y, values = self._sample_2d_data()
points = (x, y)
values = values - 2j*values
sample = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
with pytest.raises(ValueError, match='real'):
interpn(points, values, sample, method='pchip')
def test_complex_spline2fd(self):
# Complex-valued data not supported by spline2fd
x, y, values = self._sample_2d_data()
points = (x, y)
values = values - 2j*values
sample = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
with pytest.warns(ComplexWarning):
interpn(points, values, sample, method='splinef2d')
@pytest.mark.parametrize(
"method",
["linear", "nearest"]
)
def test_duck_typed_values(self, method):
x = np.linspace(0, 2, 5)
y = np.linspace(0, 1, 7)
values = MyValue((5, 7))
v1 = interpn((x, y), values, [0.4, 0.7], method=method)
v2 = interpn((x, y), values._v, [0.4, 0.7], method=method)
xp_assert_close(v1, v2, check_dtype=False)
@skip_xp_invalid_arg
@parametrize_rgi_interp_methods
def test_matrix_input(self, method):
"""np.matrix inputs are allowed for backwards compatibility"""
x = np.linspace(0, 2, 6)
y = np.linspace(0, 1, 7)
values = matrix(np.random.rand(6, 7))
sample = np.random.rand(3, 7, 2)
v1 = interpn((x, y), values, sample, method=method)
v2 = interpn((x, y), np.asarray(values), sample, method=method)
if method == "quintic":
# https://github.com/scipy/scipy/issues/20472
xp_assert_close(v1, v2, atol=5e-5, rtol=2e-6)
else:
xp_assert_close(v1, v2)
def test_length_one_axis(self):
# gh-5890, gh-9524 : length-1 axis is legal for method='linear'.
# Along the axis it's linear interpolation; away from the length-1
# axis, it's an extrapolation, so fill_value should be used.
values = np.array([[0.1, 1, 10]])
xi = np.array([[1, 2.2], [1, 3.2], [1, 3.8]])
res = interpn(([1], [2, 3, 4]), values, xi)
wanted = [0.9*0.2 + 0.1, # on [2, 3) it's 0.9*(x-2) + 0.1
9*0.2 + 1, # on [3, 4] it's 9*(x-3) + 1
9*0.8 + 1]
xp_assert_close(res, wanted, atol=1e-15)
# check extrapolation
xi = np.array([[1.1, 2.2], [1.5, 3.2], [-2.3, 3.8]])
res = interpn(([1], [2, 3, 4]), values, xi,
bounds_error=False, fill_value=None)
xp_assert_close(res, wanted, atol=1e-15)
def test_descending_points(self):
def value_func_4d(x, y, z, a):
return 2 * x ** 3 + 3 * y ** 2 - z - a
x1 = np.array([0, 1, 2, 3])
x2 = np.array([0, 10, 20, 30])
x3 = np.array([0, 10, 20, 30])
x4 = np.array([0, .1, .2, .30])
points = (x1, x2, x3, x4)
values = value_func_4d(
*np.meshgrid(*points, indexing='ij', sparse=True))
pts = (0.1, 0.3, np.transpose(np.linspace(0, 30, 4)),
np.linspace(0, 0.3, 4))
correct_result = interpn(points, values, pts)
x1_descend = x1[::-1]
x2_descend = x2[::-1]
x3_descend = x3[::-1]
x4_descend = x4[::-1]
points_shuffled = (x1_descend, x2_descend, x3_descend, x4_descend)
values_shuffled = value_func_4d(
*np.meshgrid(*points_shuffled, indexing='ij', sparse=True))
test_result = interpn(points_shuffled, values_shuffled, pts)
xp_assert_equal(correct_result, test_result)
def test_invalid_points_order(self):
x = np.array([.5, 2., 0., 4., 5.5]) # not ascending or descending
y = np.array([.5, 2., 3., 4., 5.5])
z = np.array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
xi = np.array([[1, 2.3, 6.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, -4.0, 5.0, 1.0, 3]]).T
match = "must be strictly ascending or descending"
with pytest.raises(ValueError, match=match):
interpn((x, y), z, xi)
def test_invalid_xi_dimensions(self):
# https://github.com/scipy/scipy/issues/16519
points = [(0, 1)]
values = [0, 1]
xi = np.ones((1, 1, 3))
msg = ("The requested sample points xi have dimension 3, but this "
"RegularGridInterpolator has dimension 1")
with assert_raises(ValueError, match=msg):
interpn(points, values, xi)
def test_readonly_grid(self):
# https://github.com/scipy/scipy/issues/17716
x = np.linspace(0, 4, 5)
y = np.linspace(0, 5, 6)
z = np.linspace(0, 6, 7)
points = (x, y, z)
values = np.ones((5, 6, 7))
point = np.array([2.21, 3.12, 1.15])
for d in points:
d.flags.writeable = False
values.flags.writeable = False
point.flags.writeable = False
interpn(points, values, point)
RegularGridInterpolator(points, values)(point)
def test_2d_readonly_grid(self):
# https://github.com/scipy/scipy/issues/17716
# test special 2d case
x = np.linspace(0, 4, 5)
y = np.linspace(0, 5, 6)
points = (x, y)
values = np.ones((5, 6))
point = np.array([2.21, 3.12])
for d in points:
d.flags.writeable = False
values.flags.writeable = False
point.flags.writeable = False
interpn(points, values, point)
RegularGridInterpolator(points, values)(point)
def test_non_c_contiguous_grid(self):
# https://github.com/scipy/scipy/issues/17716
x = np.linspace(0, 4, 5)
x = np.vstack((x, np.empty_like(x))).T.copy()[:, 0]
assert not x.flags.c_contiguous
y = np.linspace(0, 5, 6)
z = np.linspace(0, 6, 7)
points = (x, y, z)
values = np.ones((5, 6, 7))
point = np.array([2.21, 3.12, 1.15])
interpn(points, values, point)
RegularGridInterpolator(points, values)(point)
@pytest.mark.parametrize("dtype", ['>f8', '<f8'])
def test_endianness(self, dtype):
# https://github.com/scipy/scipy/issues/17716
# test special 2d case
x = np.linspace(0, 4, 5, dtype=dtype)
y = np.linspace(0, 5, 6, dtype=dtype)
points = (x, y)
values = np.ones((5, 6), dtype=dtype)
point = np.array([2.21, 3.12], dtype=dtype)
interpn(points, values, point)
RegularGridInterpolator(points, values)(point)
| TestInterpN |
python | django__django | django/db/models/aggregates.py | {
"start": 1358,
"end": 1805
} | class ____(OrderByList):
template = " ORDER BY %(expressions)s"
def as_sql(self, compiler, connection, **extra_context):
if not connection.features.supports_aggregate_order_by_clause:
raise NotSupportedError(
"This database backend does not support specifying an order on "
"aggregates."
)
return super().as_sql(compiler, connection, **extra_context)
| AggregateOrderBy |
python | kubernetes-client__python | kubernetes/client/models/v1_priority_level_configuration_spec.py | {
"start": 383,
"end": 6229
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'exempt': 'V1ExemptPriorityLevelConfiguration',
'limited': 'V1LimitedPriorityLevelConfiguration',
'type': 'str'
}
attribute_map = {
'exempt': 'exempt',
'limited': 'limited',
'type': 'type'
}
def __init__(self, exempt=None, limited=None, type=None, local_vars_configuration=None): # noqa: E501
"""V1PriorityLevelConfigurationSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._exempt = None
self._limited = None
self._type = None
self.discriminator = None
if exempt is not None:
self.exempt = exempt
if limited is not None:
self.limited = limited
self.type = type
@property
def exempt(self):
"""Gets the exempt of this V1PriorityLevelConfigurationSpec. # noqa: E501
:return: The exempt of this V1PriorityLevelConfigurationSpec. # noqa: E501
:rtype: V1ExemptPriorityLevelConfiguration
"""
return self._exempt
@exempt.setter
def exempt(self, exempt):
"""Sets the exempt of this V1PriorityLevelConfigurationSpec.
:param exempt: The exempt of this V1PriorityLevelConfigurationSpec. # noqa: E501
:type: V1ExemptPriorityLevelConfiguration
"""
self._exempt = exempt
@property
def limited(self):
"""Gets the limited of this V1PriorityLevelConfigurationSpec. # noqa: E501
:return: The limited of this V1PriorityLevelConfigurationSpec. # noqa: E501
:rtype: V1LimitedPriorityLevelConfiguration
"""
return self._limited
@limited.setter
def limited(self, limited):
"""Sets the limited of this V1PriorityLevelConfigurationSpec.
:param limited: The limited of this V1PriorityLevelConfigurationSpec. # noqa: E501
:type: V1LimitedPriorityLevelConfiguration
"""
self._limited = limited
@property
def type(self):
"""Gets the type of this V1PriorityLevelConfigurationSpec. # noqa: E501
`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required. # noqa: E501
:return: The type of this V1PriorityLevelConfigurationSpec. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1PriorityLevelConfigurationSpec.
`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required. # noqa: E501
:param type: The type of this V1PriorityLevelConfigurationSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PriorityLevelConfigurationSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
    """Return True if both objects are not equal.

    Defined as the negation of ``__eq__`` instead of re-implementing
    the comparison: the original duplicated the type check and
    re-serialized both objects, which could drift out of sync with
    ``__eq__`` if one of them was later changed.
    """
    # ``self == other`` invokes our own __eq__, which always returns a
    # bool (never NotImplemented), so negating it is exact.
    return not self == other
| V1PriorityLevelConfigurationSpec |
python | altair-viz__altair | altair/vegalite/v6/schema/_config.py | {
"start": 293399,
"end": 293611
class ____(TypedDict, total=False):
    """:class:`altair.TooltipContent` ``TypedDict`` wrapper.

    Parameters
    ----------
    content
        One of ``"encoding"`` or ``"data"``.
    """

    content: Literal["encoding", "data"]
| TooltipContentKwds |
python | astropy__astropy | astropy/visualization/lupton_rgb.py | {
"start": 16781,
"end": 25129
class ____(RGBImageMapping):
    """
    Map red, green, and blue images into either a normalized float or
    an 8-bit image, by performing optional clipping and applying
    a scaling function to each band in a non-independent manner that depends
    on the other bands, following the scaling scheme presented in
    Lupton et al. 2004.

    Parameters
    ----------
    interval : `~astropy.visualization.BaseInterval` subclass instance or array-like, optional
        The interval object to apply to the data (either a single instance or
        an array for R, G, B). Default is
        `~astropy.visualization.ManualInterval`.
    stretch : `~astropy.visualization.BaseStretch` subclass instance
        The stretch object to apply to the data. The default is
        `~astropy.visualization.LuptonAsinhStretch`.
    """

    def __init__(
        self,
        interval=ManualInterval(vmin=0, vmax=None),
        stretch=LuptonAsinhStretch(stretch=5, Q=8),
    ):
        super().__init__(interval=interval, stretch=stretch)
        # Upper bound for the per-pixel max(R, G, B); pixels exceeding it
        # are scaled down in apply_mappings (step 6 of the algorithm).
        self._pixmax = 1.0

    def intensity(self, image_r, image_g, image_b):
        """
        Return the total intensity from the red, blue, and green intensities.
        This is a naive computation, and may be overridden by subclasses.

        Parameters
        ----------
        image_r : ndarray
            Intensity of image to be mapped to red; or total intensity if
            ``image_g`` and ``image_b`` are None.
        image_g : ndarray, optional
            Intensity of image to be mapped to green.
        image_b : ndarray, optional
            Intensity of image to be mapped to blue.

        Returns
        -------
        intensity : ndarray
            Total intensity from the red, blue and green intensities, or
            ``image_r`` if green and blue images are not provided.
        """
        return compute_intensity(image_r, image_g, image_b)

    def apply_mappings(self, image_r, image_g, image_b):
        """
        Apply mapping stretch and intervals to convert images image_r, image_g,
        and image_b to a triplet of normalized images, following the scaling
        scheme presented in Lupton et al. 2004.

        Compared to astropy's ImageNormalize which first normalizes images
        by cropping and linearly mapping onto [0.,1.] and then applies
        a specified stretch algorithm, the Lupton et al. algorithm applies
        stretching to a multi-color intensity and then computes per-band
        scaled images with bound cropping.

        This is modified here by allowing for different minimum values
        for each of the input r, g, b images, and then computing
        the intensity on the subtracted images.

        Parameters
        ----------
        image_r : ndarray
            Intensity of image to be mapped to red
        image_g : ndarray
            Intensity of image to be mapped to green.
        image_b : ndarray
            Intensity of image to be mapped to blue.

        Returns
        -------
        image_rgb : ndarray
            Triplet of mapped images based on the specified (per-band)
            intervals and the stretch function

        Notes
        -----
        The Lupton et al 2004 algorithm is computed with the following steps:

        1. Shift each band with the minimum values
        2. Compute the intensity I and stretched intensity f(I)
        3. Compute the ratio of the stretched intensity to intensity f(I)/I,
           and clip to a lower bound of 0
        4. Compute the scaled band images by multiplying with the ratio f(I)/I
        5. Clip each band to a lower bound of 0
        6. Scale down pixels where max(R,G,B)>1 by the value max(R,G,B)
        """
        # Copies: the originals must not be mutated by the in-place math below.
        image_r = np.array(image_r, copy=True)
        image_g = np.array(image_g, copy=True)
        image_b = np.array(image_b, copy=True)

        # Subtract per-band minima (step 1).
        image_rgb = [image_r, image_g, image_b]
        for i, img in enumerate(image_rgb):
            vmin, _ = self.intervals[i].get_limits(img)
            image_rgb[i] = np.subtract(img, vmin)
        image_rgb = np.asarray(image_rgb)

        # Determine the intensity and stretched intensity (step 2).
        Int = self.intensity(*image_rgb)
        fI = self.stretch(Int, clip=False)

        # Get normalized fI, and clip to lower bound of 0 (step 3).
        fInorm = np.where(Int <= 0, 0, np.true_divide(fI, Int))

        # Compute X = x * f(I) / I for each filter x=(r,g,b) (step 4).
        np.multiply(image_rgb, fInorm, out=image_rgb)

        # Clip individual bands to minimum of 0, as
        # individual bands can be < 0 even if fI/I isn't (step 5).
        image_rgb = np.clip(image_rgb, 0.0, None)

        # Determine the max of all 3 bands at each position (step 6).
        maxRGB = np.max(image_rgb, axis=0)

        # Rescale saturated pixels so that max(R,G,B) == self._pixmax.
        with np.errstate(invalid="ignore", divide="ignore"):
            image_rgb = np.where(
                maxRGB > self._pixmax,
                np.true_divide(image_rgb * self._pixmax, maxRGB),
                image_rgb,
            )

        return np.asarray(image_rgb)
def make_lupton_rgb(
    image_r,
    image_g,
    image_b,
    interval=None,
    stretch_object=None,
    minimum=None,
    stretch=5,
    Q=8,
    filename=None,
    output_dtype=np.uint8,
):
    r"""
    Return a Red/Green/Blue color image from 3 images using interconnected
    band scaling, and an arbitrary stretch function (by default, an asinh
    stretch).

    The input images can be int or float, and in any range or bit-depth.
    For a more detailed look at the use of this method, see the document
    :ref:`astropy:astropy-visualization-rgb`.

    Parameters
    ----------
    image_r : ndarray
        Image to map to red.
    image_g : ndarray
        Image to map to green.
    image_b : ndarray
        Image to map to blue.
    interval : `~astropy.visualization.BaseInterval` subclass instance or array-like, optional
        The interval object to apply to the data (either a single instance or
        an array for R, G, B). Default is
        `~astropy.visualization.ManualInterval` with vmin=0.
    stretch_object : `~astropy.visualization.BaseStretch` subclass instance, optional
        The stretch object to apply to the data. If set, the input values of
        ``minimum``, ``stretch``, and ``Q`` will be ignored.
        For the Lupton scheme, this would be an instance of
        `~astropy.visualization.LuptonAsinhStretch`, but alternatively
        `~astropy.visualization.LuptonAsinhZscaleStretch` or some other
        stretch can be used.
    minimum : float or array-like, optional
        Deprecated. Intensity that should be mapped to black (a scalar or
        array of R, G, B). If `None`, each image's minimum value is used.
        Default is None.
    stretch : float, optional
        The linear stretch of the image. Default is 5.
    Q : float, optional
        The asinh softening parameter. Default is 8.
    filename : str, optional
        Write the resulting RGB image to a file (file type determined
        from extension).
    output_dtype : numpy scalar type, optional
        Image output data type. Default is np.uint8.

    Returns
    -------
    rgb : ndarray
        RGB color image as an NxNx3 numpy array, with the specified
        data type format.
    """
    if stretch_object is None:
        stretch_object = LuptonAsinhStretch(stretch=stretch, Q=Q)

    if interval is None:
        if minimum is not None:
            # Backwards-compatible (deprecated) path: broadcast a scalar
            # minimum to all three bands, then build one interval per band.
            try:
                len(minimum)
            except TypeError:
                minimum = 3 * [minimum]
            if len(minimum) != 3:
                raise ValueError("please provide 1 or 3 values for minimum.")
            interval = [ManualInterval(vmin=vmin, vmax=None) for vmin in minimum]
        else:
            # Default option: a single shared interval with vmin=0.
            interval = ManualInterval(vmin=0, vmax=None)

    mapping = RGBImageMappingLupton(interval=interval, stretch=stretch_object)
    rgb = mapping.make_rgb_image(image_r, image_g, image_b, output_dtype=output_dtype)

    if filename:
        import matplotlib.image

        matplotlib.image.imsave(filename, rgb, origin="lower")

    return rgb
| RGBImageMappingLupton |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/application/application.py | {
"start": 2989,
"end": 56075
} | class ____(Generic[_AppResult]):
"""
The main Application class!
This glues everything together.
:param layout: A :class:`~prompt_toolkit.layout.Layout` instance.
:param key_bindings:
:class:`~prompt_toolkit.key_binding.KeyBindingsBase` instance for
the key bindings.
:param clipboard: :class:`~prompt_toolkit.clipboard.Clipboard` to use.
:param full_screen: When True, run the application on the alternate screen buffer.
:param color_depth: Any :class:`~.ColorDepth` value, a callable that
returns a :class:`~.ColorDepth` or `None` for default.
:param erase_when_done: (bool) Clear the application output when it finishes.
:param reverse_vi_search_direction: Normally, in Vi mode, a '/' searches
forward and a '?' searches backward. In Readline mode, this is usually
reversed.
:param min_redraw_interval: Number of seconds to wait between redraws. Use
this for applications where `invalidate` is called a lot. This could cause
a lot of terminal output, which some terminals are not able to process.
`None` means that every `invalidate` will be scheduled right away
(which is usually fine).
When one `invalidate` is called, but a scheduled redraw of a previous
`invalidate` call has not been executed yet, nothing will happen in any
case.
:param max_render_postpone_time: When there is high CPU (a lot of other
scheduled calls), postpone the rendering max x seconds. '0' means:
don't postpone. '.5' means: try to draw at least twice a second.
:param refresh_interval: Automatically invalidate the UI every so many
seconds. When `None` (the default), only invalidate when `invalidate`
has been called.
:param terminal_size_polling_interval: Poll the terminal size every so many
seconds. Useful if the applications runs in a thread other then then
main thread where SIGWINCH can't be handled, or on Windows.
Filters:
:param mouse_support: (:class:`~prompt_toolkit.filters.Filter` or
boolean). When True, enable mouse support.
:param paste_mode: :class:`~prompt_toolkit.filters.Filter` or boolean.
:param editing_mode: :class:`~prompt_toolkit.enums.EditingMode`.
:param enable_page_navigation_bindings: When `True`, enable the page
navigation key bindings. These include both Emacs and Vi bindings like
page-up, page-down and so on to scroll through pages. Mostly useful for
creating an editor or other full screen applications. Probably, you
don't want this for the implementation of a REPL. By default, this is
enabled if `full_screen` is set.
Callbacks (all of these should accept an
:class:`~prompt_toolkit.application.Application` object as input.)
:param on_reset: Called during reset.
:param on_invalidate: Called when the UI has been invalidated.
:param before_render: Called right before rendering.
:param after_render: Called right after rendering.
I/O:
(Note that the preferred way to change the input/output is by creating an
`AppSession` with the required input/output objects. If you need multiple
applications running at the same time, you have to create a separate
`AppSession` using a `with create_app_session():` block.
:param input: :class:`~prompt_toolkit.input.Input` instance.
:param output: :class:`~prompt_toolkit.output.Output` instance. (Probably
Vt100_Output or Win32Output.)
Usage:
app = Application(...)
app.run()
# Or
await app.run_async()
"""
def __init__(
    self,
    layout: Layout | None = None,
    style: BaseStyle | None = None,
    include_default_pygments_style: FilterOrBool = True,
    style_transformation: StyleTransformation | None = None,
    key_bindings: KeyBindingsBase | None = None,
    clipboard: Clipboard | None = None,
    full_screen: bool = False,
    color_depth: (ColorDepth | Callable[[], ColorDepth | None] | None) = None,
    mouse_support: FilterOrBool = False,
    enable_page_navigation_bindings: None
    | (FilterOrBool) = None,  # Can be None, True or False.
    paste_mode: FilterOrBool = False,
    editing_mode: EditingMode = EditingMode.EMACS,
    erase_when_done: bool = False,
    reverse_vi_search_direction: FilterOrBool = False,
    min_redraw_interval: float | int | None = None,
    max_render_postpone_time: float | int | None = 0.01,
    refresh_interval: float | None = None,
    terminal_size_polling_interval: float | None = 0.5,
    cursor: AnyCursorShapeConfig = None,
    on_reset: ApplicationEventHandler[_AppResult] | None = None,
    on_invalidate: ApplicationEventHandler[_AppResult] | None = None,
    before_render: ApplicationEventHandler[_AppResult] | None = None,
    after_render: ApplicationEventHandler[_AppResult] | None = None,
    # I/O.
    input: Input | None = None,
    output: Output | None = None,
) -> None:
    """
    Initialize the application.

    All parameters are documented on the class docstring. Note the
    initialization order below matters: filters are normalized first,
    the renderer needs `_merged_style` and the output, and `reset()`
    is called last to bring everything into a consistent initial state.
    """
    # If `enable_page_navigation_bindings` is not specified, enable it in
    # case of full screen applications only. This can be overridden by the user.
    if enable_page_navigation_bindings is None:
        enable_page_navigation_bindings = Condition(lambda: self.full_screen)

    # Normalize all FilterOrBool parameters to real Filter objects.
    paste_mode = to_filter(paste_mode)
    mouse_support = to_filter(mouse_support)
    reverse_vi_search_direction = to_filter(reverse_vi_search_direction)
    enable_page_navigation_bindings = to_filter(enable_page_navigation_bindings)
    include_default_pygments_style = to_filter(include_default_pygments_style)

    if layout is None:
        layout = create_dummy_layout()

    if style_transformation is None:
        style_transformation = DummyStyleTransformation()

    self.style = style
    self.style_transformation = style_transformation

    # Key bindings.
    self.key_bindings = key_bindings
    self._default_bindings = load_key_bindings()
    self._page_navigation_bindings = load_page_navigation_bindings()

    self.layout = layout
    self.clipboard = clipboard or InMemoryClipboard()
    self.full_screen: bool = full_screen
    self._color_depth = color_depth
    self.mouse_support = mouse_support

    self.paste_mode = paste_mode
    self.editing_mode = editing_mode
    self.erase_when_done = erase_when_done
    self.reverse_vi_search_direction = reverse_vi_search_direction
    self.enable_page_navigation_bindings = enable_page_navigation_bindings
    self.min_redraw_interval = min_redraw_interval
    self.max_render_postpone_time = max_render_postpone_time
    self.refresh_interval = refresh_interval
    self.terminal_size_polling_interval = terminal_size_polling_interval

    self.cursor = to_cursor_shape_config(cursor)

    # Events.
    self.on_invalidate = Event(self, on_invalidate)
    self.on_reset = Event(self, on_reset)
    self.before_render = Event(self, before_render)
    self.after_render = Event(self, after_render)

    # I/O. (Fall back to the ambient AppSession when not given.)
    session = get_app_session()
    self.output = output or session.output
    self.input = input or session.input

    # List of 'extra' functions to execute before a Application.run.
    self.pre_run_callables: list[Callable[[], None]] = []

    self._is_running = False
    self.future: Future[_AppResult] | None = None
    self.loop: AbstractEventLoop | None = None
    self._loop_thread: threading.Thread | None = None
    self.context: contextvars.Context | None = None

    #: Quoted insert. This flag is set if we go into quoted insert mode.
    self.quoted_insert = False

    #: Vi state. (For Vi key bindings.)
    self.vi_state = ViState()
    self.emacs_state = EmacsState()

    #: When to flush the input (For flushing escape keys.) This is important
    #: on terminals that use vt100 input. We can't distinguish the escape
    #: key from for instance the left-arrow key, if we don't know what follows
    #: after "\x1b". This little timer will consider "\x1b" to be escape if
    #: nothing did follow in this time span.
    #: This seems to work like the `ttimeoutlen` option in Vim.
    self.ttimeoutlen = 0.5  # Seconds.

    #: Like Vim's `timeoutlen` option. This can be `None` or a float.  For
    #: instance, suppose that we have a key binding AB and a second key
    #: binding A. If the uses presses A and then waits, we don't handle
    #: this binding yet (unless it was marked 'eager'), because we don't
    #: know what will follow. This timeout is the maximum amount of time
    #: that we wait until we call the handlers anyway. Pass `None` to
    #: disable this timeout.
    self.timeoutlen = 1.0

    #: The `Renderer` instance.
    # Make sure that the same stdout is used, when a custom renderer has been passed.
    self._merged_style = self._create_merged_style(include_default_pygments_style)

    self.renderer = Renderer(
        self._merged_style,
        self.output,
        full_screen=full_screen,
        mouse_support=mouse_support,
        cpr_not_supported_callback=self.cpr_not_supported_callback,
    )

    #: Render counter. This one is increased every time the UI is rendered.
    #: It can be used as a key for caching certain information during one
    #: rendering.
    self.render_counter = 0

    # Invalidate flag. When 'True', a repaint has been scheduled.
    self._invalidated = False
    self._invalidate_events: list[
        Event[object]
    ] = []  # Collection of 'invalidate' Event objects.
    self._last_redraw_time = 0.0  # Unix timestamp of last redraw. Used when
    # `min_redraw_interval` is given.

    #: The `InputProcessor` instance.
    self.key_processor = KeyProcessor(_CombinedRegistry(self))

    # If `run_in_terminal` was called. This will point to a `Future` what will be
    # set at the point when the previous run finishes.
    self._running_in_terminal = False
    self._running_in_terminal_f: Future[None] | None = None

    # Trigger initialize callback.
    self.reset()
def _create_merged_style(self, include_default_pygments_style: Filter) -> BaseStyle:
    """
    Build the style used for rendering: the default UI style, the default
    Pygments style (included only while the given filter is on), and the
    user style (`self.style`, looked up dynamically), merged in that order.
    """
    fallback_style = DummyStyle()
    default_pygments = default_pygments_style()

    @DynamicStyle
    def conditional_pygments_style() -> BaseStyle:
        # Evaluated on every lookup, so toggling the filter takes
        # effect without rebuilding the merged style.
        return default_pygments if include_default_pygments_style() else fallback_style

    return merge_styles(
        [
            default_ui_style(),
            conditional_pygments_style,
            DynamicStyle(lambda: self.style),
        ]
    )
@property
def color_depth(self) -> ColorDepth:
    """
    The active :class:`.ColorDepth`.

    An explicitly configured depth (possibly produced by a callable)
    takes precedence; otherwise the depth reported by the
    :class:`.Output` is used. If the output was created through
    `output.defaults.create_output`, that value comes from the
    $PROMPT_TOOLKIT_COLOR_DEPTH environment variable.
    """
    configured = self._color_depth
    if callable(configured):
        configured = configured()
    if configured is not None:
        return configured
    return self.output.get_default_color_depth()
@property
def current_buffer(self) -> Buffer:
    """
    The currently focused :class:`~.Buffer`.

    When no real buffer has the focus, a fresh dummy :class:`.Buffer`
    is returned instead of `None`, so callers never have to check.
    """
    focused = self.layout.current_buffer
    return focused or Buffer(name="dummy-buffer")  # Dummy buffer.
@property
def current_search_state(self) -> SearchState:
    """
    The :class:`.SearchState` for the focused :class:`.BufferControl`.

    A dummy search state (never `None`) is returned when something
    other than a `BufferControl` has the focus.
    """
    control = self.layout.current_control
    if not isinstance(control, BufferControl):
        return SearchState()  # Dummy search state. (Don't return None!)
    return control.search_state
def reset(self) -> None:
    """
    Reset everything, for reading the next input.

    Buffer contents and the focus stack are deliberately left intact:
    with multiple buffers, their content should survive between
    successive `run` calls.
    """
    self.exit_style = ""

    self._background_tasks: set[Task[None]] = set()

    self.renderer.reset()
    self.key_processor.reset()
    self.layout.reset()
    self.vi_state.reset()
    self.emacs_state.reset()

    # Trigger reset event.
    self.on_reset.fire()

    # The `Layout` class can't guarantee that a focusable widget is
    # focused; if the current control isn't, move focus to the first
    # window whose content is focusable.
    layout = self.layout
    if not layout.current_control.is_focusable():
        focusable_window = next(
            (w for w in layout.find_all_windows() if w.content.is_focusable()),
            None,
        )
        if focusable_window is not None:
            layout.current_window = focusable_window
def invalidate(self) -> None:
    """
    Thread safe way of sending a repaint trigger to the input event loop.

    May be called from any thread; the actual redraw is scheduled on
    `self.loop`. Repeated calls while a redraw is already pending are
    no-ops (see `_invalidated`).
    """
    if not self._is_running:
        # Don't schedule a redraw if we're not running.
        # Otherwise, `get_running_loop()` in `call_soon_threadsafe` can fail.
        # See: https://github.com/dbcli/mycli/issues/797
        return

    # `invalidate()` called if we don't have a loop yet (not running?), or
    # after the event loop was closed.
    if self.loop is None or self.loop.is_closed():
        return

    # Never schedule a second redraw, when a previous one has not yet been
    # executed. (This should protect against other threads calling
    # 'invalidate' many times, resulting in 100% CPU.)
    if self._invalidated:
        return
    else:
        self._invalidated = True

    # Trigger event.
    self.loop.call_soon_threadsafe(self.on_invalidate.fire)

    def redraw() -> None:
        # Clear the flag *before* rendering, so an `invalidate` that
        # arrives during the render schedules another repaint.
        self._invalidated = False
        self._redraw()

    def schedule_redraw() -> None:
        call_soon_threadsafe(
            redraw, max_postpone_time=self.max_render_postpone_time, loop=self.loop
        )

    if self.min_redraw_interval:
        # When a minimum redraw interval is set, wait minimum this amount
        # of time between redraws.
        diff = time.time() - self._last_redraw_time
        if diff < self.min_redraw_interval:

            async def redraw_in_future() -> None:
                await sleep(cast(float, self.min_redraw_interval) - diff)
                schedule_redraw()

            self.loop.call_soon_threadsafe(
                lambda: self.create_background_task(redraw_in_future())
            )
        else:
            schedule_redraw()
    else:
        schedule_redraw()
@property
def invalidated(self) -> bool:
    """True when a redraw operation has been scheduled."""
    return self._invalidated
def _redraw(self, render_as_done: bool = False) -> None:
    """
    Render the command line again. (Not thread safe!) (From other threads,
    or if unsure, use :meth:`.Application.invalidate`.)

    :param render_as_done: make sure to put the cursor after the UI.
    """

    def run_in_context() -> None:
        # Only draw when no sub application was started.
        if self._is_running and not self._running_in_terminal:
            if self.min_redraw_interval:
                self._last_redraw_time = time.time()

            # Render
            self.render_counter += 1
            self.before_render.fire()

            if render_as_done:
                if self.erase_when_done:
                    self.renderer.erase()
                else:
                    # Draw in 'done' state and reset renderer.
                    self.renderer.render(self, self.layout, is_done=render_as_done)
            else:
                self.renderer.render(self, self.layout)

            self.layout.update_parents_relations()

            # Fire render event.
            self.after_render.fire()

            # Re-attach invalidate handlers: the set of controls in the
            # layout may have changed during this render.
            self._update_invalidate_events()

    # NOTE: We want to make sure this Application is the active one. The
    # invalidate function is often called from a context where this
    # application is not the active one. (Like the
    # `PromptSession._auto_refresh_context`).
    # We copy the context in case the context was already active, to
    # prevent RuntimeErrors. (The rendering is not supposed to change
    # any context variables.)
    if self.context is not None:
        self.context.copy().run(run_in_context)
def _start_auto_refresh_task(self) -> None:
    """
    Start a background while/true loop that invalidates the UI every
    `refresh_interval` seconds. No-op when the interval is unset or 0.
    """
    interval = self.refresh_interval
    if interval is None or interval == 0:
        return

    async def auto_refresh(refresh_interval: float) -> None:
        while True:
            await sleep(refresh_interval)
            self.invalidate()

    self.create_background_task(auto_refresh(interval))
def _update_invalidate_events(self) -> None:
    """
    Make sure the 'invalidate' handler is attached to the invalidate
    events of every control currently present in the layout.
    """
    # Detach from the previously collected events first: components may
    # have been removed from the UI since the last render.
    for event in self._invalidate_events:
        event -= self._invalidate_handler

    # Collect the fresh set of events. (All controls are able to
    # invalidate themselves.)
    self._invalidate_events = [
        event
        for control in self.layout.find_all_controls()
        for event in control.get_invalidate_events()
    ]

    for event in self._invalidate_events:
        event += self._invalidate_handler
def _invalidate_handler(self, sender: object) -> None:
    """
    Adapter between UIControl invalidate events and `self.invalidate`.

    It bridges the event-handler signature (which receives a sender) to
    `invalidate()`; being a bound method rather than a nested function,
    it can also be detached from the events again later.
    """
    self.invalidate()
def _on_resize(self) -> None:
    """
    When the window size changes, we erase the current output and request
    again the cursor position. When the CPR (cursor position report)
    answer arrives, the output is drawn again.
    """
    # Erase, request position (when cursor is at the start position)
    # and redraw again. -- The order is important.
    self.renderer.erase(leave_alternate_screen=False)
    self._request_absolute_cursor_position()
    self._redraw()
def _pre_run(self, pre_run: Callable[[], None] | None = None) -> None:
    """
    Called during `run`: invoke the optional `pre_run` callable, then
    drain the registered `pre_run_callables`.

    `self.future` should already be set to the new future at the point
    where this is called, in order to avoid data races. `pre_run` can be
    used to set a `threading.Event` to synchronize with UI termination
    code running in another thread that would call `Application.exit`.
    (See the progress bar code for an example.)
    """
    if pre_run:
        pre_run()

    # Process registered "pre_run_callables" and clear the list.
    for callback in self.pre_run_callables:
        callback()
    self.pre_run_callables.clear()
async def run_async(
    self,
    pre_run: Callable[[], None] | None = None,
    set_exception_handler: bool = True,
    handle_sigint: bool = True,
    slow_callback_duration: float = 0.5,
) -> _AppResult:
    """
    Run the prompt_toolkit :class:`~prompt_toolkit.application.Application`
    until :meth:`~prompt_toolkit.application.Application.exit` has been
    called. Return the value that was passed to
    :meth:`~prompt_toolkit.application.Application.exit`.

    This is the main entry point for a prompt_toolkit
    :class:`~prompt_toolkit.application.Application` and usually the only
    place where the event loop is actually running.

    :param pre_run: Optional callable, which is called right after the
        "reset" of the application.
    :param set_exception_handler: When set, in case of an exception, go out
        of the alternate screen and hide the application, display the
        exception, and wait for the user to press ENTER.
    :param handle_sigint: Handle SIGINT signal if possible. This will call
        the `<sigint>` key binding when a SIGINT is received. (This only
        works in the main thread.)
    :param slow_callback_duration: Display warnings if code scheduled in
        the asyncio event loop takes more time than this. The asyncio
        default of `0.1` is sometimes not sufficient on a slow system,
        because exceptionally, the drawing of the app, which happens in the
        event loop, can take a bit longer from time to time.
    """
    assert not self._is_running, "Application is already running."

    if not in_main_thread() or sys.platform == "win32":
        # Handling signals in other threads is not supported.
        # Also on Windows, `add_signal_handler(signal.SIGINT, ...)` raises
        # `NotImplementedError`.
        # See: https://github.com/prompt-toolkit/python-prompt-toolkit/issues/1553
        handle_sigint = False

    async def _run_async(f: asyncio.Future[_AppResult]) -> _AppResult:
        context = contextvars.copy_context()
        self.context = context

        # Task for cancelling 'flush' timeouts. Every time when a key is
        # pressed, we start a 'flush' timer for flushing our escape key. But
        # when any subsequent input is received, a new timer is started and
        # the current timer will be ignored.
        flush_task: asyncio.Task[None] | None = None

        # Reset.
        # (`self.future` needs to be set when `pre_run` is called.)
        self.reset()
        self._pre_run(pre_run)

        # Feed type ahead input first.
        self.key_processor.feed_multiple(get_typeahead(self.input))
        self.key_processor.process_keys()

        def read_from_input() -> None:
            nonlocal flush_task

            # Ignore when we aren't running anymore. This callback will be
            # removed from the loop next time. (It could be that it was
            # still in the 'tasks' list of the loop.)
            # Except: if we need to process incoming CPRs.
            if not self._is_running and not self.renderer.waiting_for_cpr:
                return

            # Get keys from the input object.
            keys = self.input.read_keys()

            # Feed to key processor.
            self.key_processor.feed_multiple(keys)
            self.key_processor.process_keys()

            # Quit when the input stream was closed.
            if self.input.closed:
                if not f.done():
                    f.set_exception(EOFError)
            else:
                # Automatically flush keys.
                if flush_task:
                    flush_task.cancel()
                flush_task = self.create_background_task(auto_flush_input())

        def read_from_input_in_context() -> None:
            # Ensure that key bindings callbacks are always executed in the
            # current context. This is important when key bindings are
            # accessing contextvars. (These callbacks are currently being
            # called from a different context. Underneath,
            # `loop.add_reader` is used to register the stdin FD.)
            # (We copy the context to avoid a `RuntimeError` in case the
            # context is already active.)
            context.copy().run(read_from_input)

        async def auto_flush_input() -> None:
            # Flush input after timeout.
            # (Used for flushing the enter key.)
            # This sleep can be cancelled, in that case we won't flush yet.
            await sleep(self.ttimeoutlen)
            flush_input()

        def flush_input() -> None:
            if not self.is_done:
                # Get keys, and feed to key processor.
                keys = self.input.flush_keys()
                self.key_processor.feed_multiple(keys)
                self.key_processor.process_keys()

                if self.input.closed:
                    f.set_exception(EOFError)

        # Enter raw mode, attach input and attach WINCH event handler.
        with self.input.raw_mode(), self.input.attach(
            read_from_input_in_context
        ), attach_winch_signal_handler(self._on_resize):
            # Draw UI.
            self._request_absolute_cursor_position()
            self._redraw()
            self._start_auto_refresh_task()

            self.create_background_task(self._poll_output_size())

            # Wait for UI to finish.
            try:
                result = await f
            finally:
                # In any case, when the application finishes.
                # (Successful, or because of an error.)
                try:
                    self._redraw(render_as_done=True)
                finally:
                    # _redraw has a good chance to fail if it calls widgets
                    # with bad code. Make sure to reset the renderer
                    # anyway.
                    self.renderer.reset()

                    # Unset `is_running`, this ensures that possibly
                    # scheduled draws won't paint during the following
                    # yield.
                    self._is_running = False

                    # Detach event handlers for invalidate events.
                    # (Important when a UIControl is embedded in multiple
                    # applications, like ptterm in pymux. An invalidate
                    # should not trigger a repaint in terminated
                    # applications.)
                    for ev in self._invalidate_events:
                        ev -= self._invalidate_handler
                    self._invalidate_events = []

                    # Wait for CPR responses.
                    if self.output.responds_to_cpr:
                        await self.renderer.wait_for_cpr_responses()

                    # Wait for the run-in-terminals to terminate.
                    previous_run_in_terminal_f = self._running_in_terminal_f

                    if previous_run_in_terminal_f:
                        await previous_run_in_terminal_f

                    # Store unprocessed input as typeahead for next time.
                    store_typeahead(self.input, self.key_processor.empty_queue())

            return result

    @contextmanager
    def set_loop() -> Iterator[AbstractEventLoop]:
        loop = get_running_loop()
        self.loop = loop
        self._loop_thread = threading.current_thread()

        try:
            yield loop
        finally:
            self.loop = None
            self._loop_thread = None

    @contextmanager
    def set_is_running() -> Iterator[None]:
        self._is_running = True
        try:
            yield
        finally:
            self._is_running = False

    @contextmanager
    def set_handle_sigint(loop: AbstractEventLoop) -> Iterator[None]:
        if handle_sigint:
            with _restore_sigint_from_ctypes():
                # save sigint handlers (python and os level)
                # See: https://github.com/prompt-toolkit/python-prompt-toolkit/issues/1576
                loop.add_signal_handler(
                    signal.SIGINT,
                    lambda *_: loop.call_soon_threadsafe(
                        self.key_processor.send_sigint
                    ),
                )
                try:
                    yield
                finally:
                    loop.remove_signal_handler(signal.SIGINT)
        else:
            yield

    @contextmanager
    def set_exception_handler_ctx(loop: AbstractEventLoop) -> Iterator[None]:
        if set_exception_handler:
            previous_exc_handler = loop.get_exception_handler()
            loop.set_exception_handler(self._handle_exception)
            try:
                yield
            finally:
                loop.set_exception_handler(previous_exc_handler)
        else:
            yield

    @contextmanager
    def set_callback_duration(loop: AbstractEventLoop) -> Iterator[None]:
        # Set slow_callback_duration.
        original_slow_callback_duration = loop.slow_callback_duration
        loop.slow_callback_duration = slow_callback_duration
        try:
            yield
        finally:
            # Reset slow_callback_duration.
            loop.slow_callback_duration = original_slow_callback_duration

    @contextmanager
    def create_future(
        loop: AbstractEventLoop,
    ) -> Iterator[asyncio.Future[_AppResult]]:
        f = loop.create_future()
        self.future = f  # XXX: make sure to set this before calling '_redraw'.

        try:
            yield f
        finally:
            # Also remove the Future again. (This brings the
            # application back to its initial state, where it also
            # doesn't have a Future.)
            self.future = None

    with ExitStack() as stack:
        stack.enter_context(set_is_running())

        # Make sure to set `_invalidated` to `False` to begin with,
        # otherwise we're not going to paint anything. This can happen if
        # this application had run before on a different event loop, and a
        # paint was scheduled using `call_soon_threadsafe` with
        # `max_postpone_time`.
        self._invalidated = False

        loop = stack.enter_context(set_loop())

        stack.enter_context(set_handle_sigint(loop))
        stack.enter_context(set_exception_handler_ctx(loop))
        stack.enter_context(set_callback_duration(loop))
        stack.enter_context(set_app(self))
        stack.enter_context(self._enable_breakpointhook())

        f = stack.enter_context(create_future(loop))
        try:
            return await _run_async(f)
        finally:
            # Wait for the background tasks to be done. This needs to
            # go in the finally! If `_run_async` raises
            # `KeyboardInterrupt`, we still want to wait for the
            # background tasks.
            await self.cancel_and_wait_for_background_tasks()

    # The `ExitStack` above is defined in typeshed in a way that it can
    # swallow exceptions. Without next line, mypy would think that there's
    # a possibility we don't return here. See:
    # https://github.com/python/mypy/issues/7726
    assert False, "unreachable"
def run(
    self,
    pre_run: Callable[[], None] | None = None,
    set_exception_handler: bool = True,
    handle_sigint: bool = True,
    in_thread: bool = False,
    inputhook: InputHook | None = None,
) -> _AppResult:
    """
    A blocking 'run' call that waits until the UI is finished.

    This will run the application in a fresh asyncio event loop.

    :param pre_run: Optional callable, which is called right after the
        "reset" of the application.
    :param set_exception_handler: When set, in case of an exception, go out
        of the alternate screen and hide the application, display the
        exception, and wait for the user to press ENTER.
    :param in_thread: When true, run the application in a background
        thread, and block the current thread until the application
        terminates. This is useful if we need to be sure the application
        won't use the current event loop (asyncio does not support nested
        event loops). A new event loop will be created in this background
        thread, and that loop will also be closed when the background
        thread terminates. When this is used, it's especially important to
        make sure that all asyncio background tasks are managed through
        `get_app().create_background_task()`, so that unfinished tasks are
        properly cancelled before the event loop is closed. This is used
        for instance in ptpython.
    :param handle_sigint: Handle SIGINT signal. Call the key binding for
        `Keys.SIGINT`. (This only works in the main thread.)
    :param inputhook: Optional input hook callable, used to integrate with
        an external event loop (e.g. a GUI toolkit) while waiting for input.
    """
    if in_thread:
        # Run the application in a separate thread with its own event
        # loop, and block the calling thread until it finishes.
        result: _AppResult
        exception: BaseException | None = None

        def run_in_thread() -> None:
            nonlocal result, exception
            try:
                result = self.run(
                    pre_run=pre_run,
                    set_exception_handler=set_exception_handler,
                    # Signal handling only works in the main thread.
                    handle_sigint=False,
                    inputhook=inputhook,
                )
            except BaseException as e:
                # Capture the exception here; it is re-raised in the
                # calling thread after the join below.
                exception = e

        thread = threading.Thread(target=run_in_thread)
        thread.start()
        thread.join()

        if exception is not None:
            raise exception
        return result

    coro = self.run_async(
        pre_run=pre_run,
        set_exception_handler=set_exception_handler,
        handle_sigint=handle_sigint,
    )

    def _called_from_ipython() -> bool:
        # Detect old IPython versions (< 8.18) that install an event loop
        # themselves in order to set input hooks.
        try:
            return (
                sys.modules["IPython"].version_info < (8, 18, 0, "")
                and "IPython/terminal/interactiveshell.py"
                in sys._getframe(3).f_code.co_filename
            )
        except BaseException:
            return False

    if inputhook is not None:
        # Create new event loop with given input hook and run the app.
        # In Python 3.12, we can use asyncio.run(loop_factory=...)
        # For now, use `run_until_complete()`.
        loop = new_eventloop_with_inputhook(inputhook)
        result = loop.run_until_complete(coro)
        loop.run_until_complete(loop.shutdown_asyncgens())
        loop.close()
        return result

    elif _called_from_ipython():
        # workaround to make input hooks work for IPython until
        # https://github.com/ipython/ipython/pull/14241 is merged.
        # IPython was setting the input hook by installing an event loop
        # previously.
        try:
            # See whether a loop was installed already. If so, use that.
            # That's required for the input hooks to work, they are
            # installed using `set_event_loop`.
            loop = asyncio.get_event_loop()
        except RuntimeError:
            # No loop installed. Run like usual.
            return asyncio.run(coro)
        else:
            # Use existing loop.
            return loop.run_until_complete(coro)

    else:
        # No loop installed. Run like usual.
        return asyncio.run(coro)
def _handle_exception(
    self, loop: AbstractEventLoop, context: dict[str, Any]
) -> None:
    """
    Handler for event loop exceptions.
    This will print the exception, using run_in_terminal.
    """
    # Extract and format the traceback right away: we are still inside
    # the event loop's 'except:' block at this point, so the traceback
    # is still available. Doing it later, inside the coroutine below,
    # would lose the exception information.
    traceback_obj = get_traceback_from_context(context)
    rendered_traceback = "".join(format_tb(traceback_obj))

    async def print_exception_in_terminal() -> None:
        async with in_terminal():
            # Print output. Similar to 'loop.default_exception_handler',
            # but without going through a logger.
            print("\nUnhandled exception in event loop:")
            print(rendered_traceback)
            print("Exception {}".format(context.get("exception")))

            await _do_wait_for_enter("Press ENTER to continue...")

    ensure_future(print_exception_in_terminal())
@contextmanager
def _enable_breakpointhook(self) -> Generator[None, None, None]:
    """
    Install our custom breakpointhook for the duration of this context
    manager. (We will only install the hook if no other custom hook was
    set.)
    """
    if sys.breakpointhook != sys.__breakpointhook__:
        # Somebody else already installed a custom hook; leave it alone.
        yield
        return

    sys.breakpointhook = self._breakpointhook
    try:
        yield
    finally:
        # Always restore the default hook, even when an error escapes.
        sys.breakpointhook = sys.__breakpointhook__
def _breakpointhook(self, *a: object, **kw: object) -> None:
    """
    Breakpointhook which uses PDB, but ensures that the application is
    hidden and input echoing is restored during each debugger dispatch.
    This can be called from any thread. In any case, the application's
    event loop will be blocked while the PDB input is displayed. The event
    will continue after leaving the debugger.
    """
    # Capture `self` under a second name for the nested helpers below.
    app = self
    # Inline import on purpose. We don't want to import pdb, if not needed.
    import pdb
    from types import FrameType

    # Signature of `pdb.Pdb.trace_dispatch`, which we wrap below.
    TraceDispatch = Callable[[FrameType, str, Any], Any]

    @contextmanager
    def hide_app_from_eventloop_thread() -> Generator[None, None, None]:
        """Stop application if `__breakpointhook__` is called from within
        the App's event loop."""
        # Hide application.
        app.renderer.erase()

        # Detach input and dispatch to debugger.
        with app.input.detach():
            with app.input.cooked_mode():
                yield

        # Note: we don't render the application again here, because
        # there's a good chance that there's a breakpoint on the next
        # line. This paint/erase cycle would move the PDB prompt back
        # to the middle of the screen.

    @contextmanager
    def hide_app_from_other_thread() -> Generator[None, None, None]:
        """Stop application if `__breakpointhook__` is called from a
        thread other than the App's event loop."""
        ready = threading.Event()
        done = threading.Event()

        async def in_loop() -> None:
            # from .run_in_terminal import in_terminal
            # async with in_terminal():
            #     ready.set()
            #     await asyncio.get_running_loop().run_in_executor(None, done.wait)
            #     return

            # Hide application.
            app.renderer.erase()

            # Detach input and dispatch to debugger.
            with app.input.detach():
                with app.input.cooked_mode():
                    ready.set()
                    # Here we block the App's event loop thread until the
                    # debugger resumes. We could have used `with
                    # run_in_terminal.in_terminal():` like the commented
                    # code above, but it seems to work better if we
                    # completely stop the main event loop while debugging.
                    done.wait()

        self.create_background_task(in_loop())
        # Wait until `in_loop` has hidden the app before handing control
        # to the debugger, and release the event loop when we're done.
        ready.wait()
        try:
            yield
        finally:
            done.set()

    # Pdb subclass that hides/restores the application around every
    # debugger dispatch, choosing a strategy based on which thread the
    # dispatch happens on.
    class CustomPdb(pdb.Pdb):
        def trace_dispatch(
            self, frame: FrameType, event: str, arg: Any
        ) -> TraceDispatch:
            if app._loop_thread is None:
                # No event loop running (presumably the app isn't active);
                # plain dispatch.
                return super().trace_dispatch(frame, event, arg)

            if app._loop_thread == threading.current_thread():
                with hide_app_from_eventloop_thread():
                    return super().trace_dispatch(frame, event, arg)

            with hide_app_from_other_thread():
                return super().trace_dispatch(frame, event, arg)

    # Debug the caller's frame (the `breakpoint()` call site), not this hook.
    frame = sys._getframe().f_back
    CustomPdb(stdout=sys.__stdout__).set_trace(frame)
def create_background_task(
    self, coroutine: Coroutine[Any, Any, None]
) -> asyncio.Task[None]:
    """
    Start a background task (coroutine) for the running application. When
    the `Application` terminates, unfinished background tasks will be
    cancelled.

    Given that we still support Python versions before 3.11, we can't use
    task groups (and exception groups), because of that, these background
    tasks are not allowed to raise exceptions. If they do, we'll call the
    default exception handler from the event loop.

    If at some point, we have Python 3.11 as the minimum supported Python
    version, then we can use a `TaskGroup` (with the lifetime of
    `Application.run_async()`), and run the background tasks in there.

    This is not threadsafe.
    """
    # Prefer the application's own loop; fall back to the currently
    # running one.
    event_loop = self.loop if self.loop else get_running_loop()
    new_task: asyncio.Task[None] = event_loop.create_task(coroutine)

    # Keep a strong reference so the task isn't garbage collected, and
    # arrange for cleanup/exception reporting once it finishes.
    self._background_tasks.add(new_task)
    new_task.add_done_callback(self._on_background_task_done)
    return new_task
def _on_background_task_done(self, task: asyncio.Task[None]) -> None:
    """
    Called when a background task completes. Remove it from
    `_background_tasks`, and handle exceptions if any.
    """
    self._background_tasks.discard(task)

    # A cancelled task has no exception to report.
    if task.cancelled():
        return

    error = task.exception()
    if error is None:
        return

    # Route the failure through the event loop's exception handler: a
    # done-callback has no way to propagate it otherwise.
    context = {
        "message": f"prompt_toolkit.Application background task {task!r} "
        "raised an unexpected exception.",
        "exception": error,
        "task": task,
    }
    get_running_loop().call_exception_handler(context)
async def cancel_and_wait_for_background_tasks(self) -> None:
    """
    Cancel all background tasks, and wait for the cancellation to complete.
    If any of the background tasks raised an exception, this will also
    propagate the exception.
    (If we had nurseries like Trio, this would be the `__aexit__` of a
    nursery.)
    """
    # Request cancellation of every still-registered task.
    for background_task in self._background_tasks:
        background_task.cancel()

    # Wait until the cancellation of the background tasks completes.
    # `asyncio.wait()` does not propagate exceptions raised within any of
    # these tasks, which is what we want. Otherwise, we can't distinguish
    # between a `CancelledError` raised in this task because it got
    # cancelled, and a `CancelledError` raised on this `await` checkpoint,
    # because *we* got cancelled during the teardown of the application.
    # (If we get cancelled here, then it's important to not suppress the
    # `CancelledError`, and have it propagate.)
    # NOTE: Currently, if we get cancelled at this point then we can't wait
    #       for the cancellation to complete (in the future, we should be
    #       using anyio or Python's 3.11 TaskGroup.)
    #       Also, if we had exception groups, we could propagate an
    #       `ExceptionGroup` if something went wrong here. Right now, we
    #       don't propagate exceptions, but have them printed in
    #       `_on_background_task_done`.
    if self._background_tasks:
        await asyncio.wait(
            self._background_tasks,
            timeout=None,
            return_when=asyncio.ALL_COMPLETED,
        )
async def _poll_output_size(self) -> None:
    """
    Coroutine for polling the terminal dimensions.
    Useful for situations where `attach_winch_signal_handler` is not sufficient:
    - If we are not running in the main thread.
    - On Windows.
    """
    poll_interval = self.terminal_size_polling_interval
    if poll_interval is None:
        # Polling is disabled for this application.
        return

    previous_size: Size | None = None
    while True:
        await asyncio.sleep(poll_interval)
        current_size = self.output.get_size()

        # Only trigger a resize once we have a baseline measurement and
        # the dimensions actually changed.
        if previous_size is not None and current_size != previous_size:
            self._on_resize()
        previous_size = current_size
def cpr_not_supported_callback(self) -> None:
    """
    Called when we don't receive the cursor position response in time.
    """
    if not self.output.responds_to_cpr:
        # Already known to be unsupported; don't warn again.
        return

    def write_warning() -> None:
        self.output.write(
            "WARNING: your terminal doesn't support cursor position requests (CPR).\r\n"
        )
        self.output.flush()

    run_in_terminal(write_warning)
@overload
def exit(self) -> None:
    "Exit without arguments."

@overload
def exit(self, *, result: _AppResult, style: str = "") -> None:
    "Exit with `_AppResult`."

@overload
def exit(
    self, *, exception: BaseException | type[BaseException], style: str = ""
) -> None:
    "Exit with exception."

def exit(
    self,
    result: _AppResult | None = None,
    exception: BaseException | type[BaseException] | None = None,
    style: str = "",
) -> None:
    """
    Exit application.
    .. note::
        If `Application.exit` is called before `Application.run()` is
        called, then the `Application` won't exit (because the
        `Application.future` doesn't correspond to the current run). Use a
        `pre_run` hook and an event to synchronize the closing if there's a
        chance this can happen.
    :param result: Set this result for the application.
    :param exception: Set this exception as the result for an application. For
        a prompt, this is often `EOFError` or `KeyboardInterrupt`.
    :param style: Apply this style on the whole content when quitting,
        often this is 'class:exiting' for a prompt. (Used when
        `erase_when_done` is not set.)
    """
    # `result` and `exception` are mutually exclusive.
    assert result is None or exception is None

    future = self.future
    if future is None:
        raise Exception("Application is not running. Application.exit() failed.")
    if future.done():
        raise Exception("Return value already set. Application.exit() failed.")

    self.exit_style = style

    # Resolving the future unblocks the `run_async` call.
    if exception is None:
        future.set_result(cast(_AppResult, result))
    else:
        future.set_exception(exception)
def _request_absolute_cursor_position(self) -> None:
    """
    Send CPR request.
    """
    # Only send the request while the input queue is empty and no return
    # value has been set yet: the CPR response arrives through the input
    # stream, so otherwise we wouldn't be able to read it anyway.
    input_queue_empty = not self.key_processor.input_queue
    if input_queue_empty and not self.is_done:
        self.renderer.request_absolute_cursor_position()
async def run_system_command(
    self,
    command: str,
    wait_for_enter: bool = True,
    display_before_text: AnyFormattedText = "",
    wait_text: str = "Press ENTER to continue...",
) -> None:
    """
    Run system command (While hiding the prompt. When finished, all the
    output will scroll above the prompt.)
    :param command: Shell command to be executed.
    :param wait_for_enter: Wait for the user to press enter, when the
        command is finished.
    :param display_before_text: If given, text to be displayed before the
        command executes.
    :param wait_text: Prompt text displayed while waiting for ENTER, when
        `wait_for_enter` is set.
    """
    async with in_terminal():
        # Try to use the same input/output file descriptors as the one,
        # used to run this application.
        try:
            input_fd = self.input.fileno()
        except AttributeError:
            # The Input object doesn't expose a file descriptor;
            # fall back to the process' stdin.
            input_fd = sys.stdin.fileno()
        try:
            output_fd = self.output.fileno()
        except AttributeError:
            output_fd = sys.stdout.fileno()
        # Run sub process.
        def run_command() -> None:
            self.print_text(display_before_text)
            p = Popen(command, shell=True, stdin=input_fd, stdout=output_fd)
            # Block this worker thread (not the event loop) until the
            # subprocess finishes.
            p.wait()
        await run_in_executor_with_context(run_command)
        # Wait for the user to press enter.
        if wait_for_enter:
            await _do_wait_for_enter(wait_text)
def suspend_to_background(self, suspend_group: bool = True) -> None:
    """
    (Not thread safe -- to be called from inside the key bindings.)
    Suspend process.
    :param suspend_group: When true, suspend the whole process group.
        (This is the default, and probably what you want.)
    """
    # Only suspend when the operating system supports it.
    # (Not on Windows.)
    if _SIGTSTP is None:
        return

    def send_suspend_signal() -> None:
        # Send `SIGTSTP` to our own process. This will cause it to suspend.
        sigtstp = cast(int, _SIGTSTP)

        # Usually we want the whole process group to be suspended. This
        # handles the case when input is piped from another process.
        if suspend_group:
            os.kill(0, sigtstp)
        else:
            os.kill(os.getpid(), sigtstp)

    run_in_terminal(send_suspend_signal)
def print_text(
    self, text: AnyFormattedText, style: BaseStyle | None = None
) -> None:
    """
    Print a list of (style_str, text) tuples to the output.
    (When the UI is running, this method has to be called through
    `run_in_terminal`, otherwise it will destroy the UI.)
    :param text: List of ``(style_str, text)`` tuples.
    :param style: Style class to use. Defaults to the active style in the CLI.
    """
    # Fall back to the application's merged style when none is supplied.
    effective_style = style or self._merged_style

    print_formatted_text(
        output=self.output,
        formatted_text=text,
        style=effective_style,
        color_depth=self.color_depth,
        style_transformation=self.style_transformation,
    )
@property
def is_running(self) -> bool:
    "`True` when the application is currently active/running."
    # Maintained by the `set_is_running` context manager inside `run_async`.
    return self._is_running
@property
def is_done(self) -> bool:
    "`True` when the current run has finished (its future is resolved)."
    future = self.future
    return future.done() if future else False
def get_used_style_strings(self) -> list[str]:
    """
    Return a list of used style strings. This is helpful for debugging, and
    for writing a new `Style`.
    """
    style_cache = self.renderer._attrs_for_style
    if not style_cache:
        # Nothing has been rendered yet.
        return []

    # Collapse internal whitespace and sort for stable, readable output.
    normalized = [re.sub(r"\s+", " ", key).strip() for key in style_cache]
    return sorted(normalized)
| Application |
python | huggingface__transformers | src/transformers/models/clap/modeling_clap.py | {
"start": 29861,
"end": 32149
} | class ____(nn.Module):
"""
Patch Merging Layer.
Args:
input_resolution (`tuple[int]`):
Resolution of input feature.
dim (`int`):
Number of input channels.
norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
Normalization layer class.
"""
def __init__(self, input_resolution: tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None:
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def maybe_pad(self, input_feature, height, width):
should_pad = (height % 2 == 1) or (width % 2 == 1)
if should_pad:
pad_values = (0, 0, 0, width % 2, 0, height % 2)
input_feature = nn.functional.pad(input_feature, pad_values)
return input_feature
def forward(self, input_feature: torch.Tensor, input_dimensions: tuple[int, int]) -> torch.Tensor:
height, width = input_dimensions
# `dim` is height * width
batch_size, dim, num_channels = input_feature.shape
input_feature = input_feature.view(batch_size, height, width, num_channels)
# pad input to be divisible by width and height, if needed
input_feature = self.maybe_pad(input_feature, height, width)
# [batch_size, height/2, width/2, num_channels]
input_feature_0 = input_feature[:, 0::2, 0::2, :]
# [batch_size, height/2, width/2, num_channels]
input_feature_1 = input_feature[:, 1::2, 0::2, :]
# [batch_size, height/2, width/2, num_channels]
input_feature_2 = input_feature[:, 0::2, 1::2, :]
# [batch_size, height/2, width/2, num_channels]
input_feature_3 = input_feature[:, 1::2, 1::2, :]
# batch_size height/2 width/2 4*num_channels
input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)
input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # batch_size height/2*width/2 4*C
input_feature = self.norm(input_feature)
input_feature = self.reduction(input_feature)
return input_feature
| ClapAudioPatchMerging |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 641429,
"end": 641771
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("count", "state")
count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="count")
state = sgqlc.types.Field(sgqlc.types.non_null(StatusState), graphql_name="state")
| StatusContextStateCount |
python | jazzband__django-model-utils | tests/models.py | {
"start": 5153,
"end": 5323
} | class ____(AbstractCustomManagerStatusModel):
"""A concrete status model with a custom manager."""
title = models.CharField(max_length=50)
| CustomManagerStatusModel |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/sensors/test_emr_serverless_application.py | {
"start": 1009,
"end": 1734
} | class ____:
def setup_method(self):
self.app_id = "vzwemreks"
self.job_run_id = "job1234"
self.sensor = EmrServerlessApplicationSensor(
task_id="test_emrcontainer_sensor",
application_id=self.app_id,
aws_conn_id="aws_default",
)
def set_get_application_return_value(self, return_value: dict[str, str]):
self.mock_hook = MagicMock()
self.mock_hook.conn.get_application.return_value = return_value
self.sensor.hook = self.mock_hook
def assert_get_application_was_called_once_with_app_id(self):
self.mock_hook.conn.get_application.assert_called_once_with(applicationId=self.app_id)
| TestEmrServerlessApplicationSensor |
python | scrapy__scrapy | scrapy/commands/version.py | {
"start": 122,
"end": 1044
} | class ____(ScrapyCommand):
requires_crawler_process = False
default_settings = {"LOG_ENABLED": False}
def syntax(self) -> str:
return "[-v]"
def short_desc(self) -> str:
return "Print Scrapy version"
def add_options(self, parser: argparse.ArgumentParser) -> None:
super().add_options(parser)
parser.add_argument(
"--verbose",
"-v",
dest="verbose",
action="store_true",
help="also display twisted/python/platform info (useful for bug reports)",
)
def run(self, args: list[str], opts: argparse.Namespace) -> None:
if opts.verbose:
versions = get_versions()
width = max(len(n) for (n, _) in versions)
for name, version in versions:
print(f"{name:<{width}} : {version}")
else:
print(f"Scrapy {scrapy.__version__}")
| Command |
python | pytorch__pytorch | torch/_inductor/select_algorithm.py | {
"start": 2890,
"end": 3045
} | class ____:
pass
# these objects are imported from the generated wrapper code
extern_kernels = KernelNamespace()
@dataclasses.dataclass
| KernelNamespace |
python | apache__airflow | providers/dbt/cloud/tests/unit/dbt/cloud/triggers/test_dbt.py | {
"start": 1135,
"end": 12113
} | class ____:
DAG_ID = "dbt_cloud_run"
TASK_ID = "dbt_cloud_run_task_op"
RUN_ID = 1234
CONN_ID = "dbt_cloud_default"
ACCOUNT_ID = 12340
END_TIME = time.time() + 60 * 60 * 24 * 7
POLL_INTERVAL = 3.0
def test_serialization(self):
"""Assert DbtCloudRunJobTrigger correctly serializes its arguments and classpath."""
trigger = DbtCloudRunJobTrigger(
conn_id=self.CONN_ID,
poll_interval=self.POLL_INTERVAL,
end_time=self.END_TIME,
run_id=self.RUN_ID,
account_id=self.ACCOUNT_ID,
hook_params={"retry_delay": 10},
)
classpath, kwargs = trigger.serialize()
assert classpath == "airflow.providers.dbt.cloud.triggers.dbt.DbtCloudRunJobTrigger"
assert kwargs == {
"run_id": self.RUN_ID,
"account_id": self.ACCOUNT_ID,
"conn_id": self.CONN_ID,
"end_time": self.END_TIME,
"poll_interval": self.POLL_INTERVAL,
"hook_params": {"retry_delay": 10},
}
@pytest.mark.asyncio
@mock.patch("airflow.providers.dbt.cloud.triggers.dbt.DbtCloudRunJobTrigger.is_still_running")
async def test_dbt_run_job_trigger(self, mocked_is_still_running):
"""Test DbtCloudRunJobTrigger is triggered with mocked details and run successfully."""
mocked_is_still_running.return_value = True
trigger = DbtCloudRunJobTrigger(
conn_id=self.CONN_ID,
poll_interval=self.POLL_INTERVAL,
end_time=self.END_TIME,
run_id=self.RUN_ID,
account_id=self.ACCOUNT_ID,
)
task = asyncio.create_task(trigger.run().__anext__())
await asyncio.sleep(0.5)
# TriggerEvent was not returned
assert task.done() is False
asyncio.get_event_loop().stop()
@pytest.mark.asyncio
@pytest.mark.parametrize(
("mock_value", "mock_status", "mock_message"),
[
(DbtCloudJobRunStatus.SUCCESS.value, "success", "Job run 1234 has completed successfully."),
],
)
@mock.patch("airflow.providers.dbt.cloud.triggers.dbt.DbtCloudRunJobTrigger.is_still_running")
@mock.patch("airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_job_status")
async def test_dbt_job_run_for_terminal_status_success(
self, mock_get_job_status, mocked_is_still_running, mock_value, mock_status, mock_message
):
"""Assert that run trigger success message in case of job success"""
mocked_is_still_running.return_value = False
mock_get_job_status.return_value = mock_value
trigger = DbtCloudRunJobTrigger(
conn_id=self.CONN_ID,
poll_interval=self.POLL_INTERVAL,
end_time=self.END_TIME,
run_id=self.RUN_ID,
account_id=self.ACCOUNT_ID,
)
expected_result = {
"status": mock_status,
"message": mock_message,
"run_id": self.RUN_ID,
}
task = asyncio.create_task(trigger.run().__anext__())
await asyncio.sleep(0.5)
assert TriggerEvent(expected_result) == task.result()
asyncio.get_event_loop().stop()
@pytest.mark.asyncio
@pytest.mark.parametrize(
("mock_value", "mock_status", "mock_message"),
[
(DbtCloudJobRunStatus.CANCELLED.value, "cancelled", "Job run 1234 has been cancelled."),
],
)
@mock.patch("airflow.providers.dbt.cloud.triggers.dbt.DbtCloudRunJobTrigger.is_still_running")
@mock.patch("airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_job_status")
async def test_dbt_job_run_for_terminal_status_cancelled(
self, mock_get_job_status, mocked_is_still_running, mock_value, mock_status, mock_message
):
"""Assert that run trigger success message in case of job success"""
mocked_is_still_running.return_value = False
mock_get_job_status.return_value = mock_value
trigger = DbtCloudRunJobTrigger(
conn_id=self.CONN_ID,
poll_interval=self.POLL_INTERVAL,
end_time=self.END_TIME,
run_id=self.RUN_ID,
account_id=self.ACCOUNT_ID,
)
expected_result = {
"status": mock_status,
"message": mock_message,
"run_id": self.RUN_ID,
}
task = asyncio.create_task(trigger.run().__anext__())
await asyncio.sleep(0.5)
assert TriggerEvent(expected_result) == task.result()
asyncio.get_event_loop().stop()
@pytest.mark.asyncio
@pytest.mark.parametrize(
("mock_value", "mock_status", "mock_message"),
[
(DbtCloudJobRunStatus.ERROR.value, "error", "Job run 1234 has failed."),
],
)
@mock.patch("airflow.providers.dbt.cloud.triggers.dbt.DbtCloudRunJobTrigger.is_still_running")
@mock.patch("airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_job_status")
async def test_dbt_job_run_for_terminal_status_error(
self, mock_get_job_status, mocked_is_still_running, mock_value, mock_status, mock_message
):
"""Assert that run trigger success message in case of job success"""
mocked_is_still_running.return_value = False
mock_get_job_status.return_value = mock_value
trigger = DbtCloudRunJobTrigger(
conn_id=self.CONN_ID,
poll_interval=self.POLL_INTERVAL,
end_time=self.END_TIME,
run_id=self.RUN_ID,
account_id=self.ACCOUNT_ID,
)
expected_result = {
"status": mock_status,
"message": mock_message,
"run_id": self.RUN_ID,
}
task = asyncio.create_task(trigger.run().__anext__())
await asyncio.sleep(0.5)
assert TriggerEvent(expected_result) == task.result()
asyncio.get_event_loop().stop()
@pytest.mark.asyncio
@mock.patch("airflow.providers.dbt.cloud.triggers.dbt.DbtCloudRunJobTrigger.is_still_running")
@mock.patch("airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_job_status")
async def test_dbt_job_run_exception(self, mock_get_job_status, mocked_is_still_running):
"""Assert that run catch exception if dbt cloud job API throw exception"""
mocked_is_still_running.return_value = False
mock_get_job_status.side_effect = Exception("Test exception")
trigger = DbtCloudRunJobTrigger(
conn_id=self.CONN_ID,
poll_interval=self.POLL_INTERVAL,
end_time=self.END_TIME,
run_id=self.RUN_ID,
account_id=self.ACCOUNT_ID,
)
task = [i async for i in trigger.run()]
response = TriggerEvent(
{
"status": "error",
"message": "Test exception",
"run_id": self.RUN_ID,
}
)
assert len(task) == 1
assert response in task
@pytest.mark.asyncio
@mock.patch("airflow.providers.dbt.cloud.triggers.dbt.DbtCloudRunJobTrigger.is_still_running")
@mock.patch("airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_job_status")
async def test_dbt_job_run_timeout(self, mock_get_job_status, mocked_is_still_running):
"""Assert that run timeout after end_time elapsed"""
mocked_is_still_running.return_value = True
mock_get_job_status.side_effect = Exception("Test exception")
end_time = time.time()
trigger = DbtCloudRunJobTrigger(
conn_id=self.CONN_ID,
poll_interval=self.POLL_INTERVAL,
end_time=end_time,
run_id=self.RUN_ID,
account_id=self.ACCOUNT_ID,
)
generator = trigger.run()
actual = await generator.asend(None)
expected = TriggerEvent(
{
"status": "error",
"message": f"Job run {self.RUN_ID} has not reached a terminal status "
f"after {end_time} seconds.",
"run_id": self.RUN_ID,
}
)
assert expected == actual
@pytest.mark.asyncio
@pytest.mark.parametrize(
("mock_response", "expected_status"),
[
(DbtCloudJobRunStatus.SUCCESS.value, False),
],
)
@mock.patch("airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_job_status")
async def test_dbt_job_run_is_still_running_success(
self, mock_get_job_status, mock_response, expected_status
):
"""Test is_still_running with mocked response job status and assert
the return response with expected value"""
hook = AsyncMock(DbtCloudHook)
hook.get_job_status.return_value = mock_response
trigger = DbtCloudRunJobTrigger(
conn_id=self.CONN_ID,
poll_interval=self.POLL_INTERVAL,
end_time=self.END_TIME,
run_id=self.RUN_ID,
account_id=self.ACCOUNT_ID,
)
response = await trigger.is_still_running(hook)
assert response == expected_status
@pytest.mark.asyncio
@pytest.mark.parametrize(
("mock_response", "expected_status"),
[
(DbtCloudJobRunStatus.RUNNING.value, True),
],
)
@mock.patch("airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_job_status")
async def test_dbt_job_run_is_still_running(self, mock_get_job_status, mock_response, expected_status):
"""Test is_still_running with mocked response job status and assert
the return response with expected value"""
hook = AsyncMock(DbtCloudHook)
hook.get_job_status.return_value = mock_response
trigger = DbtCloudRunJobTrigger(
conn_id=self.CONN_ID,
poll_interval=self.POLL_INTERVAL,
end_time=self.END_TIME,
run_id=self.RUN_ID,
account_id=self.ACCOUNT_ID,
)
response = await trigger.is_still_running(hook)
assert response == expected_status
@pytest.mark.asyncio
@pytest.mark.parametrize(
("mock_response", "expected_status"),
[
(DbtCloudJobRunStatus.QUEUED.value, True),
],
)
@mock.patch("airflow.providers.dbt.cloud.hooks.dbt.DbtCloudHook.get_job_status")
async def test_dbt_job_run_is_still_running_queued(
self, mock_get_job_status, mock_response, expected_status
):
"""Test is_still_running with mocked response job status and assert
the return response with expected value"""
hook = AsyncMock(DbtCloudHook)
hook.get_job_status.return_value = mock_response
trigger = DbtCloudRunJobTrigger(
conn_id=self.CONN_ID,
poll_interval=self.POLL_INTERVAL,
end_time=self.END_TIME,
run_id=self.RUN_ID,
account_id=self.ACCOUNT_ID,
)
response = await trigger.is_still_running(hook)
assert response == expected_status
| TestDbtCloudRunJobTrigger |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_table01.py | {
"start": 315,
"end": 1371
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_table01.xlsx")
def test_create_file(self):
"""Test XlsxWriter chart axis table properties."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [61355520, 61357056]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_table()
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | django__django | django/test/utils.py | {
"start": 15394,
"end": 18467
} | class ____(TestContextDecorator):
    """
    Act as either a decorator or a context manager. If it's a decorator, take a
    function and return a wrapped function. If it's a contextmanager, use it
    with the ``with`` statement. In either event, entering/exiting are called
    before and after, respectively, the function/block is executed.
    """
    # Exception raised by a setting_changed receiver while entering; stored so
    # that disable() can restore the settings first and re-raise it afterwards.
    enable_exception = None
    def __init__(self, **kwargs):
        # The settings to override, given as keyword arguments.
        self.options = kwargs
        super().__init__()
    def enable(self):
        """Apply the overridden settings and notify setting_changed receivers."""
        # Keep this code at the beginning to leave the settings unchanged
        # in case it raises an exception because INSTALLED_APPS is invalid.
        if "INSTALLED_APPS" in self.options:
            try:
                apps.set_installed_apps(self.options["INSTALLED_APPS"])
            except Exception:
                apps.unset_installed_apps()
                raise
        # Layer the overrides on top of the current settings and swap in the
        # new holder, keeping the original so disable() can restore it.
        override = UserSettingsHolder(settings._wrapped)
        for key, new_value in self.options.items():
            setattr(override, key, new_value)
        self.wrapped = settings._wrapped
        settings._wrapped = override
        for key, new_value in self.options.items():
            try:
                setting_changed.send(
                    sender=settings._wrapped.__class__,
                    setting=key,
                    value=new_value,
                    enter=True,
                )
            except Exception as exc:
                # A receiver failed: remember the exception, restore the
                # original settings via disable(), which re-raises it.
                self.enable_exception = exc
                self.disable()
    def disable(self):
        """Restore the original settings and notify setting_changed receivers."""
        if "INSTALLED_APPS" in self.options:
            apps.unset_installed_apps()
        settings._wrapped = self.wrapped
        del self.wrapped
        responses = []
        for key in self.options:
            new_value = getattr(settings, key, None)
            # send_robust() collects receiver exceptions instead of raising,
            # so every receiver is notified of the restore before we re-raise.
            responses_for_setting = setting_changed.send_robust(
                sender=settings._wrapped.__class__,
                setting=key,
                value=new_value,
                enter=False,
            )
            responses.extend(responses_for_setting)
        if self.enable_exception is not None:
            # An exception stored by enable() takes precedence over any
            # receiver exception collected above.
            exc = self.enable_exception
            self.enable_exception = None
            raise exc
        for _, response in responses:
            if isinstance(response, Exception):
                raise response
    def save_options(self, test_func):
        """Record this instance's overrides on the decorated test class."""
        if test_func._overridden_settings is None:
            test_func._overridden_settings = self.options
        else:
            # Duplicate dict to prevent subclasses from altering their parent.
            test_func._overridden_settings = {
                **test_func._overridden_settings,
                **self.options,
            }
    def decorate_class(self, cls):
        """Attach the overrides to a SimpleTestCase subclass via save_options()."""
        from django.test import SimpleTestCase
        if not issubclass(cls, SimpleTestCase):
            raise ValueError(
                "Only subclasses of Django SimpleTestCase can be decorated "
                "with override_settings"
            )
        self.save_options(cls)
        return cls
| override_settings |
python | Textualize__textual | docs/examples/guide/styles/dimensions02.py | {
"start": 401,
"end": 760
} | class ____(App):
def compose(self) -> ComposeResult:
self.widget = Static(TEXT)
yield self.widget
def on_mount(self) -> None:
self.widget.styles.background = "purple"
self.widget.styles.width = 30
self.widget.styles.height = "auto"
if __name__ == "__main__":
app = DimensionsApp()
app.run()
| DimensionsApp |
python | tensorflow__tensorflow | tensorflow/python/ops/dequantize_op_test.py | {
"start": 938,
"end": 5705
} | class ____(test.TestCase):
  """Tests for array_ops.dequantize against a NumPy reference implementation."""
  def __init__(self, method_name="runTest"):
    super(DequantizeOpTest, self).__init__(method_name)
  def _testDequantizeOp(self, inputs, min_range, max_range, dtype,
                        mode="MIN_COMBINED", narrow_range=False):
    """Dequantize `inputs` via TF and compare with a NumPy re-computation.

    Args:
      inputs: 1-D numpy array of quantized integer values.
      min_range: Scalar minimum of the represented float range.
      max_range: Scalar maximum of the represented float range.
      dtype: Quantized TF dtype (quint8/qint8/quint16/qint16).
      mode: Dequantization mode, "MIN_COMBINED" or "SCALED".
      narrow_range: In SCALED mode, exclude the most negative quantized value.
    """
    with self.cached_session():
      input_op = constant_op.constant(inputs, shape=[len(inputs)], dtype=dtype)
      dequantized = array_ops.dequantize(input_op, min_range, max_range,
                                         mode=mode, narrow_range=narrow_range)
      tf_ans = self.evaluate(dequantized)
    # TODO(vrv): Add support for DT_QINT32 quantization if needed.
    type_dict = {
        dtypes.quint8: np.uint8,
        dtypes.qint8: np.int8,
        dtypes.quint16: np.uint16,
        dtypes.qint16: np.int16
    }
    self.assertIn(dtype, type_dict.keys())
    # Integer limits of the underlying storage type.
    v_max = np.iinfo(type_dict[dtype]).max
    v_min = np.iinfo(type_dict[dtype]).min
    self.assertGreaterEqual(min_range, v_min)
    self.assertLessEqual(max_range, v_max)
    type_range = v_max - v_min
    if mode == "MIN_COMBINED":
      # Signed types are shifted by half the range before scaling.
      if v_min < 0:
        half_range = (type_range + 1) / 2
      else:
        half_range = 0.0
      np_ans = ((inputs.astype(np.float32) + half_range) *
                (max_range - min_range) / type_range) + min_range
    elif mode == "SCALED":
      if narrow_range:
        v_min += 1
      # Single scale factor chosen so both range endpoints are representable.
      scale_factor = max(min_range / v_min, max_range / v_max)
      np_ans = inputs.astype(np.float32) * scale_factor
    self.assertAllClose(tf_ans, np_ans, rtol=1e-5, atol=1e-5)
  def testBasicQuint8(self):
    self._testDequantizeOp(np.array([0, 128, 255]), 0.0, 6.0, dtypes.quint8)
    self._testDequantizeOp(np.array([0, 128, 255]), 0.0, 123.456, dtypes.quint8)
    self._testDequantizeOp(
        np.array([0, 4, 42, 108, 243]), 5.0, 200.2, dtypes.quint8)
  def testBasicQint8(self):
    self._testDequantizeOp(np.array([-128, 0, 127]), -1.0, 2.0, dtypes.qint8)
    self._testDequantizeOp(np.array([-2, 4, -17]), -5.0, -3.0, dtypes.qint8)
    self._testDequantizeOp(np.array([0, -4, 42, -108]), 5.0, 40.0, dtypes.qint8)
  def testScaledMode(self):
    self._testDequantizeOp(np.array([-128, 0, 127]), -1.0, 2.0, dtypes.qint8,
                           mode="SCALED")
    self._testDequantizeOp(np.array([-2, 4, -17]), -5.0, -3.0, dtypes.qint8,
                           mode="SCALED")
    self._testDequantizeOp(np.array([0, -4, 42, -108]), 5.0, 40.0, dtypes.qint8,
                           mode="SCALED")
  def testNarrowRange(self):
    self._testDequantizeOp(np.array([-128, 0, 127]), -1.0, 2.0, dtypes.qint8,
                           mode="SCALED", narrow_range=True)
    self._testDequantizeOp(np.array([-2, 4, -17]), -5.0, -3.0, dtypes.qint8,
                           mode="SCALED", narrow_range=True)
    self._testDequantizeOp(np.array([0, -4, 42, -108]), 5.0, 40.0, dtypes.qint8,
                           mode="SCALED", narrow_range=True)
  def testAxis(self):
    """Per-axis dequantization with per-slice min/max ranges."""
    # Generates a tensor of the specified `shape` using values from `values`
    # scaled by (slice_idx + 1) along `axis` dimension.
    def scale_per_slice(shape, axis, values):
      # Note: repeats the values if the shape is larger than values.
      out = np.take(values, np.remainder(np.arange(np.prod(shape)),
                                         len(values))).reshape(shape)
      if axis is not None:
        scale_shape = [1] * len(shape)
        scale_shape[axis] = shape[axis]
        out *= np.arange(1, shape[axis] + 1).reshape(scale_shape)
      return out
    shape = np.array([2, 3, 4, 5])
    values = np.array([-128, -64, 0, 38, 102, 71, 64], dtype=np.int32)
    dequant_values = np.array([-2, -1.0, 0, 0.59375, 1.59375, 1.109375, 1.0],
                              dtype=np.float32)
    for axis in [None, 0, 1, 2, 3]:
      inputs = constant_op.constant(
          scale_per_slice(shape, None, values), dtype=dtypes.qint8)
      expected_dequantized = scale_per_slice(shape, axis, dequant_values)
      if axis is None:
        min_range, max_range = -2.0, 1.6
      else:
        # One (min, max) pair per slice along `axis`.
        num_slices = shape[axis]
        min_range, max_range = [], []
        for slice_idx in range(num_slices):
          min_range.append(-2.0 * (slice_idx + 1))
          max_range.append(1.6 * (slice_idx + 1))
      dequantized = self.evaluate(
          array_ops.dequantize(
              inputs, min_range, max_range, mode="SCALED", axis=axis))
      self.assertAllEqual(dequantized, expected_dequantized)
      if axis is not None:
        # Negative axis values must behave identically to axis - rank.
        dequantized = self.evaluate(
            array_ops.dequantize(
                inputs, min_range, max_range, mode="SCALED", axis=(axis - 4)))
        self.assertAllClose(dequantized, expected_dequantized)
if __name__ == "__main__":
  test.main()
| DequantizeOpTest |
python | ray-project__ray | python/ray/dag/dag_node_operation.py | {
"start": 2689,
"end": 36988
} | class ____:
    """
    One READ/COMPUTE/WRITE operation of a compiled-DAG task, represented as a
    node in the DAG operation graph together with its dependency edges and the
    bookkeeping used to schedule synchronous accelerator operations.
    """
    def __init__(
        self,
        operation: _DAGNodeOperation,
        task_idx: int,
        actor_handle: "ray.actor.ActorHandle",
        requires_accelerator: bool,
    ):
        """
        _DAGOperationGraphNode represents a node in the DAG operation graph.
        It contains information about the node's in-degree, out-degree, edges,
        and the operation it performs.
        Args:
            operation: The operation that this node performs. The operation
                can be a READ, COMPUTE, or WRITE operation.
            task_idx: A unique index which can be used to index into
                `CompiledDAG.idx_to_task` to get the corresponding task.
            actor_handle: The actor handle to which this operation belongs.
            requires_accelerator: Whether this operation requires accelerator.
        """
        self.operation = operation
        self.task_idx = task_idx
        self.actor_handle = actor_handle
        self.requires_accelerator = requires_accelerator
        # The in_edges and out_edges are dicts of tuples to strings.
        # Each tuple (the key) contains an integer `task_idx`, which can be
        # used to index into `idx_to_task` to get the corresponding task,
        # and a `_DAGNodeOperationType`, which can be READ, COMPUTE, or WRITE.
        # The string (the value) is the visualization information of the edge,
        # it is a tuple of a label of the edge and a boolean indicating whether
        # the edge is a control dependency.
        self.in_edges: Dict[Tuple[int, _DAGNodeOperationType], Tuple[str, bool]] = {}
        self.out_edges: Dict[Tuple[int, _DAGNodeOperationType], Tuple[str, bool]] = {}
        # The synchronous nodes are all the nodes that belong to the same accelerator
        # operation. Each node is represented by a tuple of its task idx and type.
        self.sync_idxs: Set[Tuple[int, _DAGNodeOperationType]] = set()
        # The pending synchronous nodes are the nodes that are pending to be executed,
        # i.e., their in-degrees are zero. When a synchronous node is pending, it
        # will be added to the pending synchronous nodes of all the nodes in the
        # accelerator operation.
        self.pending_sync_idxs: Set[Tuple[int, _DAGNodeOperationType]] = set()
    def __repr__(self):
        # Debug-oriented representation; not intended to be parsed.
        return (
            f"_DAGOperationGraphNode("
            f"operation: {self.operation}, "
            f"task_idx: {self.task_idx}, "
            f"actor_id: {self.actor_handle._ray_actor_id}, "
            f"requires_accelerator: {self.requires_accelerator})"
        )
    def __lt__(self, other: "_DAGOperationGraphNode"):
        """
        This function defines the order of the nodes in the priority queue used in
        `_select_next_nodes`. The priority queue is a min-heap, so the node with
        higher priority is considered "less than" the other node.
        """
        if self.is_accelerator_op != other.is_accelerator_op:
            # When one node is an accelerator operation and the other is not,
            # prioritize the accelerator operation.
            return self.is_accelerator_op
        else:
            # When either both nodes are accelerator operations or both nodes
            # are not accelerator operations, prioritize the earlier task within
            # the same actor and load balance tasks across actors. The tie is
            # broken by the `task_idx`.
            return (self.operation.exec_task_idx, self.task_idx) < (
                other.operation.exec_task_idx,
                other.task_idx,
            )
    def __eq__(self, other: "_DAGOperationGraphNode"):
        """
        Two operations are equal only when they have the same `exec_task_idx` and `type`
        and belong to the same actor.
        """
        return (
            self.actor_handle == other.actor_handle
            and self.operation.exec_task_idx == other.operation.exec_task_idx
            and self.operation.type == other.operation.type
        )
    def __hash__(self):
        """
        An operation is uniquely identified by its `task_idx` and type.
        """
        return hash((self.operation, self.task_idx))
    @property
    def in_degree(self) -> int:
        """Number of unsatisfied dependency edges into this node."""
        return len(self.in_edges)
    @property
    def is_ready(self) -> bool:
        """
        If a node is not an accelerator operation, it is ready when it has a zero
        in-degree.
        If it is an accelerator operation, it is ready when all the nodes in the
        operation have zero in-degrees.
        """
        return self.in_degree == 0 and (
            len(self.pending_sync_idxs) == len(self.sync_idxs)
        )
    @property
    def is_read(self) -> bool:
        """Whether this node is a READ operation (any channel type)."""
        return self.operation.type == _DAGNodeOperationType.READ
    @property
    def is_accelerator_read(self) -> bool:
        """
        A node is an accelerator read if it is a read node and requires accelerator.
        """
        return (
            self.operation.type == _DAGNodeOperationType.READ
            and self.requires_accelerator
        )
    @property
    def is_accelerator_compute(self) -> bool:
        """
        A node is an accelerator compute if it is a compute node and requires accelerator.
        """
        return (
            self.operation.type == _DAGNodeOperationType.COMPUTE
            and self.requires_accelerator
        )
    @property
    def is_accelerator_write(self) -> bool:
        """
        A node is an accelerator write if it is a write node and requires accelerator.
        """
        return (
            self.operation.type == _DAGNodeOperationType.WRITE
            and self.requires_accelerator
        )
    @property
    def is_accelerator_op(self) -> bool:
        """Whether this node is any accelerator READ/COMPUTE/WRITE operation."""
        return (
            self.is_accelerator_read
            or self.is_accelerator_compute
            or self.is_accelerator_write
        )
    def viz_str(self):
        """
        A string representation of the node to be used in visualization.
        """
        return self.operation.viz_str()
    @property
    def _actor_id(self):
        """Hex string of the ID of the actor this operation runs on."""
        return self.actor_handle._ray_actor_id.hex()
def _add_edge(
from_node: _DAGOperationGraphNode,
to_node: _DAGOperationGraphNode,
label: str = "",
control_dependency: bool = False,
):
"""
Add an edge from `from_node` to `to_node`.
Args:
from_node: The node from which the edge originates.
to_node: The node to which the edge points.
label: The label of the edge. This will be used to annotate the edge
in the visualization of the execution schedule.
"""
from_node.out_edges[(to_node.task_idx, to_node.operation.type)] = (
label,
control_dependency,
)
to_node.in_edges[(from_node.task_idx, from_node.operation.type)] = (
label,
control_dependency,
)
def _update_pending_sync_idxs(
graph: Dict[int, Dict[_DAGNodeOperationType, _DAGOperationGraphNode]],
node: _DAGOperationGraphNode,
) -> None:
"""
Update the node as pending for its synchronous nodes.
"""
idx = (node.task_idx, node.operation.type)
for task_idx, op_type in node.sync_idxs:
sync_node = graph[task_idx][op_type]
sync_node.pending_sync_idxs.add(idx)
def _push_candidate_node_if_ready(
    actor_to_candidates: Dict["ray._raylet.ActorID", List[_DAGOperationGraphNode]],
    graph: Dict[int, Dict[_DAGNodeOperationType, _DAGOperationGraphNode]],
    node: _DAGOperationGraphNode,
) -> None:
    """
    Push the node with a zero in-degree to the candidates if its operation is ready.
    If it has synchronous nodes, its accelerator operation is not ready until all
    the nodes are pending, then all the nodes will be pushed to the candidates.
    """
    assert node.in_degree == 0, "Expected to have a zero in-degree"
    # For the accelerator write node, update the in-degrees of the downstream
    # accelerator read nodes and update them as pending. This is necessary because
    # the data dependency edges between accelerator write and read nodes are only
    # updated here. The accelerator P2P operation becomes ready after both the write
    # and read nodes are marked as pending.
    if node.is_accelerator_write:
        for task_idx, op_type in node.out_edges:
            read_node = graph[task_idx][op_type]
            read_node.in_edges.pop((node.task_idx, node.operation.type))
            assert read_node.is_accelerator_read and len(read_node.in_edges) == 0
            _update_pending_sync_idxs(graph, read_node)
    # For the accelerator operation node, update it as pending.
    if len(node.sync_idxs) != 0:
        _update_pending_sync_idxs(graph, node)
    # The accelerator operation is ready when all the nodes have zero in-degrees.
    # When the last node in the operation is updated as pending, push all the nodes
    # to the candidates.
    if node.is_ready:
        if len(node.sync_idxs) == 0:
            # Plain (non-synchronous) operation: push just this node onto the
            # owning actor's min-heap of candidates.
            heapq.heappush(
                actor_to_candidates[node.actor_handle._actor_id],
                node,
            )
        else:
            # Synchronous accelerator operation: push every member of the
            # group onto its own actor's candidate heap.
            for task_idx, op_type in node.sync_idxs:
                sync_node = graph[task_idx][op_type]
                heapq.heappush(
                    actor_to_candidates[sync_node.actor_handle._actor_id],
                    sync_node,
                )
def _select_next_nodes(
actor_to_candidates: Dict["ray._raylet.ActorID", List[_DAGOperationGraphNode]],
graph: Dict[int, Dict[_DAGNodeOperationType, _DAGOperationGraphNode]],
) -> Optional[List[_DAGOperationGraphNode]]:
"""
This function selects the next nodes for the topological sort to generate
execution schedule. If there are multiple candidate _DAGOperationGraphNodes,
select the node with the top priority. The priority is defined in
`_DAGOperationGraphNode.__lt__`.
For the implementation details, we maintain a priority queue for each actor,
where the head of the priority queue is the node with the smallest `exec_task_idx`.
When a node has a zero in-degree, it is added to the corresponding actor's
priority queue. For a node other than an accelerator collective node, it is ready to be
executed if it has a zero in-degree. For an accelerator collective node, it is ready to
be executed when all the nodes in its collective operation have zero in-degrees.
If a node is an accelerator collective node, it updates the `ready_collective_nodes` of
all the nodes in its collective operation. Unless all the nodes in its collective
group have zero in-degrees, this node is removed from the candidate list.
Eventually, exactly one accelerator collective node from its collective operation is
selected from the candidate list.
If the selected node is an accelerator write node, select all the downstream accelerator
read nodes. If the selected node is an accelerator collective node, select all the accelerator
compute nodes in its collective operation.
Args:
actor_to_candidates: A dictionary mapping an actor id to a list of
candidate nodes. The list is maintained as a priority queue, so
the head of the queue, i.e., `candidates[0]`, is the node with
the smallest `bind_index`.
graph: A dictionary mapping the index of a task to a dictionary of its
_DAGOperationGraphNodes for different operations.
Returns:
A list of _DAGOperationGraphNodes to be placed into the corresponding
execution schedules.
"""
top_priority_node = None
for candidates in actor_to_candidates.values():
if len(candidates) == 0:
continue
if top_priority_node is None or candidates[0] < top_priority_node:
top_priority_node = candidates[0]
if top_priority_node is None:
return None
next_nodes = [top_priority_node]
# Select all the synchronous nodes in the accelerator operation.
if len(top_priority_node.sync_idxs) != 0:
for task_idx, op_type in top_priority_node.sync_idxs:
node = graph[task_idx][op_type]
if node != top_priority_node:
next_nodes.append(node)
# Remove the selected nodes from the candidates.
for node in next_nodes:
candidates = actor_to_candidates[node.actor_handle._actor_id]
candidates.remove(node)
heapq.heapify(candidates)
# Remove the selected nodes from the candidates.
for node in next_nodes:
candidates = actor_to_candidates[node.actor_handle._actor_id]
# The accelerator read nodes are not added to the candidates.
if node in candidates:
candidates.remove(node)
heapq.heapify(candidates)
return next_nodes
def _build_dag_node_operation_graph(
    idx_to_task: Dict[int, "ray.dag.compiled_dag_node.CompiledTask"],
    actor_to_operation_nodes: Dict[
        "ray.actor.ActorHandle", List[List[_DAGOperationGraphNode]]
    ],
) -> Dict[int, Dict[_DAGNodeOperationType, _DAGOperationGraphNode]]:
    """
    Generate a DAG node operation graph by adding edges based on the
    following rules:
    #1 Add edges from READ to COMPUTE, and from COMPUTE to WRITE, which
       belong to the same task.
    #2 Add an edge from COMPUTE with bind_index i to COMPUTE with bind_index
       i+1 if they belong to the same actor.
    #3 Add an edge from WRITE of the writer task to READ of the reader task.
    This is the step one of building an execution schedule for each actor.
    Args:
        idx_to_task: A dictionary that maps the `task_idx` to the `CompiledTask`.
            `CompiledTask` contains information about a DAGNode and its downstream
            nodes.
        actor_to_operation_nodes: A dictionary that maps an actor handle to
            a list of lists of _DAGOperationGraphNode. For the same actor, the
            index of the outer list corresponds to the index of the ExecutableTask
            in the list of `executable_tasks` in `actor_to_executable_tasks`. In
            the inner list, the order of operations is READ, COMPUTE, and WRITE.
    Returns:
        A graph where each node is a _DAGOperationGraphNode. The key is `task_idx`,
        the index to retrieve its task from `idx_to_task`, and the value is a
        dictionary that maps the _DAGNodeOperationType (READ, COMPUTE, or WRITE)
        to the corresponding _DAGOperationGraphNode.
    """
    assert idx_to_task
    graph: Dict[int, Dict[_DAGNodeOperationType, _DAGOperationGraphNode]] = {}
    for _, operation_nodes_list in actor_to_operation_nodes.items():
        prev_compute_node = None
        for operation_nodes in operation_nodes_list:
            task_idx = operation_nodes[0].task_idx
            read_node, compute_node, write_node = (
                operation_nodes[0],
                operation_nodes[1],
                operation_nodes[2],
            )
            # Add edges from READ to COMPUTE, and from COMPUTE to WRITE, which
            # belong to the same task.
            _add_edge(read_node, compute_node)
            _add_edge(compute_node, write_node)
            # Add an edge from COMPUTE with `bind_index` i to COMPUTE with
            # `bind_index` i+1 if they belong to the same actor.
            if prev_compute_node is not None:
                _add_edge(prev_compute_node, compute_node, "", True)
            prev_compute_node = compute_node
            assert task_idx not in graph
            graph[task_idx] = {
                _DAGNodeOperationType.READ: read_node,
                _DAGNodeOperationType.COMPUTE: compute_node,
                _DAGNodeOperationType.WRITE: write_node,
            }
    # Import `ray.dag` here to avoid circular import.
    from ray.dag import ClassMethodNode, CollectiveOutputNode, MultiOutputNode
    from ray.dag.collective_node import _CollectiveOperation
    # Add an edge from WRITE of the writer task to READ of the reader task.
    # Set synchronous nodes for accelerator P2P operations.
    for task_idx, task in idx_to_task.items():
        if not (
            isinstance(task.dag_node, ClassMethodNode)
            or isinstance(task.dag_node, CollectiveOutputNode)
        ):
            # The graph is used to generate an execution schedule for each actor.
            # The edge from the InputNode has no impact on the final execution
            # schedule.
            continue
        if (
            isinstance(task.dag_node, ClassMethodNode)
            and task.dag_node.is_class_method_output
        ):
            # Class method output node dependencies are handled at its upstream:
            # i.e., class method node
            continue
        for downstream_task_idx in task.downstream_task_idxs:
            downstream_dag_node = idx_to_task[downstream_task_idx].dag_node
            if isinstance(downstream_dag_node, MultiOutputNode):
                continue
            write_node = graph[task_idx][_DAGNodeOperationType.WRITE]
            if (
                isinstance(downstream_dag_node, ClassMethodNode)
                and downstream_dag_node.is_class_method_output
            ):
                # The downstream node is a class-method *output* proxy: connect
                # the writer directly to the consumers of that proxy instead.
                consumer_idxs = idx_to_task[downstream_task_idx].downstream_task_idxs
                for consumer_idx in consumer_idxs:
                    if consumer_idx in graph:
                        read_node = graph[consumer_idx][_DAGNodeOperationType.READ]
                        _add_edge(
                            write_node,
                            read_node,
                            "accelerator" if write_node.requires_accelerator else "shm",
                        )
                        if write_node.requires_accelerator:
                            # Register the write/read pair as one synchronous
                            # accelerator P2P operation.
                            idxs = {
                                (task_idx, _DAGNodeOperationType.WRITE),
                                (consumer_idx, _DAGNodeOperationType.READ),
                            }
                            for node in [write_node, read_node]:
                                node.sync_idxs.update(idxs)
                continue
            read_node = graph[downstream_task_idx][_DAGNodeOperationType.READ]
            _add_edge(
                write_node,
                read_node,
                "accelerator" if write_node.requires_accelerator else "shm",
            )
            if write_node.requires_accelerator:
                idxs = {
                    (task_idx, _DAGNodeOperationType.WRITE),
                    (downstream_task_idx, _DAGNodeOperationType.READ),
                }
                for node in [write_node, read_node]:
                    node.sync_idxs.update(idxs)
    # Set synchronous nodes for accelerator collective operations.
    collective_op_to_idxs: Dict[
        _CollectiveOperation, Set[Tuple[int, _DAGNodeOperationType]]
    ] = defaultdict(set)
    for task_idx, task in idx_to_task.items():
        if (
            isinstance(task.dag_node, CollectiveOutputNode)
            and not task.dag_node.is_class_method_output
        ):
            collective_op_to_idxs[task.dag_node.collective_op].add(
                (task_idx, _DAGNodeOperationType.COMPUTE)
            )
    for idxs in collective_op_to_idxs.values():
        for task_idx, op_type in idxs:
            graph[task_idx][op_type].sync_idxs = idxs
    return graph
def _actor_viz_label(actor: "ray.actor.ActorHandle"):
"""
Returns the label of an actor in the visualization of the execution schedule.
Args:
actor: The actor to be represented.
"""
class_name = actor._ray_actor_creation_function_descriptor.class_name
actor_id = actor._ray_actor_id.hex()
return f"Actor class name: {class_name}\nActor ID: {actor_id}"
def _node_viz_id_and_label(
node: _DAGOperationGraphNode, idx: int, optimized_index: int
):
"""
Returns the visualization id and label of a node. The visualization id is unique
across all nodes.
Args:
node: The node to be represented.
idx: The index of the node in the execution schedule.
optimized_index: The index of the node in the optimized execution schedule.
"""
node_viz_label = node.viz_str() + f" {idx},{optimized_index}"
node_viz_id = f"{node._actor_id}_{node_viz_label}"
return node_viz_id, node_viz_label
def _visualize_execution_schedule(
    actor_to_execution_schedule: Dict[
        "ray.actor.ActorHandle", List[_DAGOperationGraphNode]
    ],
    actor_to_overlapped_schedule: Optional[
        Dict["ray.actor.ActorHandle", List[_DAGOperationGraphNode]]
    ],
    graph: Dict[int, Dict[_DAGNodeOperationType, _DAGOperationGraphNode]],
):
    """
    Visualize the execution schedule for each actor.
    The visualization will be saved as a PNG file named `compiled_graph_schedule.png`.
    Details of the visualization: # noqa
    Node description format:
    [<task_index>] <method_name> <operation> <orig_index>, <overlap_index>
    Node description fields:
    operation: is R(READ), C(COMPUTE), or W(WRITE)
    orig_index: the index in the original execution schedule
    overlap_index: the index in the overlap-communication optimized execution schedule
    If this is different from orig_index, the node is highlighted in red color
    Node grouping:
    The nodes belonging to the same actor are grouped in the same rectangle
    The actor class name and the actor id are shown in the rectangle
    Edges:
    black color (without label): data dependency
    black color (annotated with "shm"): shared memory channel
    blue color (annotated with "accelerator): accelerator channel
    dashed edge: control dependency between compute operations
    Args:
        actor_to_execution_schedule: A dictionary that maps an actor handle to
            the execution schedule which is a list of operation nodes.
        actor_to_overlapped_schedule: A dictionary that maps an actor handle to the
            optimized execution schedule which is a list of operation nodes.
        graph: A graph where each node is a _DAGOperationGraphNode. The key is
            `task_idx`, the index to retrieve its task from `idx_to_task`, and
            the value is a dictionary that maps the _DAGNodeOperationType (READ,
            COMPUTE, or WRITE) to the corresponding _DAGOperationGraphNode. It is
            generated by `_build_dag_node_operation_graph`.
    """
    try:
        import graphviz
    except ImportError:
        raise ImportError(
            "Please install graphviz to visualize the execution schedule. "
            "You can install it by running `pip install graphviz`."
        )
    dot = graphviz.Digraph(comment="DAG")
    # A dictionary that maps a node to its visualization id
    node_to_viz_id: Dict[_DAGOperationGraphNode, str] = {}
    if actor_to_overlapped_schedule is None:
        # TODO(rui): make the visualization more concise by only displaying
        # the original schedule
        actor_to_overlapped_schedule = actor_to_execution_schedule
    # First pass: emit one subgraph (cluster) per actor containing its nodes.
    for actor, execution_nodes in actor_to_execution_schedule.items():
        overlapped_schedule = actor_to_overlapped_schedule[actor]
        node_to_optimized_index = {
            node: i for i, node in enumerate(overlapped_schedule)
        }
        actor_id = actor._ray_actor_id.hex()
        with dot.subgraph(name=f"cluster_{actor_id}") as subgraph:
            subgraph.attr(rank=actor_id, label=_actor_viz_label(actor))
            for i, node in enumerate(execution_nodes):
                optimized_index = node_to_optimized_index.get(node)
                node_viz_id, node_viz_label = _node_viz_id_and_label(
                    node, i, optimized_index
                )
                # Highlight nodes whose position changed after overlapping.
                color = "red" if optimized_index != i else "black"
                subgraph.node(node_viz_id, node_viz_label, color=color)
                node_to_viz_id[node] = node_viz_id
    # Second pass: emit the dependency edges between the registered nodes.
    for actor, execution_nodes in actor_to_execution_schedule.items():
        for i, node in enumerate(execution_nodes):
            node_viz_id = node_to_viz_id[node]
            for out_edge, viz_info in node.out_edges.items():
                label, control_dependency = viz_info
                out_task_idx, out_op_type = out_edge
                out_node = graph[out_task_idx][out_op_type]
                out_node_viz_id = node_to_viz_id[out_node]
                color = "blue" if label == "accelerator" else "black"
                style = "dashed" if control_dependency else "solid"
                dot.edge(
                    node_viz_id, out_node_viz_id, label=label, color=color, style=style
                )
    # Add legend
    with dot.subgraph(name="cluster_legend") as legend:
        legend.attr(label="Legend", labelloc="t", fontsize="20", bgcolor="lightgrey")
        # Single node and its explanation
        legend.node("example_node", "[0] bwd C 10,10\n")
        explanation = (
            '<<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0">'  # noqa
            '<TR><TD ALIGN="LEFT"><B>Node description format:</B></TD></TR>'
            '<TR><TD ALIGN="LEFT">[&lt;task_index&gt;] &lt;method_name&gt; &lt;operation&gt; &lt;orig_index&gt;,&lt;overlap_index&gt;</TD></TR>'  # noqa
            "<TR><TD></TD></TR>"
            '<TR><TD ALIGN="LEFT"><B>Node description fields:</B></TD></TR>'
            '<TR><TD ALIGN="LEFT">operation: is R(READ), C(COMPUTE), or W(WRITE)</TD></TR>'  # noqa
            '<TR><TD ALIGN="LEFT">orig_index: the index in the original execution schedule</TD></TR>'  # noqa
            '<TR><TD ALIGN="LEFT">overlap_index: the index in the overlap-communication optimized execution schedule</TD></TR>'  # noqa
            '<TR><TD ALIGN="LEFT">If this is different from orig_index, the node is highlighted in <FONT COLOR="red">red color</FONT></TD></TR>'  # noqa
            "<TR><TD></TD></TR>"
            '<TR><TD ALIGN="LEFT"><B>Node grouping:</B></TD></TR>'
            '<TR><TD ALIGN="LEFT">The nodes belonging to the same actor are grouped in the same rectangle</TD></TR>'  # noqa
            '<TR><TD ALIGN="LEFT">The actor class name and the actor id are shown in the rectangle</TD></TR>'  # noqa
            "<TR><TD></TD></TR>"
            '<TR><TD ALIGN="LEFT"><B>Edges:</B></TD></TR>'
            '<TR><TD ALIGN="LEFT">black color (without label): data dependency</TD></TR>'  # noqa
            '<TR><TD ALIGN="LEFT">black color (annotated with "shm"): shared memory channel</TD></TR>'  # noqa
            '<TR><TD ALIGN="LEFT"><FONT COLOR="blue">blue color</FONT> (annotated with "accelerator): accelerator channel</TD></TR>'  # noqa
            '<TR><TD ALIGN="LEFT">dashed edge: control dependency between compute operations</TD></TR>'  # noqa
            "</TABLE>>"
        )
        legend.node("example_explanation", explanation, shape="plaintext")
        legend.edge("example_node", "example_explanation", style="invis")
    logger.info(
        "Writing compiled graph schedule visualization "
        "to compiled_graph_schedule.png"
    )
    dot.render("compiled_graph_schedule", format="png", view=False)
def _generate_actor_to_execution_schedule(
    graph: Dict[int, Dict[_DAGNodeOperationType, _DAGOperationGraphNode]],
) -> Dict["ray.actor.ActorHandle", List[_DAGOperationGraphNode]]:
    """
    Generate an execution schedule for each actor. The schedule is a list of
    operation nodes to be executed. The function uses a topological sort
    algorithm to generate the schedule.
    Args:
        graph: A graph where each node is a _DAGOperationGraphNode. The key is
            `task_idx`, the index to retrieve its task from `idx_to_task`, and
            the value is a dictionary that maps the _DAGNodeOperationType (READ,
            COMPUTE, or WRITE) to the corresponding _DAGOperationGraphNode. It is
            generated by `_build_dag_node_operation_graph`.
    Returns:
        actor_to_execution_schedule: A dictionary that maps an actor handle to
            the execution schedule which is a list of operation nodes to be
            executed.
    """
    # Mapping from the actor handle to the execution schedule which is a list
    # of operations to be executed.
    actor_to_execution_schedule: Dict[
        "ray.actor.ActorHandle", List[_DAGOperationGraphNode]
    ] = defaultdict(list)
    # A dictionary mapping an actor id to a list of candidate nodes. The list
    # is maintained as a priority queue, so the head of the queue, i.e.,
    # `candidates[0]`, is the node with the smallest `bind_index`.
    actor_to_candidates: Dict[
        "ray._raylet.ActorID", List[_DAGOperationGraphNode]
    ] = defaultdict(list)
    for _, node_dict in graph.items():
        for _, node in node_dict.items():
            # A node with a zero in-degree edge means all of its dependencies
            # have been satisfied, including both data and control dependencies.
            # Therefore, it is a candidate for execution.
            if node.in_degree == 0:
                _push_candidate_node_if_ready(actor_to_candidates, graph, node)
    visited_nodes = set()
    # Use topological sort algorithm to generate the execution schedule.
    while True:
        # Select a list of nodes to be executed. There are three cases:
        # 1. If a selected node is not an accelerator operation, only itself is returned.
        # 2. If a selected node is an accelerator write operation, the corresponding accelerator
        #    read operations are also returned.
        # 3. If a selected node is an accelerator collective operation, all the nodes in
        #    its collective operation are returned.
        nodes = _select_next_nodes(actor_to_candidates, graph)
        if nodes is None:
            # No candidates remain; the sort is complete.
            break
        # Add the selected nodes to the execution schedule.
        for node in nodes:
            assert node not in visited_nodes
            visited_nodes.add(node)
            actor_to_execution_schedule[node.actor_handle].append(node)
        # Update the in-degree of the downstream nodes.
        for node in nodes:
            for out_node_task_idx, out_node_type in node.out_edges:
                out_node = graph[out_node_task_idx][out_node_type]
                if out_node in visited_nodes:
                    # If the downstream node is already visited, it has been added
                    # to the execution schedule. They are the accelerator read nodes in
                    # case 2.
                    continue
                out_node.in_edges.pop((node.task_idx, node.operation.type))
                if out_node.in_degree == 0:
                    _push_candidate_node_if_ready(actor_to_candidates, graph, out_node)
    # Each task contributes exactly one READ, COMPUTE, and WRITE node.
    assert len(visited_nodes) == len(graph) * 3, "Expected all nodes to be visited"
    return actor_to_execution_schedule
def _generate_overlapped_execution_schedule(
actor_to_execution_schedule: Dict[
"ray.actor.ActorHandle", List[_DAGOperationGraphNode]
],
) -> Dict["ray.actor.ActorHandle", List[_DAGOperationGraphNode]]:
"""
From an existing execution schedule, generate a new schedule by overlapping
computation and communication.
Currently, the algorithm generates a new schedule for each actor as follows:
For each accelerator read operation (i.e., recv), scan backwards to find the nearest
compute node to swap with so that the accelerator read operation can be overlapped
with computation.
Collective operations are not yet supported.
Args:
actor_to_execution_schedule: A dictionary that maps an actor handle to
the existing execution schedule for the actor. The schedule is a list
is a list of operations to be executed.
Returns:
A dictionary that maps an actor handle to the overlapped execution schedule
for the actor.
"""
actor_to_overlapped_schedule: Dict[
"ray.actor.ActorHandle", List[_DAGOperationGraphNode]
] = copy.deepcopy(actor_to_execution_schedule)
for overlapped_schedule in actor_to_overlapped_schedule.values():
for i in range(len(overlapped_schedule)):
if (
overlapped_schedule[i].operation.type == _DAGNodeOperationType.READ
and overlapped_schedule[i].requires_accelerator
):
# For each accelerator read operation (i.e., recv), scan backwards
# to find the nearest compute node to swap with so that
# the accelerator read operation can be overlapped with computation.
for j in range(i - 1, -1, -1):
if (
overlapped_schedule[j].operation.type
== _DAGNodeOperationType.COMPUTE
):
# Found a desired compute operation, make the swap
accelerator_read_op = overlapped_schedule[i]
prev_ops = overlapped_schedule[j:i]
overlapped_schedule[j + 1 : i + 1] = prev_ops
overlapped_schedule[j] = accelerator_read_op
break
if (
overlapped_schedule[j].operation.type
== _DAGNodeOperationType.READ
or overlapped_schedule[j].operation.type
== _DAGNodeOperationType.WRITE
) and overlapped_schedule[j].requires_accelerator:
# Found an accelerator read/write operation, skip the overlap
# optimization to keep relative order of accelerator operations
break
return actor_to_overlapped_schedule
def _extract_execution_schedule(
actor_to_execution_schedule: Dict[
"ray.actor.ActorHandle", List[_DAGOperationGraphNode]
],
) -> Dict["ray.actor.ActorHandle", List[_DAGNodeOperation]]:
"""
Extract _DAGNodeOperation from _DAGOperationGraphNode in the schedule
and discard unnecessary information.
"""
return {
actor: [node.operation for node in nodes]
for actor, nodes in actor_to_execution_schedule.items()
}
| _DAGOperationGraphNode |
python | PyCQA__pylint | tests/pyreverse/functional/class_diagrams/annotations/attributes_annotation.py | {
"start": 149,
"end": 568
} | class ____:
class_attr: list[Dummy] = []
def __init__(self, param: str) -> None:
self.param = param
self.union: Union[int, str] = ""
self.alternative_union_syntax: str | int = 0
self.optional: Optional[Dummy] = None
self.alternative_optional: int | None = None
self.alternative_optional_swapped: None | int = None
self.optional_union: int | str = None
| Dummy2 |
python | doocs__leetcode | lcci/16.06.Smallest Difference/Solution.py | {
"start": 0,
"end": 338
} | class ____:
def smallestDifference(self, a: List[int], b: List[int]) -> int:
b.sort()
ans = inf
n = len(b)
for x in a:
j = bisect_left(b, x)
if j < n:
ans = min(ans, b[j] - x)
if j:
ans = min(ans, x - b[j - 1])
return ans
| Solution |
python | sqlalchemy__sqlalchemy | test/orm/declarative/test_dc_transforms_future_anno_sync.py | {
"start": 2575,
"end": 33697
} | class ____(AssertsCompiledSQL, fixtures.TestBase):
@testing.fixture(params=["(MAD, DB)", "(DB, MAD)"])
def dc_decl_base(self, request, metadata):
_md = metadata
if request.param == "(MAD, DB)":
class Base(MappedAsDataclass, DeclarativeBase):
_mad_before = True
metadata = _md
type_annotation_map = {
str: String().with_variant(
String(50), "mysql", "mariadb", "oracle"
)
}
else:
# test #8665 by reversing the order of the classes
class Base(DeclarativeBase, MappedAsDataclass):
_mad_before = False
metadata = _md
type_annotation_map = {
str: String().with_variant(
String(50), "mysql", "mariadb", "oracle"
)
}
yield Base
Base.registry.dispose()
def test_basic_constructor_repr_base_cls(
self, dc_decl_base: Type[MappedAsDataclass]
):
class A(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: Mapped[str]
x: Mapped[Optional[int]] = mapped_column(default=None)
bs: Mapped[List["B"]] = relationship( # noqa: F821
default_factory=list
)
class B(dc_decl_base):
__tablename__ = "b"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: Mapped[str]
a_id: Mapped[Optional[int]] = mapped_column(
ForeignKey("a.id"), init=False
)
x: Mapped[Optional[int]] = mapped_column(default=None)
A.__qualname__ = "some_module.A"
B.__qualname__ = "some_module.B"
eq_(
pyinspect.getfullargspec(A.__init__),
pyinspect.FullArgSpec(
args=["self", "data", "x", "bs"],
varargs=None,
varkw=None,
defaults=(LoaderCallableStatus.DONT_SET, mock.ANY),
kwonlyargs=[],
kwonlydefaults=None,
annotations={},
),
)
eq_(
pyinspect.getfullargspec(B.__init__),
pyinspect.FullArgSpec(
args=["self", "data", "x"],
varargs=None,
varkw=None,
defaults=(LoaderCallableStatus.DONT_SET,),
kwonlyargs=[],
kwonlydefaults=None,
annotations={},
),
)
a2 = A("10", x=5, bs=[B("data1"), B("data2", x=12)])
eq_(
repr(a2),
"some_module.A(id=None, data='10', x=5, "
"bs=[some_module.B(id=None, data='data1', a_id=None, x=None), "
"some_module.B(id=None, data='data2', a_id=None, x=12)])",
)
a3 = A("data")
eq_(repr(a3), "some_module.A(id=None, data='data', x=None, bs=[])")
# TODO: get this test to work with future anno mode as well
@testing.exclusions.closed(
"doesn't work for future annotations mode yet"
) # noqa: E501
def test_generic_class(self):
"""further test for #8665"""
T_Value = TypeVar("T_Value")
class SomeBaseClass(DeclarativeBase):
pass
class GenericSetting(
MappedAsDataclass, SomeBaseClass, Generic[T_Value]
):
__tablename__ = "xx"
id: Mapped[int] = mapped_column(
Integer, primary_key=True, init=False
)
key: Mapped[str] = mapped_column(String, init=True)
value: Mapped[T_Value] = mapped_column(
JSON, init=True, default_factory=lambda: {}
)
new_instance: GenericSetting[Dict[str, Any]] = ( # noqa: F841
GenericSetting(key="x", value={"foo": "bar"})
)
def test_no_anno_doesnt_go_into_dc(
self, dc_decl_base: Type[MappedAsDataclass]
):
class User(dc_decl_base):
__tablename__: ClassVar[Optional[str]] = "user"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
username: Mapped[str]
password: Mapped[str]
addresses: Mapped[List["Address"]] = relationship( # noqa: F821
default_factory=list
)
class Address(dc_decl_base):
__tablename__: ClassVar[Optional[str]] = "address"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
# should not be in the dataclass constructor
user_id = mapped_column(ForeignKey(User.id))
email_address: Mapped[str]
a1 = Address("email@address")
eq_(a1.email_address, "email@address")
def test_warn_on_non_dc_mixin(self):
class _BaseMixin:
create_user: Mapped[int] = mapped_column()
update_user: Mapped[Optional[int]] = mapped_column(
default=None, init=False
)
class Base(DeclarativeBase, MappedAsDataclass, _BaseMixin):
pass
class SubMixin:
foo: Mapped[str]
bar: Mapped[str] = mapped_column()
with testing.expect_raises_message(
exc.InvalidRequestError,
r"When transforming .* to a dataclass, attribute\(s\) "
r"'foo', 'bar' originates from superclass .*SubMixin",
):
class User(SubMixin, Base):
__tablename__ = "sys_user"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
username: Mapped[str] = mapped_column(String)
password: Mapped[str] = mapped_column(String)
def test_basic_constructor_repr_cls_decorator(
self, registry: _RegistryType
):
@registry.mapped_as_dataclass()
class A:
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: Mapped[str]
x: Mapped[Optional[int]] = mapped_column(default=None)
bs: Mapped[List["B"]] = relationship( # noqa: F821
default_factory=list
)
@registry.mapped_as_dataclass()
class B:
__tablename__ = "b"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
a_id = mapped_column(ForeignKey("a.id"), init=False)
data: Mapped[str]
x: Mapped[Optional[int]] = mapped_column(default=None)
A.__qualname__ = "some_module.A"
B.__qualname__ = "some_module.B"
eq_(
pyinspect.getfullargspec(A.__init__),
pyinspect.FullArgSpec(
args=["self", "data", "x", "bs"],
varargs=None,
varkw=None,
defaults=(LoaderCallableStatus.DONT_SET, mock.ANY),
kwonlyargs=[],
kwonlydefaults=None,
annotations={},
),
)
eq_(
pyinspect.getfullargspec(B.__init__),
pyinspect.FullArgSpec(
args=["self", "data", "x"],
varargs=None,
varkw=None,
defaults=(LoaderCallableStatus.DONT_SET,),
kwonlyargs=[],
kwonlydefaults=None,
annotations={},
),
)
a2 = A("10", x=5, bs=[B("data1"), B("data2", x=12)])
# note a_id isn't included because it wasn't annotated
eq_(
repr(a2),
"some_module.A(id=None, data='10', x=5, "
"bs=[some_module.B(id=None, data='data1', x=None), "
"some_module.B(id=None, data='data2', x=12)])",
)
a3 = A("data")
eq_(repr(a3), "some_module.A(id=None, data='data', x=None, bs=[])")
# TODO: get this test to work with future anno mode as well
@testing.exclusions.closed(
"doesn't work for future annotations mode yet"
) # noqa: E501
@testing.variation("dc_type", ["fn_decorator", "decorator", "superclass"])
def test_dataclass_fn(self, dc_type: Variation):
annotations = {}
def dc_callable(kls, **kw) -> Type[Any]:
annotations[kls] = kls.__annotations__
return dataclasses.dataclass(kls, **kw) # type: ignore
if dc_type.fn_decorator:
reg = registry()
@mapped_as_dataclass(reg, dataclass_callable=dc_callable)
class MappedClass:
__tablename__ = "mapped_class"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str]
eq_(annotations, {MappedClass: {"id": int, "name": str}})
elif dc_type.decorator:
reg = registry()
@reg.mapped_as_dataclass(dataclass_callable=dc_callable)
class MappedClass:
__tablename__ = "mapped_class"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str]
eq_(annotations, {MappedClass: {"id": int, "name": str}})
elif dc_type.superclass:
class Base(DeclarativeBase):
pass
class Mixin(MappedAsDataclass, dataclass_callable=dc_callable):
id: Mapped[int] = mapped_column(primary_key=True)
class MappedClass(Mixin, Base):
__tablename__ = "mapped_class"
name: Mapped[str]
eq_(
annotations,
{Mixin: {"id": int}, MappedClass: {"id": int, "name": str}},
)
else:
dc_type.fail()
def test_default_fn(self, dc_decl_base: Type[MappedAsDataclass]):
class A(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: Mapped[str] = mapped_column(default="d1")
data2: Mapped[str] = mapped_column(default_factory=lambda: "d2")
a1 = A()
eq_(a1.data, "d1")
eq_(a1.data2, "d2")
def test_default_factory_vs_collection_class(
self, dc_decl_base: Type[MappedAsDataclass]
):
# this is currently the error raised by dataclasses. We can instead
# do this validation ourselves, but overall I don't know that we
# can hit every validation and rule that's in dataclasses
with expect_raises_message(
ValueError, "cannot specify both default and default_factory"
):
class A(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: Mapped[str] = mapped_column(
default="d1", default_factory=lambda: "d2"
)
def test_combine_args_from_pep593(self, decl_base: Type[DeclarativeBase]):
"""test that we can set up column-level defaults separate from
dataclass defaults with a pep593 setup; however the dataclass
defaults need to override the insert_defaults so that they
take place on INSERT
"""
global intpk, str30, s_str30, user_fk
intpk = Annotated[int, mapped_column(primary_key=True)]
str30 = Annotated[
str, mapped_column(String(30), insert_default=func.foo())
]
s_str30 = Annotated[
str,
mapped_column(String(30), server_default="some server default"),
]
user_fk = Annotated[int, mapped_column(ForeignKey("user_account.id"))]
class User(MappedAsDataclass, decl_base):
__tablename__ = "user_account"
# we need this case for dataclasses that can't derive things
# from Annotated yet at the typing level
id: Mapped[intpk] = mapped_column(init=False)
name_plain: Mapped[str30] = mapped_column()
name_no_init: Mapped[str30] = mapped_column(init=False)
name_none: Mapped[Optional[str30]] = mapped_column(default=None)
name_insert_none: Mapped[Optional[str30]] = mapped_column(
insert_default=None, init=False
)
name: Mapped[str30] = mapped_column(default="hi")
name_insert: Mapped[str30] = mapped_column(
insert_default="hi", init=False
)
name2: Mapped[s_str30] = mapped_column(default="there")
name2_insert: Mapped[s_str30] = mapped_column(
insert_default="there", init=False
)
addresses: Mapped[List["Address"]] = relationship( # noqa: F821
back_populates="user", default_factory=list
)
class Address(MappedAsDataclass, decl_base):
__tablename__ = "address"
id: Mapped[intpk] = mapped_column(init=False)
email_address: Mapped[str]
user_id: Mapped[user_fk] = mapped_column(init=False)
user: Mapped[Optional["User"]] = relationship(
back_populates="addresses", default=None
)
is_true(User.__table__.c.id.primary_key)
# the default from the Annotated overrides mapped_cols that have
# nothing for default or insert default
is_true(User.__table__.c.name_plain.default.arg.compare(func.foo()))
is_true(User.__table__.c.name_no_init.default.arg.compare(func.foo()))
# mapped cols that have None for default or insert default, that
# default overrides
is_true(User.__table__.c.name_none.default is None)
is_true(User.__table__.c.name_insert_none.default is None)
# mapped cols that have a value for default or insert default, that
# default overrides
is_true(User.__table__.c.name.default.arg == "hi")
is_true(User.__table__.c.name2.default.arg == "there")
is_true(User.__table__.c.name_insert.default.arg == "hi")
is_true(User.__table__.c.name2_insert.default.arg == "there")
eq_(User.__table__.c.name2.server_default.arg, "some server default")
is_true(Address.__table__.c.user_id.references(User.__table__.c.id))
u1 = User(name_plain="name")
eq_(u1.name_none, None)
eq_(u1.name_insert_none, None)
eq_(u1.name, "hi")
eq_(u1.name2, "there")
eq_(u1.name_insert, None)
eq_(u1.name2_insert, None)
def test_inheritance(self, dc_decl_base: Type[MappedAsDataclass]):
class Person(dc_decl_base):
__tablename__ = "person"
person_id: Mapped[int] = mapped_column(
primary_key=True, init=False
)
name: Mapped[str]
type: Mapped[str] = mapped_column(init=False)
__mapper_args__ = {"polymorphic_on": type}
class Engineer(Person):
__tablename__ = "engineer"
person_id: Mapped[int] = mapped_column(
ForeignKey("person.person_id"), primary_key=True, init=False
)
status: Mapped[str] = mapped_column(String(30))
engineer_name: Mapped[str]
primary_language: Mapped[str]
__mapper_args__ = {"polymorphic_identity": "engineer"}
e1 = Engineer("nm", "st", "en", "pl")
eq_(e1.name, "nm")
eq_(e1.status, "st")
eq_(e1.engineer_name, "en")
eq_(e1.primary_language, "pl")
def test_non_mapped_fields_wo_mapped_or_dc(
self, dc_decl_base: Type[MappedAsDataclass]
):
class A(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: str
ctrl_one: str = dataclasses.field()
some_field: int = dataclasses.field(default=5)
a1 = A("data", "ctrl_one", 5)
eq_(
dataclasses.asdict(a1),
{
"ctrl_one": "ctrl_one",
"data": "data",
"id": None,
"some_field": 5,
},
)
def test_non_mapped_fields_wo_mapped_or_dc_w_inherits(
self, dc_decl_base: Type[MappedAsDataclass]
):
class A(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: str
ctrl_one: str = dataclasses.field()
some_field: int = dataclasses.field(default=5)
class B(A):
b_data: Mapped[str] = mapped_column(default="bd")
# ensure we didnt break dataclasses contract of removing Field
# issue #8880
eq_(A.__dict__["some_field"], 5)
assert "ctrl_one" not in A.__dict__
b1 = B(data="data", ctrl_one="ctrl_one", some_field=5, b_data="x")
eq_(
dataclasses.asdict(b1),
{
"ctrl_one": "ctrl_one",
"data": "data",
"id": None,
"some_field": 5,
"b_data": "x",
},
)
def test_init_var(self, dc_decl_base: Type[MappedAsDataclass]):
class User(dc_decl_base):
__tablename__ = "user_account"
id: Mapped[int] = mapped_column(init=False, primary_key=True)
name: Mapped[str]
password: InitVar[str]
repeat_password: InitVar[str]
password_hash: Mapped[str] = mapped_column(
init=False, nullable=False
)
def __post_init__(self, password: str, repeat_password: str):
if password != repeat_password:
raise ValueError("passwords do not match")
self.password_hash = f"some hash... {password}"
u1 = User(name="u1", password="p1", repeat_password="p1")
eq_(u1.password_hash, "some hash... p1")
self.assert_compile(
select(User),
"SELECT user_account.id, user_account.name, "
"user_account.password_hash FROM user_account",
)
def test_integrated_dc(self, dc_decl_base: Type[MappedAsDataclass]):
"""We will be telling users "this is a dataclass that is also
mapped". Therefore, they will want *any* kind of attribute to do what
it would normally do in a dataclass, including normal types without any
field and explicit use of dataclasses.field(). additionally, we'd like
``Mapped`` to mean "persist this attribute". So the absence of
``Mapped`` should also mean something too.
"""
class A(dc_decl_base):
__tablename__ = "a"
ctrl_one: str = dataclasses.field()
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: Mapped[str]
some_field: int = dataclasses.field(default=5)
some_none_field: Optional[str] = dataclasses.field(default=None)
some_other_int_field: int = 10
# some field is part of the constructor
a1 = A("ctrlone", "datafield")
eq_(
dataclasses.asdict(a1),
{
"ctrl_one": "ctrlone",
"data": "datafield",
"id": None,
"some_field": 5,
"some_none_field": None,
"some_other_int_field": 10,
},
)
a2 = A(
"ctrlone",
"datafield",
some_field=7,
some_other_int_field=12,
some_none_field="x",
)
eq_(
dataclasses.asdict(a2),
{
"ctrl_one": "ctrlone",
"data": "datafield",
"id": None,
"some_field": 7,
"some_none_field": "x",
"some_other_int_field": 12,
},
)
# only Mapped[] is mapped
self.assert_compile(select(A), "SELECT a.id, a.data FROM a")
eq_(
pyinspect.getfullargspec(A.__init__),
pyinspect.FullArgSpec(
args=[
"self",
"ctrl_one",
"data",
"some_field",
"some_none_field",
"some_other_int_field",
],
varargs=None,
varkw=None,
defaults=(5, None, 10),
kwonlyargs=[],
kwonlydefaults=None,
annotations={},
),
)
def test_dc_on_top_of_non_dc(self, decl_base: Type[DeclarativeBase]):
class Person(decl_base):
__tablename__ = "person"
person_id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str]
type: Mapped[str] = mapped_column()
__mapper_args__ = {"polymorphic_on": type}
class Engineer(MappedAsDataclass, Person):
__tablename__ = "engineer"
person_id: Mapped[int] = mapped_column(
ForeignKey("person.person_id"), primary_key=True, init=False
)
status: Mapped[str] = mapped_column(String(30))
engineer_name: Mapped[str]
primary_language: Mapped[str]
__mapper_args__ = {"polymorphic_identity": "engineer"}
e1 = Engineer("st", "en", "pl")
eq_(e1.status, "st")
eq_(e1.engineer_name, "en")
eq_(e1.primary_language, "pl")
eq_(
pyinspect.getfullargspec(Person.__init__),
# the boring **kw __init__
pyinspect.FullArgSpec(
args=["self"],
varargs=None,
varkw="kwargs",
defaults=None,
kwonlyargs=[],
kwonlydefaults=None,
annotations={},
),
)
eq_(
pyinspect.getfullargspec(Engineer.__init__),
# the exciting dataclasses __init__
pyinspect.FullArgSpec(
args=["self", "status", "engineer_name", "primary_language"],
varargs=None,
varkw=None,
defaults=None,
kwonlyargs=[],
kwonlydefaults=None,
annotations={},
),
)
def test_compare(self, dc_decl_base: Type[MappedAsDataclass]):
class A(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, compare=False)
data: Mapped[str]
a1 = A(id=0, data="foo")
a2 = A(id=1, data="foo")
eq_(a1, a2)
def test_kw_only_attribute(self, dc_decl_base: Type[MappedAsDataclass]):
class A(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str] = mapped_column(kw_only=True)
fas = pyinspect.getfullargspec(A.__init__)
eq_(fas.args, ["self", "id"])
eq_(fas.kwonlyargs, ["data"])
@testing.combinations(True, False, argnames="unsafe_hash")
def test_hash_attribute(
self, dc_decl_base: Type[MappedAsDataclass], unsafe_hash
):
class A(dc_decl_base, unsafe_hash=unsafe_hash):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, hash=False)
data: Mapped[str] = mapped_column(hash=True)
a = A(id=1, data="x")
if not unsafe_hash or not dc_decl_base._mad_before:
with expect_raises(TypeError):
a_hash1 = hash(a)
else:
a_hash1 = hash(a)
a.id = 41
eq_(hash(a), a_hash1)
a.data = "y"
ne_(hash(a), a_hash1)
def test_kw_only_dataclass_constant(
self, dc_decl_base: Type[MappedAsDataclass]
):
class Mixin(MappedAsDataclass):
a: Mapped[int] = mapped_column(primary_key=True)
b: Mapped[int] = mapped_column(default=1)
class Child(Mixin, dc_decl_base):
__tablename__ = "child"
_: dataclasses.KW_ONLY
c: Mapped[int]
c1 = Child(1, c=5)
eq_(c1, Child(a=1, b=1, c=5))
def test_mapped_column_overrides(self, dc_decl_base):
"""test #8688"""
class TriggeringMixin(MappedAsDataclass):
mixin_value: Mapped[int] = mapped_column(BigInteger)
class NonTriggeringMixin(MappedAsDataclass):
mixin_value: Mapped[int]
class Foo(dc_decl_base, TriggeringMixin):
__tablename__ = "foo"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
foo_value: Mapped[float] = mapped_column(default=78)
class Bar(dc_decl_base, NonTriggeringMixin):
__tablename__ = "bar"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
bar_value: Mapped[float] = mapped_column(default=78)
f1 = Foo(mixin_value=5)
eq_(f1.foo_value, 78)
b1 = Bar(mixin_value=5)
eq_(b1.bar_value, 78)
def test_mixing_MappedAsDataclass_with_decorator_raises(self, registry):
"""test #9211"""
class Mixin(MappedAsDataclass):
id: Mapped[int] = mapped_column(primary_key=True, init=False)
with expect_raises_message(
exc.InvalidRequestError,
"Class .*Foo.* is already a dataclass; ensure that "
"base classes / decorator styles of establishing dataclasses "
"are not being mixed. ",
):
@mapped_as_dataclass(registry)
class Foo(Mixin):
bar_value: Mapped[float] = mapped_column(default=78)
def test_MappedAsDataclass_table_provided(self, registry):
"""test #11973"""
with expect_raises_message(
exc.InvalidRequestError,
"Class .*Foo.* already defines a '__table__'. "
"ORM Annotated Dataclasses do not support a pre-existing "
"'__table__' element",
):
@registry.mapped_as_dataclass
class Foo:
__table__ = Table("foo", registry.metadata)
foo: Mapped[float]
def test_dataclass_exception_wrapped(self, dc_decl_base):
with expect_raises_message(
exc.InvalidRequestError,
r"Python dataclasses error encountered when creating dataclass "
r"for \'Foo\': .*Please refer to Python dataclasses.*",
) as ec:
class Foo(dc_decl_base):
id: Mapped[int] = mapped_column(primary_key=True, init=False)
foo_value: Mapped[float] = mapped_column(default=78)
foo_no_value: Mapped[float] = mapped_column()
__tablename__ = "foo"
is_true(isinstance(ec.error.__cause__, TypeError))
def test_dataclass_default(self, dc_decl_base):
"""test for #9879"""
def c10():
return 10
def c20():
return 20
class A(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
def_init: Mapped[int] = mapped_column(default=42)
call_init: Mapped[int] = mapped_column(default_factory=c10)
def_no_init: Mapped[int] = mapped_column(default=13, init=False)
call_no_init: Mapped[int] = mapped_column(
default_factory=c20, init=False
)
a = A(id=100)
eq_(a.def_init, 42)
eq_(a.call_init, 10)
eq_(a.def_no_init, 13)
eq_(a.call_no_init, 20)
fields = {f.name: f for f in dataclasses.fields(A)}
eq_(fields["def_init"].default, LoaderCallableStatus.DONT_SET)
eq_(fields["call_init"].default_factory, c10)
eq_(fields["def_no_init"].default, dataclasses.MISSING)
ne_(fields["def_no_init"].default_factory, dataclasses.MISSING)
eq_(fields["call_no_init"].default_factory, c20)
def test_dataclass_default_callable(self, dc_decl_base):
"""test for #9936"""
def cd():
return 42
with expect_deprecated(
"Callable object passed to the ``default`` parameter for "
"attribute 'value' in a ORM-mapped Dataclasses context is "
"ambiguous, and this use will raise an error in a future "
"release. If this callable is intended to produce Core level ",
"Callable object passed to the ``default`` parameter for "
"attribute 'no_init' in a ORM-mapped Dataclasses context is "
"ambiguous, and this use will raise an error in a future "
"release. If this callable is intended to produce Core level ",
):
class A(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
value: Mapped[int] = mapped_column(default=cd)
no_init: Mapped[int] = mapped_column(default=cd, init=False)
a = A(id=100)
is_false("no_init" in a.__dict__)
eq_(a.value, cd)
eq_(a.no_init, None)
fields = {f.name: f for f in dataclasses.fields(A)}
eq_(fields["value"].default, cd)
eq_(fields["no_init"].default, cd)
def test_dataclass_metadata(self, dc_decl_base):
class A(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
value: Mapped[str] = mapped_column(
dataclass_metadata={"meta_key": "meta_value"}
)
fields = {f.name: f for f in dataclasses.fields(A)}
eq_(fields["id"].metadata, {})
eq_(fields["value"].metadata, {"meta_key": "meta_value"})
@testing.requires.python314
def test_apply_dc_deferred_annotations(self, dc_decl_base):
"""test for #12952"""
class Message(dc_decl_base):
__tablename__ = "message"
id: Mapped[int] = mapped_column(primary_key=True)
content: Mapped[str]
user_id: Mapped[int] = mapped_column(ForeignKey("user.id"))
# annotation is unquoted and refers to nonexistent class (and if
# this is test_dc_transforms.py, __future__ annotations is not
# turned on), so would be rejected by any python interpreter < 3.14
# up front. with python 3.14, the dataclass scan takes place
# and has to fetch the annotations using get_annotations()
# so that refs are turned into FwdRef without being resolved
user: Mapped[UnavailableUser] = relationship( # type: ignore # noqa
back_populates="messages"
)
# The key assertion: Message should be a dataclass
is_true(dataclasses.is_dataclass(Message))
# Verify the dataclass has proper __init__ signature
sig = pyinspect.signature(Message.__init__)
is_true("id" in sig.parameters)
is_true("content" in sig.parameters)
is_true("user_id" in sig.parameters)
| DCTransformsTest |
python | keras-team__keras | keras/src/utils/jax_layer.py | {
"start": 993,
"end": 24297
} | class ____(Layer):
"""Keras Layer that wraps a JAX model.
This layer enables the use of JAX components within Keras when using JAX as
the backend for Keras.
## Model function
This layer accepts JAX models in the form of a function, `call_fn`, which
must take the following arguments with these exact names:
- `params`: trainable parameters of the model.
- `state` (*optional*): non-trainable state of the model. Can be omitted if
the model has no non-trainable state.
- `rng` (*optional*): a `jax.random.PRNGKey` instance. Can be omitted if the
model does not need RNGs, neither during training nor during inference.
- `inputs`: inputs to the model, a JAX array or a `PyTree` of arrays.
- `training` (*optional*): an argument specifying if we're in training mode
or inference mode, `True` is passed in training mode. Can be omitted if
the model behaves the same in training mode and inference mode.
The `inputs` argument is mandatory. Inputs to the model must be provided via
a single argument. If the JAX model takes multiple inputs as separate
arguments, they must be combined into a single structure, for instance in a
`tuple` or a `dict`.
## Model weights initialization
The initialization of the `params` and `state` of the model can be handled
by this layer, in which case the `init_fn` argument must be provided. This
allows the model to be initialized dynamically with the right shape.
Alternatively, and if the shape is known, the `params` argument and
optionally the `state` argument can be used to create an already initialized
model.
The `init_fn` function, if provided, must take the following arguments with
these exact names:
- `rng`: a `jax.random.PRNGKey` instance.
- `inputs`: a JAX array or a `PyTree` of arrays with placeholder values to
provide the shape of the inputs.
- `training` (*optional*): an argument specifying if we're in training mode
or inference mode. `True` is always passed to `init_fn`. Can be omitted
regardless of whether `call_fn` has a `training` argument.
## Models with non-trainable state
For JAX models that have non-trainable state:
- `call_fn` must have a `state` argument
- `call_fn` must return a `tuple` containing the outputs of the model and
the new non-trainable state of the model
- `init_fn` must return a `tuple` containing the initial trainable params of
the model and the initial non-trainable state of the model.
This code shows a possible combination of `call_fn` and `init_fn` signatures
for a model with non-trainable state. In this example, the model has a
`training` argument and an `rng` argument in `call_fn`.
```python
def stateful_call(params, state, rng, inputs, training):
outputs = ...
new_state = ...
return outputs, new_state
def stateful_init(rng, inputs):
initial_params = ...
initial_state = ...
return initial_params, initial_state
```
## Models without non-trainable state
For JAX models with no non-trainable state:
- `call_fn` must not have a `state` argument
- `call_fn` must return only the outputs of the model
- `init_fn` must return only the initial trainable params of the model.
This code shows a possible combination of `call_fn` and `init_fn` signatures
for a model without non-trainable state. In this example, the model does not
have a `training` argument and does not have an `rng` argument in `call_fn`.
```python
def stateless_call(params, inputs):
outputs = ...
return outputs
def stateless_init(rng, inputs):
initial_params = ...
return initial_params
```
## Conforming to the required signature
If a model has a different signature than the one required by `JaxLayer`,
one can easily write a wrapper method to adapt the arguments. This example
shows a model that has multiple inputs as separate arguments, expects
multiple RNGs in a `dict`, and has a `deterministic` argument with the
opposite meaning of `training`. To conform, the inputs are combined in a
single structure using a `tuple`, the RNG is split and used the populate the
expected `dict`, and the Boolean flag is negated:
```python
def my_model_fn(params, rngs, input1, input2, deterministic):
...
if not deterministic:
dropout_rng = rngs["dropout"]
keep = jax.random.bernoulli(dropout_rng, dropout_rate, x.shape)
x = jax.numpy.where(keep, x / dropout_rate, 0)
...
...
return outputs
def my_model_wrapper_fn(params, rng, inputs, training):
input1, input2 = inputs
rng1, rng2 = jax.random.split(rng)
rngs = {"dropout": rng1, "preprocessing": rng2}
deterministic = not training
return my_model_fn(params, rngs, input1, input2, deterministic)
keras_layer = JaxLayer(my_model_wrapper_fn, params=initial_params)
```
## Usage with Haiku modules
`JaxLayer` enables the use of [Haiku](https://dm-haiku.readthedocs.io)
components in the form of
[`haiku.Module`](https://dm-haiku.readthedocs.io/en/latest/api.html#module).
This is achieved by transforming the module per the Haiku pattern and then
passing `module.apply` in the `call_fn` parameter and `module.init` in the
`init_fn` parameter if needed.
If the model has non-trainable state, it should be transformed with
[`haiku.transform_with_state`](
https://dm-haiku.readthedocs.io/en/latest/api.html#haiku.transform_with_state).
If the model has no non-trainable state, it should be transformed with
[`haiku.transform`](
https://dm-haiku.readthedocs.io/en/latest/api.html#haiku.transform).
Additionally, and optionally, if the module does not use RNGs in "apply", it
can be transformed with
[`haiku.without_apply_rng`](
https://dm-haiku.readthedocs.io/en/latest/api.html#without-apply-rng).
The following example shows how to create a `JaxLayer` from a Haiku module
that uses random number generators via `hk.next_rng_key()` and takes a
training positional argument:
```python
class MyHaikuModule(hk.Module):
def __call__(self, x, training):
x = hk.Conv2D(32, (3, 3))(x)
x = jax.nn.relu(x)
x = hk.AvgPool((1, 2, 2, 1), (1, 2, 2, 1), "VALID")(x)
x = hk.Flatten()(x)
x = hk.Linear(200)(x)
if training:
x = hk.dropout(rng=hk.next_rng_key(), rate=0.3, x=x)
x = jax.nn.relu(x)
x = hk.Linear(10)(x)
x = jax.nn.softmax(x)
return x
def my_haiku_module_fn(inputs, training):
module = MyHaikuModule()
return module(inputs, training)
transformed_module = hk.transform(my_haiku_module_fn)
keras_layer = JaxLayer(
call_fn=transformed_module.apply,
init_fn=transformed_module.init,
)
```
Args:
call_fn: The function to call the model. See description above for the
list of arguments it takes and the outputs it returns.
init_fn: the function to call to initialize the model. See description
above for the list of arguments it takes and the outputs it returns.
If `None`, then `params` and/or `state` must be provided.
params: A `PyTree` containing all the model trainable parameters. This
allows passing trained parameters or controlling the initialization.
If both `params` and `state` are `None`, `init_fn` is called at
build time to initialize the trainable parameters of the model.
state: A `PyTree` containing all the model non-trainable state. This
allows passing learned state or controlling the initialization. If
both `params` and `state` are `None`, and `call_fn` takes a `state`
argument, then `init_fn` is called at build time to initialize the
non-trainable state of the model.
seed: Seed for random number generator. Optional.
dtype: The dtype of the layer's computations and weights. Can also be a
`keras.DTypePolicy`. Optional. Defaults to the default policy.
"""
def __init__(
self,
call_fn,
init_fn=None,
params=None,
state=None,
seed=None,
**kwargs,
):
if backend.backend() not in ["jax", "tensorflow"]:
raise ValueError(
f"{self.__class__.__name__} is only supported with the JAX or"
f" Tensorflow backend. Current backend: {backend.backend()}"
)
if init_fn is None and params is None and state is None:
raise ValueError(
"`init_fn`, `params` and `state` cannot all be `None`."
)
super().__init__(**kwargs)
self.call_fn = call_fn
self.init_fn = init_fn
self.seed_generator = backend.random.SeedGenerator(seed)
self.tracked_params = self._create_variables(params, trainable=True)
self.tracked_state = self._create_variables(state, trainable=False)
if self.params is not None or self.state is not None:
self._build_at_init()
self.call_fn_arguments = self._validate_signature(
call_fn,
"call_fn",
{"params", "state", "rng", "inputs", "training"},
{"inputs"},
)
self.has_state = "state" in self.call_fn_arguments
if init_fn:
self.init_fn_arguments = self._validate_signature(
init_fn, "init_fn", {"rng", "inputs", "training"}, {"inputs"}
)
# Attributes for jax2tf functions
self.jax2tf_training_false_fn = None
self.jax2tf_training_true_fn = None
def _validate_signature(self, fn, fn_name, allowed, required):
fn_parameters = inspect.signature(fn).parameters
for parameter_name in required:
if parameter_name not in fn_parameters:
raise ValueError(
f"Missing required argument in `{fn_name}`: "
f"`{parameter_name}`"
)
parameter_names = []
for parameter in fn_parameters.values():
if parameter.name not in allowed:
raise ValueError(
f"Unsupported argument in `{fn_name}`: `{parameter.name}`, "
f"supported arguments are `{'`, `'.join(allowed)}`"
)
parameter_names.append(parameter.name)
return parameter_names
def _get_jax2tf_input_shape(self, input_shape):
"""Convert input shape in a format suitable for `jax2tf`.
`jax2tf` expects a letter for each unknown dimension, which allows
correlated dimensions. Since correlated dimensions are not supported by
Keras, we simply use 'a', 'b', 'c'..., for each unknown dimension. We
however use 'batch' for dimension 0 if not defined to correlate the
batch size across inputs.
Example (spaces added for readability):
```
input_shape: (None , 4 , None, None, 5 )
result: "(batch, 4 , a , b , 5 )"
```
Args:
input_shape: a single shape or a structure of shapes for the inputs.
Returns:
the shape or shapes structure in the `jax2tf` format as strings.
"""
dim_names = itertools.chain(
string.ascii_lowercase, # a, b, ... z
itertools.starmap( # aa, ab, ... az, ba, bb, ... zz
lambda a, b: a + b,
itertools.product(string.ascii_lowercase, repeat=2),
),
)
def get_single_jax2tf_shape(shape):
jax2tf_shape = []
for index, dim in enumerate(shape):
if dim is not None:
jax2tf_shape.append(str(dim))
elif index == 0:
jax2tf_shape.append("batch")
else:
jax2tf_shape.append(next(dim_names))
return "(" + ", ".join(jax2tf_shape) + ")"
res = tree.map_shape_structure(get_single_jax2tf_shape, input_shape)
return res
def _jax2tf_convert(self, fn, polymorphic_shapes):
from jax.experimental import jax2tf
converted_fn = jax2tf.convert(fn, polymorphic_shapes=polymorphic_shapes)
# Autograph won't work with the output of jax2tf.
converted_fn = tf.autograph.experimental.do_not_convert(converted_fn)
return converted_fn
def _partial_with_positional(self, fn, index, value):
"""Return a new partial with one positional argument set to a value.
This is needed because `jax2tf` only supports positional arguments and
`functools.partial` only supports setting positional arguments starting
from the left. Our use case is the `training` argument which is
typically the righmost argument.
Args:
fn: the function to wrap.
index: the index of the positional argument to set to `value`.
value: the value for the positional argument at `index`.
"""
@functools.wraps(fn)
def wrapper(*args):
args = args[0:index] + (value,) + args[index:]
return fn(*args)
return wrapper
@tracking.no_automatic_dependency_tracking
@tf_no_automatic_dependency_tracking
def _create_variables(self, values, trainable):
"""Create a structure of variables from a structure of JAX arrays.
`values` is traversed via JAX's `tree_map`. When a leaf is a JAX array
or a tensor-like object, a corresponding variable is created with it as
the initial value. The resulting structure of variables is assigned to
`self.params` or `self.state` depending on `trainable`. Then, a
flattened version of the variables is returned for tracking.
`self.params` or `self.state` are intentionally not tracked because
structures like `TrackedList` interfere with `jax.tree_utils`.
Note that leaf objects that are not JAX arrays and not tensor-like are
left intact as they are assumed to be configuration used by the model.
Args:
values: the structure of values to traverse.
trainable: whether to create trainable variables.
Returns:
flat list of variables initialized with `values` for tracking.
"""
def create_variable(value):
if backend.is_tensor(value) or isinstance(
value, (np.ndarray, np.generic, jax.Array)
):
dtype = value.dtype
if is_float_dtype(dtype):
dtype = None # Use the layer dtype policy
return self.add_weight(
value.shape,
initializer=backend.convert_to_tensor(value),
dtype=dtype,
trainable=trainable,
)
elif isinstance(value, (bool, int, float)):
dtype = standardize_dtype(type(value))
if is_float_dtype(dtype):
dtype = None # Use the layer dtype policy
return self.add_weight(
(),
initializer=backend.convert_to_tensor(value),
dtype=dtype,
trainable=trainable,
)
else:
return value
# Use JAX's tree_map as it understands registered classes.
variables = jax.tree_util.tree_map(create_variable, values)
if trainable:
self.params = variables
else:
self.state = variables
flat_variables, _ = jax.tree_util.tree_flatten(variables)
return flat_variables
def _get_init_rng(self):
"""
Returns a key in form of the backend array of size 2 dtype uint32
to pass to `init_fn`.
By default, this returns a Jax or TF array of size 2 by calling
`self.seed_generator.next()`. Override this to return a different
structure.
Returns:
a key as an Jax or TF array of size 2 dtype uint32 will be passed
as the `rng` argument of `init_fn`.
"""
return self.seed_generator.next()
def _get_call_rng(self, training):
"""
Returns a key in form of the backend array of size 2 dtype uint32
to pass to `call_fn`.
By default, this returns a Jax or TF array of size 2 by calling
`self.seed_generator.next()` when `training` is `True`, and `None` when
`training` is `False`. Override this to return a different structure or
to pass RNGs in inference mode too.
Returns:
a key as an Jax or TF array of size 2 dtype uint32 will be passed
as the `rng` argument of `call_fn`.
"""
if training:
return self.seed_generator.next()
else:
return None
def _initialize_weights(self, input_shape):
if jax_utils.is_in_jax_tracing_scope() or tf.inside_function():
# This exception is not actually shown, it is caught and a detailed
# warning about calling 'build' is printed.
raise ValueError(
"'JaxLayer' cannot be built in tracing scope"
"or inside tf function"
)
# Initialize `params` and `state` if needed by calling `init_fn`.
def create_input(shape):
shape = [d if d is not None else 1 for d in shape]
return jax.numpy.ones(shape)
init_inputs = tree.map_shape_structure(create_input, input_shape)
init_args = []
for argument_name in self.init_fn_arguments:
if argument_name == "rng":
init_args.append(
jax.tree_util.tree_map(
lambda x: jax.numpy.array(_convert_to_jax_key(x)),
self._get_init_rng(),
)
)
elif argument_name == "inputs":
init_args.append(init_inputs)
elif argument_name == "training":
init_args.append(True)
init_result = self.init_fn(*init_args)
if self.has_state:
init_params, init_state = init_result
else:
init_params, init_state = init_result, None
self.tracked_params = self._create_variables(
init_params, trainable=True
)
self.tracked_state = self._create_variables(init_state, trainable=False)
def build(self, input_shape):
if self.params is None and self.state is None:
self._initialize_weights(input_shape)
if backend.backend() == "tensorflow":
polymorphic_shapes = []
for argument in self.call_fn_arguments:
if argument == "inputs":
polymorphic_shapes.append(
self._get_jax2tf_input_shape(input_shape)
)
elif argument != "training":
# params, state, rng
polymorphic_shapes.append("...")
if "training" in self.call_fn_arguments:
training_argument_index = self.call_fn_arguments.index(
"training"
)
self.jax2tf_training_false_fn = self._jax2tf_convert(
self._partial_with_positional(
self.call_fn, training_argument_index, False
),
polymorphic_shapes,
)
self.jax2tf_training_true_fn = self._jax2tf_convert(
self._partial_with_positional(
self.call_fn, training_argument_index, True
),
polymorphic_shapes,
)
else:
self.jax2tf_training_false_fn = self._jax2tf_convert(
self.call_fn,
polymorphic_shapes,
)
self.jax2tf_training_true_fn = None
super().build(input_shape)
def call(self, inputs, training=False):
def unwrap_variable(variable):
return None if variable is None else variable.value
call_args = []
for argument_name in self.call_fn_arguments:
if argument_name == "params":
call_args.append(
jax.tree_util.tree_map(unwrap_variable, self.params)
)
elif argument_name == "state":
call_args.append(
jax.tree_util.tree_map(unwrap_variable, self.state)
)
elif argument_name == "rng":
call_args.append(
jax.tree_util.tree_map(
_convert_to_jax_key, self._get_call_rng(training)
)
)
elif argument_name == "inputs":
call_args.append(inputs)
elif argument_name == "training":
if backend.backend() == "jax":
call_args.append(training)
def assign_state_to_variable(value, variable):
# This exists only to make debugging this error case easier.
if not hasattr(variable, "assign"):
raise ValueError(
"Structure mismatch: the structure of the state returned "
"by `call` does not match the structure of the state at "
"initialization time."
)
variable.assign(value)
def call_with_fn(fn):
if self.has_state:
predictions, new_state = fn(*call_args)
jax.tree_util.tree_map(
assign_state_to_variable, new_state, self.state
)
return predictions
else:
return fn(*call_args)
if backend.backend() == "jax":
return call_with_fn(self.call_fn)
elif backend.backend() == "tensorflow":
if training and self.jax2tf_training_true_fn is not None:
return call_with_fn(self.jax2tf_training_true_fn)
else:
return call_with_fn(self.jax2tf_training_false_fn)
def get_config(self):
config = {
"call_fn": serialization_lib.serialize_keras_object(self.call_fn),
"init_fn": serialization_lib.serialize_keras_object(self.init_fn),
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
call_fn = serialization_lib.deserialize_keras_object(config["call_fn"])
init_fn = serialization_lib.deserialize_keras_object(config["init_fn"])
config["call_fn"] = call_fn
config["init_fn"] = init_fn
return super().from_config(config)
@keras_export("keras.layers.FlaxLayer")
| JaxLayer |
python | huggingface__transformers | src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py | {
"start": 58148,
"end": 64155
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: Qwen2_5OmniThinkerConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[Qwen2_5OmniConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
# Ignore copy
def forward(self, x, position_ids):
# In contrast to other models, Qwen2_5Omni has different position ids for the grids
# So we expand the inv_freq to shape (3, ...)
inv_freq_expanded = self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1)
position_ids_expanded = position_ids[:, :, None, :].float() # shape (3, bs, 1, positions)
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
def apply_multimodal_rotary_pos_emb(q, k, cos, sin, mrope_section, unsqueeze_dim=1):
"""Applies Rotary Position Embedding with Multimodal Sections to the query and key tensors (https://qwenlm.github.io/blog/qwen2-vl/).
Explanation:
Multimodal 3D rotary position embedding is an extension to 1D rotary position embedding. The input embedding
sequence contains vision (images / videos) embedding and text embedding or just contains text embedding. For
vision embedding part, we apply rotary position embedding on temporal, height and width dimension separately.
Here we split the channel dimension to 3 chunks for the temporal, height and width rotary position embedding.
For text embedding part, we just apply 1D rotary position embedding. The three rotary position index (temporal,
height and width) of text embedding is always the same, so the text embedding rotary position embedding has no
difference with modern LLMs.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`):
The position indices of the tokens corresponding to the query and key tensors. For example, this can be
used to pass offsetted position ids when working with a KV-cache.
mrope_section(`List(int)`):
Multimodal rope section is for channel dimension of temporal, height and width in rope calculation.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
mrope_section = mrope_section * 2
cos = torch.cat([m[i % 3] for i, m in enumerate(cos.split(mrope_section, dim=-1))], dim=-1).unsqueeze(
unsqueeze_dim
)
sin = torch.cat([m[i % 3] for i, m in enumerate(sin.split(mrope_section, dim=-1))], dim=-1).unsqueeze(
unsqueeze_dim
)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
| Qwen2_5OmniRotaryEmbedding |
python | redis__redis-py | redis/http/http_client.py | {
"start": 670,
"end": 1433
} | class ____:
status: int
headers: Dict[str, str]
url: str
content: bytes
def text(self, encoding: Optional[str] = None) -> str:
enc = encoding or self._get_encoding()
return self.content.decode(enc, errors="replace")
def json(self) -> Any:
return json.loads(self.text(encoding=self._get_encoding()))
def _get_encoding(self) -> str:
# Try to infer encoding from headers; default to utf-8
ctype = self.headers.get("content-type", "")
# Example: application/json; charset=utf-8
for part in ctype.split(";"):
p = part.strip()
if p.lower().startswith("charset="):
return p.split("=", 1)[1].strip() or "utf-8"
return "utf-8"
| HttpResponse |
python | scipy__scipy | scipy/fftpack/tests/test_basic.py | {
"start": 25090,
"end": 30472
} | class ____:
"""Check input overwrite behavior of the FFT functions."""
real_dtypes = (np.float32, np.float64)
dtypes = real_dtypes + (np.complex64, np.complex128)
fftsizes = [8, 16, 32]
def _check(self, x, routine, fftsize, axis, overwrite_x):
x2 = x.copy()
for fake in [lambda x: x, FakeArray, FakeArray2]:
routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x)
sig = (f"{routine.__name__}({x.dtype}{x.shape!r}, {fftsize!r}, "
f"axis={axis!r}, overwrite_x={overwrite_x!r})")
if not overwrite_x:
assert_equal(x2, x, err_msg=f"spurious overwrite in {sig}")
def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes,
fftsize, overwrite_x):
np.random.seed(1234)
if np.issubdtype(dtype, np.complexfloating):
data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
else:
data = np.random.randn(*shape)
data = data.astype(dtype)
self._check(data, routine, fftsize, axis,
overwrite_x=overwrite_x)
@pytest.mark.parametrize('dtype', dtypes)
@pytest.mark.parametrize('fftsize', fftsizes)
@pytest.mark.parametrize('overwrite_x', [True, False])
@pytest.mark.parametrize('shape,axes', [((16,), -1),
((16, 2), 0),
((2, 16), 1)])
def test_fft_ifft(self, dtype, fftsize, overwrite_x, shape, axes):
overwritable = (np.complex128, np.complex64)
self._check_1d(fft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
self._check_1d(ifft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
@pytest.mark.parametrize('dtype', real_dtypes)
@pytest.mark.parametrize('fftsize', fftsizes)
@pytest.mark.parametrize('overwrite_x', [True, False])
@pytest.mark.parametrize('shape,axes', [((16,), -1),
((16, 2), 0),
((2, 16), 1)])
def test_rfft_irfft(self, dtype, fftsize, overwrite_x, shape, axes):
overwritable = self.real_dtypes
self._check_1d(irfft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
self._check_1d(rfft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes,
overwrite_x):
np.random.seed(1234)
if np.issubdtype(dtype, np.complexfloating):
data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
else:
data = np.random.randn(*shape)
data = data.astype(dtype)
def fftshape_iter(shp):
if len(shp) <= 0:
yield ()
else:
for j in (shp[0]//2, shp[0], shp[0]*2):
for rest in fftshape_iter(shp[1:]):
yield (j,) + rest
if axes is None:
part_shape = shape
else:
part_shape = tuple(np.take(shape, axes))
for fftshape in fftshape_iter(part_shape):
self._check(data, routine, fftshape, axes,
overwrite_x=overwrite_x)
if data.ndim > 1:
self._check(data.T, routine, fftshape, axes,
overwrite_x=overwrite_x)
@pytest.mark.parametrize('dtype', dtypes)
@pytest.mark.parametrize('overwrite_x', [True, False])
@pytest.mark.parametrize('shape,axes', [((16,), None),
((16,), (0,)),
((16, 2), (0,)),
((2, 16), (1,)),
((8, 16), None),
((8, 16), (0, 1)),
((8, 16, 2), (0, 1)),
((8, 16, 2), (1, 2)),
((8, 16, 2), (0,)),
((8, 16, 2), (1,)),
((8, 16, 2), (2,)),
((8, 16, 2), None),
((8, 16, 2), (0, 1, 2))])
def test_fftn_ifftn(self, dtype, overwrite_x, shape, axes):
overwritable = (np.complex128, np.complex64)
self._check_nd_one(fftn, dtype, shape, axes, overwritable,
overwrite_x)
self._check_nd_one(ifftn, dtype, shape, axes, overwritable,
overwrite_x)
@pytest.mark.parametrize('func', [fftn, ifftn, fft2])
def test_shape_axes_ndarray(func):
# Test fftn and ifftn work with NumPy arrays for shape and axes arguments
# Regression test for gh-13342
a = np.random.rand(10, 10)
expect = func(a, shape=(5, 5))
actual = func(a, shape=np.array([5, 5]))
assert_equal(expect, actual)
expect = func(a, axes=(-1,))
actual = func(a, axes=np.array([-1,]))
assert_equal(expect, actual)
expect = func(a, shape=(4, 7), axes=(1, 0))
actual = func(a, shape=np.array([4, 7]), axes=np.array([1, 0]))
assert_equal(expect, actual)
| TestOverwrite |
python | mlflow__mlflow | mlflow/gateway/providers/mlflow.py | {
"start": 478,
"end": 1393
} | class ____(BaseModel):
predictions: list[StrictStr]
@field_validator("predictions", mode="before")
def extract_choices(cls, predictions):
if isinstance(predictions, list) and not predictions:
raise ValueError("The input list is empty")
if isinstance(predictions, dict):
if "choices" not in predictions and len(predictions) > 1:
raise ValueError(
"The dict format is invalid for this route type. Ensure the served model "
"returns a dict key containing 'choices'"
)
if len(predictions) == 1:
predictions = next(iter(predictions.values()))
else:
predictions = predictions.get("choices", predictions)
if not predictions:
raise ValueError("The input list is empty")
return predictions
| ServingTextResponse |
python | django-mptt__django-mptt | tests/myapp/tests.py | {
"start": 63837,
"end": 68757
} | class ____(TreeTestCase):
fixtures = ["categories.json", "items.json", "subitems.json"]
def test_add_related_count_with_fk_to_natural_key(self):
# Regression test for #284
queryset = Category.objects.filter(name="Xbox 360").order_by("id")
# Test using FK that doesn't point to a primary key
for c in Category.objects.add_related_count(
queryset, Item, "category_fk", "item_count", cumulative=False
):
self.assertEqual(c.item_count, c.items_by_pk.count())
# Also works when using the FK that *does* point to a primary key
for c in Category.objects.add_related_count(
queryset, Item, "category_pk", "item_count", cumulative=False
):
self.assertEqual(c.item_count, c.items_by_pk.count())
def test_add_related_count_multistep(self):
queryset = Category.objects.filter(name="Xbox 360").order_by("id")
topqueryset = Category.objects.filter(name="PC & Video Games").order_by("id")
# Test using FK that doesn't point to a primary key
for c in Category.objects.add_related_count(
queryset, SubItem, "item__category_fk", "subitem_count", cumulative=False
):
self.assertEqual(c.subitem_count, 1)
for topc in Category.objects.add_related_count(
topqueryset, SubItem, "item__category_fk", "subitem_count", cumulative=False
):
self.assertEqual(topc.subitem_count, 1)
# Also works when using the FK that *does* point to a primary key
for c in Category.objects.add_related_count(
queryset, SubItem, "item__category_pk", "subitem_count", cumulative=False
):
self.assertEqual(c.subitem_count, 1)
for topc in Category.objects.add_related_count(
topqueryset, SubItem, "item__category_pk", "subitem_count", cumulative=False
):
self.assertEqual(topc.subitem_count, 1)
# Test using FK that doesn't point to a primary key, cumulative
for c in Category.objects.add_related_count(
queryset, SubItem, "item__category_fk", "subitem_count", cumulative=True
):
self.assertEqual(c.subitem_count, 1)
for topc in Category.objects.add_related_count(
topqueryset, SubItem, "item__category_fk", "subitem_count", cumulative=True
):
self.assertEqual(topc.subitem_count, 2)
# Also works when using the FK that *does* point to a primary key, cumulative
for c in Category.objects.add_related_count(
queryset, SubItem, "item__category_pk", "subitem_count", cumulative=True
):
self.assertEqual(c.subitem_count, 1)
for topc in Category.objects.add_related_count(
topqueryset, SubItem, "item__category_pk", "subitem_count", cumulative=True
):
self.assertEqual(topc.subitem_count, 2)
def test_add_related_count_with_extra_filters(self):
"""Test that filtering by extra_filters works"""
queryset = Category.objects.all()
# Test using FK that doesn't point to a primary key
for c in Category.objects.add_related_count(
queryset,
Item,
"category_fk",
"item_count",
cumulative=False,
extra_filters={"name": "Halo: Reach"},
):
if c.pk == 5:
self.assertEqual(c.item_count, 1)
else:
self.assertEqual(c.item_count, 0)
# Also works when using the FK that *does* point to a primary key
for c in Category.objects.add_related_count(
queryset,
Item,
"category_pk",
"item_count",
cumulative=False,
extra_filters={"name": "Halo: Reach"},
):
if c.pk == 5:
self.assertEqual(c.item_count, 1)
else:
self.assertEqual(c.item_count, 0)
# Test using FK that doesn't point to a primary key
for c in Category.objects.add_related_count(
queryset,
Item,
"category_fk",
"item_count",
cumulative=True,
extra_filters={"name": "Halo: Reach"},
):
if c.pk in (5, 1):
self.assertEqual(c.item_count, 1)
else:
self.assertEqual(c.item_count, 0)
# Also works when using the FK that *does* point to a primary key
for c in Category.objects.add_related_count(
queryset,
Item,
"category_pk",
"item_count",
cumulative=True,
extra_filters={"name": "Halo: Reach"},
):
if c.pk in (5, 1):
self.assertEqual(c.item_count, 1)
else:
self.assertEqual(c.item_count, 0)
| TreeManagerTestCase |
python | joke2k__faker | faker/providers/person/en_TH/__init__.py | {
"start": 44,
"end": 6008
} | class ____(PersonProvider):
formats = (
"{{first_name}} {{last_name}}",
"{{first_name}} {{last_name}}",
"{{first_name}} {{last_name}}",
"{{first_name}} {{last_name}}",
"{{first_name}} {{last_name}}",
"{{first_name}} {{last_name}}",
"{{prefix}} {{first_name}} {{last_name}}",
)
prefixes_male = (
"GEN",
"LT GEN",
"MAJ GEN",
"COL",
"LT COL",
"MAJ",
"CAPT",
"LT",
"SUB LT",
"S M 1",
"S M 2",
"S M 3",
"SGT",
"CPL",
"PFC",
"PVT",
"ADM",
"V ADM",
"R ADM",
"CAPT",
"CDR",
"L CDR",
"LT",
"LT JG",
"SUB LT",
"CPO 1",
"CPO 2",
"CPO 3",
"PO 1",
"PO 2",
"PO 3",
"SEA-MAN",
"ACM",
"AM",
"AVM",
"GP CAPT",
"WG CDR",
"SQN LDR",
"FLT LT",
"FLG OFF",
"PLT OFF",
"FS 1",
"FS 2",
"FS 3",
"SGT",
"CPL",
"LAC",
"AMN",
"POL GEN",
"POL LT GEN",
"POL MAJ GEN",
"POL COL",
"POL LT COL",
"POL MAJ",
"POL CAPT",
"POL LT",
"POL SUB LT",
"POL SEN SGT MAJ",
"POL SGT MAJ",
"POL SGT",
"POL CPL",
"POL L/C",
"POL CONST",
"MR",
"REV",
"M L",
"M R",
"SAMANERA",
"PHRA",
"PHRA ATHIKAN",
"CHAO ATHIKAN",
"PHRAPALAD",
"PHRASAMU",
"PHRABAIDIKA",
"PHRAKHU PALAD",
"PHRAKHU SAMU",
"PHRAKHU BAIDIKA",
"PHRAMAHA",
"PHRAKHU DHAMMADHORN",
"PHRAKHU VINAIDHORN",
)
prefixes_female = (
"GEN",
"LT GEN",
"MAJ GEN",
"COL",
"LT COL",
"MAJ",
"CAPT",
"LT",
"SUB LT",
"S M 1",
"S M 2",
"S M 3",
"SGT",
"CPL",
"PFC",
"PVT",
"ADM",
"V ADM",
"R ADM",
"CAPT",
"CDR",
"L CDR",
"LT",
"LT JG",
"SUB LT",
"CPO 1",
"CPO 2",
"CPO 3",
"PO 1",
"PO 2",
"PO 3",
"SEA-MAN",
"ACM",
"AM",
"AVM",
"GP CAPT",
"WG CDR",
"SQN LDR",
"FLT LT",
"FLG OFF",
"PLT OFF",
"FS 1",
"FS 2",
"FS 3",
"SGT",
"CPL",
"LAC",
"AMN",
"POL GEN",
"POL LT GEN",
"POL MAJ GEN",
"POL COL",
"POL LT COL",
"POL MAJ",
"POL CAPT",
"POL LT",
"POL SUB LT",
"POL SEN SGT MAJ",
"POL SGT MAJ",
"POL SGT",
"POL CPL",
"POL L/C",
"POL CONST",
"MRS",
"MISS",
"REV",
"M L",
)
prefixes = prefixes_male + prefixes_female
first_names = (
"Pornchanok",
"Patchaploy",
"Peem",
"Kodchaporn",
"Pattapon",
"Sarunporn",
"Jinjuta",
"Sorawut",
"Suvakit",
"Prima",
"Darin",
"Pintusorn",
"Kulnun",
"Nutcha",
"Nutkrita",
"Sittikorn",
"Wasin",
"Apisara",
"Nattawun",
"Tunradee",
"Niracha",
"Tunchanok",
"Kamolchanok",
"Jaruwan",
"Pachongruk",
"Pakjira",
"Pattatomporn",
"Suwijuk",
"Noppakao",
"Ratchanon",
"Atit",
"Kunaporn",
"Arisara",
"Todsawun",
"Chaiwut",
"Puntira",
"Supasita",
"Patcharaporn",
"Phubes",
"Pattamon",
"Chanya",
"Pannawich",
"Chawin",
"Pada",
"Chanikan",
"Nutwadee",
"Chalisa",
"Prames",
"Supasit",
"Sitiwat",
"Teetat",
"Yada",
"Phenphitcha",
"Anon",
"Chaifah",
"Pawan",
"Aunyaporn",
"Yanisa",
"Pak",
"Chayanin",
"Chayapat",
"Jitrin",
"Wassaya",
"Pitipat",
"Nichakarn",
"Parin",
"Thanatcha",
)
last_names = (
"Prachayaroch",
"Prachayaroch",
"Kamalanon",
"Tianvarich",
"Bunlerngsri",
"Sukhenai",
"Posalee",
"Chaisatit",
"Sujjaboriboon",
"Kamalanon",
"Neerachapong",
"Pianduangsri",
"Pasuk",
"Losatapornpipit",
"Suraprasert",
"Matinawin",
"Choeychuen",
"Wasunun",
"Kumsoontorn",
"Sireelert",
"Boonpungbaramee",
"Sorattanachai",
"Benchapatranon",
"Intaum",
"Pikatsingkorn",
"Srisoontorn",
"Polpo",
"Kongchayasukawut",
"Charoensuksopol",
"Bunlupong",
"Chomsri",
"Tungkasethakul",
"Chowitunkit",
"Todsapornpitakul",
"Wimolnot",
"Kittakun",
"Methavorakul",
"Pitanuwat",
"Phusilarungrueng",
"Turongkinanon",
"Kitprapa",
"Pothanun",
"Youprasert",
"Methavorakul",
"Vethayasas",
"Sooksawang",
"Anekvorakul",
"Pichpandecha",
"Sittisaowapak",
"Suraprachit",
"Kongsri",
"Trikasemmart",
"Habpanom",
"Wannapaitoonsri",
"Vinyuvanichkul",
"Pongpanitch",
"Permchart",
"Chaihirankarn",
"Thantananont",
"Norramon",
"Prayoonhong",
"Lertsattayanusak",
"Polauaypon",
"Prakalpawong",
"Titipatrayunyong",
"Krittayanukoon",
"Siripaiboo",
)
| Provider |
python | chroma-core__chroma | chromadb/telemetry/opentelemetry/__init__.py | {
"start": 527,
"end": 1404
} | class ____(Enum):
"""The granularity of the OpenTelemetry spans."""
NONE = "none"
"""No spans are emitted."""
OPERATION = "operation"
"""Spans are emitted for each operation."""
OPERATION_AND_SEGMENT = "operation_and_segment"
"""Spans are emitted for each operation and segment."""
ALL = "all"
"""Spans are emitted for almost every method call."""
# Greater is more restrictive. So "all" < "operation" (and everything else),
# "none" > everything.
def __lt__(self, other: Any) -> bool:
"""Compare two granularities."""
order = [
OpenTelemetryGranularity.ALL,
OpenTelemetryGranularity.OPERATION_AND_SEGMENT,
OpenTelemetryGranularity.OPERATION,
OpenTelemetryGranularity.NONE,
]
return order.index(self) < order.index(other)
| OpenTelemetryGranularity |
python | coleifer__peewee | tests/psycopg3_ext.py | {
"start": 1108,
"end": 1214
} | class ____(TestModel):
title = CharField()
data = TextField()
fts_data = TSVectorField()
| FTSModel |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyflakes/F841_0.py | {
"start": 2706,
"end": 2779
} | class ____:
def set_class(self, cls):
__class__ = cls # F841
| A |
python | google__jax | docs/autodidax2_part1.py | {
"start": 8572,
"end": 8661
} | class ____:
interpreter : Interpreter
primal : float
tangent : float
| TaggedDualNumber |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/resolvelib/structs.py | {
"start": 2042,
"end": 3147
} | class ____(collections_abc.Mapping):
def __init__(self, mapping, accessor, appends=None):
self._mapping = mapping
self._accessor = accessor
self._appends = appends or {}
def __repr__(self):
return "IteratorMapping({!r}, {!r}, {!r})".format(
self._mapping,
self._accessor,
self._appends,
)
def __bool__(self):
return bool(self._mapping or self._appends)
__nonzero__ = __bool__ # XXX: Python 2.
def __contains__(self, key):
return key in self._mapping or key in self._appends
def __getitem__(self, k):
try:
v = self._mapping[k]
except KeyError:
return iter(self._appends[k])
return itertools.chain(self._accessor(v), self._appends.get(k, ()))
def __iter__(self):
more = (k for k in self._appends if k not in self._mapping)
return itertools.chain(self._mapping, more)
def __len__(self):
more = sum(1 for k in self._appends if k not in self._mapping)
return len(self._mapping) + more
| IteratorMapping |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py | {
"start": 20939,
"end": 22800
} | class ____(graphene.Union):
class Meta:
types = (
GrapheneExecutionStepFailureEvent,
GrapheneExecutionStepInputEvent,
GrapheneExecutionStepOutputEvent,
GrapheneExecutionStepSkippedEvent,
GrapheneExecutionStepStartEvent,
GrapheneExecutionStepSuccessEvent,
GrapheneExecutionStepUpForRetryEvent,
GrapheneExecutionStepRestartEvent,
GrapheneHealthChangedEvent,
GrapheneLogMessageEvent,
GrapheneResourceInitFailureEvent,
GrapheneResourceInitStartedEvent,
GrapheneResourceInitSuccessEvent,
GrapheneRunFailureEvent,
GrapheneRunStartEvent,
GrapheneRunEnqueuedEvent,
GrapheneRunDequeuedEvent,
GrapheneRunStartingEvent,
GrapheneRunCancelingEvent,
GrapheneRunCanceledEvent,
GrapheneRunSuccessEvent,
GrapheneStepWorkerStartedEvent,
GrapheneStepWorkerStartingEvent,
GrapheneHandledOutputEvent,
GrapheneLoadedInputEvent,
GrapheneLogsCapturedEvent,
GrapheneObjectStoreOperationEvent,
GrapheneStepExpectationResultEvent,
GrapheneMaterializationEvent,
GrapheneObservationEvent,
GrapheneFailedToMaterializeEvent,
GrapheneEngineEvent,
GrapheneHookCompletedEvent,
GrapheneHookSkippedEvent,
GrapheneHookErroredEvent,
GrapheneAlertStartEvent,
GrapheneAlertSuccessEvent,
GrapheneAlertFailureEvent,
GrapheneAssetMaterializationPlannedEvent,
GrapheneAssetCheckEvaluationPlannedEvent,
GrapheneAssetCheckEvaluationEvent,
)
name = "DagsterRunEvent"
| GrapheneDagsterRunEvent |
python | jina-ai__jina | jina/clients/base/unary_rpc.py | {
"start": 372,
"end": 4236
} | class ____:
"""Class that encapsulated the methods required to run unary rpc calls from the client. Instantiate a single class
for each client request.
"""
def __init__(
self,
channel,
continue_on_error,
metadata,
on_always,
on_done,
on_error,
p_bar,
req_iter,
max_attempts,
backoff_multiplier,
initial_backoff,
max_backoff,
logger,
show_progress,
compression,
client_args,
prefetch,
results_in_order,
**kwargs
):
self.results_in_order = results_in_order
self.prefetch = prefetch
self.client_args = client_args
self.compression = compression
self.show_progress = show_progress
self.logger = logger
self.max_backoff = max_backoff
self.initial_backoff = initial_backoff
self.backoff_multiplier = backoff_multiplier
self.max_attempts = max_attempts
self.req_iter = req_iter
self.p_bar = p_bar
self.on_error = on_error
self.on_done = on_done
self.on_always = on_always
self.metadata = metadata
self.continue_on_error = continue_on_error
self.channel = channel
self.kwargs = kwargs
async def unary_rpc_with_retry(self):
"""Wraps the unary rpc call with retry loop based on the retry params.
:yields: Responses received from the target.
"""
stub = jina_pb2_grpc.JinaSingleDataRequestRPCStub(self.channel)
def _request_handler(
request: 'Request', **kwargs
) -> 'Tuple[asyncio.Future, Optional[asyncio.Future]]':
async def _with_retry(req: 'Request'):
for attempt in range(1, self.max_attempts + 1):
try:
return await stub.process_single_data(
req,
compression=self.compression,
metadata=self.metadata,
credentials=self.kwargs.get('credentials', None),
timeout=self.kwargs.get('timeout', None),
)
except (
grpc.aio.AioRpcError,
InternalNetworkError,
) as err:
await wait_or_raise_err(
attempt=attempt,
err=err,
max_attempts=self.max_attempts,
backoff_multiplier=self.backoff_multiplier,
initial_backoff=self.initial_backoff,
max_backoff=self.max_backoff,
)
return (
asyncio.ensure_future(_with_retry(request)),
None,
)
def _result_handler(resp):
callback_exec(
response=resp,
logger=self.logger,
on_error=self.on_error,
on_done=self.on_done,
on_always=self.on_always,
continue_on_error=self.continue_on_error,
)
return resp
streamer_args = vars(self.client_args)
if self.prefetch:
streamer_args['prefetch'] = self.prefetch
streamer = RequestStreamer(
request_handler=_request_handler,
result_handler=_result_handler,
iterate_sync_in_thread=False,
logger=self.logger,
**streamer_args,
)
async for response in streamer.stream(
request_iterator=self.req_iter, results_in_order=self.results_in_order
):
if self.show_progress:
self.p_bar.update()
yield response
| UnaryRpc |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.