language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | kamyu104__LeetCode-Solutions | Python/count-the-number-of-beautiful-subarrays.py | {
"start": 78,
"end": 423
} | class ____(object):
def beautifulSubarrays(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
cnt = collections.Counter()
cnt[0] = 1
result = curr = 0
for x in nums:
curr ^= x
result += cnt[curr]
cnt[curr] += 1
return result
| Solution |
python | pypa__warehouse | tests/unit/packaging/test_services.py | {
"start": 22524,
"end": 23199
} | class ____:
def test_verify_service(self):
assert verifyClass(IFileStorage, S3ArchiveFileStorage)
def test_create_service(self):
session = boto3.session.Session(
aws_access_key_id="foo", aws_secret_access_key="bar"
)
request = pretend.stub(
find_service=pretend.call_recorder(lambda name: session),
registry=pretend.stub(settings={"archive_files.bucket": "froblob"}),
)
storage = S3ArchiveFileStorage.create_service(None, request)
assert request.find_service.calls == [pretend.call(name="aws.session")]
assert storage.bucket.name == "froblob"
| TestS3ArchiveFileStorage |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 263426,
"end": 265465
} | class ____(ExternKernelOut):
@classmethod
def create(cls, x: IRNode, device: torch.device, non_blocking: bool) -> IRNode:
if (
not x.is_extern()
# Can not apply this optimization if x has been mutated
and try_get_name(x) not in V.graph.mutated_buffers
and all(r in V.graph.constants for r in x.get_read_names())
and not config.aot_inductor.use_runtime_constant_folding
):
return x.constant_to_device(device)
V.graph.add_device_info(device)
x_device = x.get_device()
assert x_device is not None
V.graph.add_device_info(x_device)
developer_warning("DeviceCopy in input program")
constant_args = (non_blocking,)
# Device Copy should keep the same layout as input
x = ExternKernel.require_contiguous(x)
stride = None
if x.get_size():
# x.get_stride() may be unimplemented if x's size is empty
stride = x.get_stride()
is_destination_pinned = (
is_gpu(x_device.type) and device.type == "cpu" and non_blocking
)
is_source_pinned = (
x_device.type == "cpu" and is_gpu(device.type) and non_blocking
)
if is_source_pinned and is_storage_and_layout(x):
x.get_layout().is_pinned = True
return DeviceCopy(
FixedLayout(
device,
x.get_dtype(),
x.get_size(),
stride,
is_pinned=is_destination_pinned,
),
[cls.realize_input(x)],
constant_args,
)
def codegen(self, wrapper: PythonWrapperCodegen) -> None:
args = self.codegen_args()
assert len(args) == 2
if self.output_view:
wrapper.codegen_device_copy(
args[0], self.output_view.codegen_reference(), args[1]
)
else:
wrapper.codegen_device_copy(args[0], self.codegen_reference(), args[1])
| DeviceCopy |
python | huggingface__transformers | tests/models/dpt/test_modeling_dpt.py | {
"start": 11470,
"end": 16911
} | class ____(unittest.TestCase):
def test_inference_depth_estimation(self):
image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large").to(torch_device)
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
predicted_depth = outputs.predicted_depth
# verify the predicted depth
expected_shape = torch.Size((1, 384, 384))
self.assertEqual(predicted_depth.shape, expected_shape)
expectations = Expectations(
{
(None, None): [[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]],
("cuda", 8): [[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=2e-4, atol=2e-4)
def test_inference_semantic_segmentation(self):
image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large-ade")
model = DPTForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade").to(torch_device)
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 150, 480, 480))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor(
[[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]]
).to(torch_device)
torch.testing.assert_close(outputs.logits[0, 0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
def test_post_processing_semantic_segmentation(self):
image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large-ade")
model = DPTForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade").to(torch_device)
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
outputs.logits = outputs.logits.detach().cpu()
segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
expected_shape = torch.Size((500, 300))
self.assertEqual(segmentation[0].shape, expected_shape)
segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
expected_shape = torch.Size((480, 480))
self.assertEqual(segmentation[0].shape, expected_shape)
def test_post_processing_depth_estimation(self):
image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt")
# forward pass
with torch.no_grad():
outputs = model(**inputs)
predicted_depth = image_processor.post_process_depth_estimation(outputs=outputs)[0]["predicted_depth"]
expected_shape = torch.Size((384, 384))
self.assertTrue(predicted_depth.shape == expected_shape)
predicted_depth_l = image_processor.post_process_depth_estimation(outputs=outputs, target_sizes=[(500, 500)])
predicted_depth_l = predicted_depth_l[0]["predicted_depth"]
expected_shape = torch.Size((500, 500))
self.assertTrue(predicted_depth_l.shape == expected_shape)
output_enlarged = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(0).unsqueeze(1), size=(500, 500), mode="bicubic", align_corners=False
).squeeze()
self.assertTrue(output_enlarged.shape == expected_shape)
torch.testing.assert_close(predicted_depth_l, output_enlarged, atol=1e-3, rtol=1e-3)
@pytest.mark.torch_export_test
def test_export(self):
for strict in [True, False]:
with self.subTest(strict=strict):
if not is_torch_greater_or_equal_than_2_4:
self.skipTest(reason="This test requires torch >= 2.4 to run.")
model = DPTForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade").to(torch_device).eval()
image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large-ade")
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
exported_program = torch.export.export(
model,
args=(inputs["pixel_values"],),
strict=strict,
)
with torch.no_grad():
eager_outputs = model(**inputs)
exported_outputs = exported_program.module().forward(inputs["pixel_values"])
self.assertEqual(eager_outputs.logits.shape, exported_outputs.logits.shape)
torch.testing.assert_close(eager_outputs.logits, exported_outputs.logits, rtol=1e-4, atol=1e-4)
| DPTModelIntegrationTest |
python | falconry__falcon | falcon/_typing.py | {
"start": 7387,
"end": 7677
} | class ____(Protocol[_AReqT, _ARespT]):
"""ASGI middleware with resource handler."""
async def process_resource(
self,
req: _AReqT,
resp: _ARespT,
resource: object,
params: Mapping[str, Any],
) -> None: ...
| AsgiMiddlewareWithProcessResource |
python | tensorflow__tensorflow | tensorflow/python/ops/math_grad_test.py | {
"start": 17385,
"end": 17963
} | class ____(test.TestCase):
@test_util.run_deprecated_v1
def testFloorModGradient(self):
# Making sure the input is not near the discontinuity point where
# x/y == floor(x/y)
ns = constant_op.constant([17.], dtype=dtypes.float32)
inputs = constant_op.constant([131.], dtype=dtypes.float32)
floor_mod = math_ops.floormod(inputs, ns)
with self.cached_session():
error = gradient_checker.compute_gradient_error(inputs, [1],
floor_mod, [1])
self.assertLess(error, 1e-4)
| FloorModGradientTest |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-moves-to-kill-all-pawns.py | {
"start": 119,
"end": 2008
} | class ____(object):
def maxMoves(self, kx, ky, positions):
"""
:type kx: int
:type ky: int
:type positions: List[List[int]]
:rtype: int
"""
N = 50
DIRECTIONS = ((1, 2), (-1, 2), (1, -2), (-1, -2), (2, 1), (-2, 1), (2, -1), (-2, -1))
POS_INF = float("inf")
NEG_INF = float("-inf")
def popcount(r):
return bin(r)[2:].count('1')
def bfs(r, c):
dist = [[POS_INF]*N for _ in xrange(N)]
dist[r][c] = 0
q = [(r, c)]
while q:
new_q = []
for r, c in q:
for dr, dc in DIRECTIONS:
nr, nc = r+dr, c+dc
if not (0 <= nr < N and 0 <= nc < N and dist[nr][nc] == POS_INF):
continue
dist[nr][nc] = dist[r][c]+1
new_q.append((nr, nc))
q = new_q
return dist
p = len(positions)
positions.append([kx, ky])
dist = [[0]*(p+1) for _ in xrange(p+1)]
for i, (r, c) in enumerate(positions):
d = bfs(r, c)
for j in xrange(i+1, p+1):
dist[j][i] = dist[i][j] = d[positions[j][0]][positions[j][1]]
dp = [[POS_INF if popcount(mask)&1 else NEG_INF]*p for mask in xrange(1<<p)]
dp[-1] = [0]*p
for mask in reversed(xrange(1, 1<<p)):
fn = (max, min)[(popcount(mask)&1)^1]
for i in xrange(p):
if (mask&(1<<i)) == 0:
continue
for j in xrange(p):
if j == i or (mask&(1<<j)) == 0:
continue
dp[mask^(1<<i)][j] = fn(dp[mask^(1<<i)][j], dp[mask][i]+dist[i][j])
return max(dp[1<<i][i]+dist[i][p] for i in xrange(p))
| Solution |
python | sqlalchemy__sqlalchemy | test/aaa_profiling/test_orm.py | {
"start": 15509,
"end": 17135
} | class ____(NoCache, fixtures.MappedTest):
__requires__ = ("python_profiling_backend",)
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"parent",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data1", String(20)),
Column("data2", String(20)),
Column("data3", String(20)),
Column("data4", String(20)),
)
@classmethod
def setup_classes(cls):
class Parent(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
Parent = cls.classes.Parent
parent = cls.tables.parent
cls.mapper_registry.map_imperatively(Parent, parent)
def _fixture(self):
Parent = self.classes.Parent
sess = fixture_session()
sess.add_all(
[
Parent(data1="d1", data2="d2", data3="d3", data4="d4")
for i in range(10)
]
)
sess.commit()
sess.close()
def test_query_cols(self):
Parent = self.classes.Parent
self._fixture()
sess = fixture_session()
# warm up cache
for attr in [Parent.data1, Parent.data2, Parent.data3, Parent.data4]:
attr.__clause_element__()
@profiling.function_call_count()
def go():
for i in range(10):
q = sess.query(
Parent.data1, Parent.data2, Parent.data3, Parent.data4
)
q.all()
go()
| QueryTest |
python | python-openxml__python-docx | src/docx/image/png.py | {
"start": 3945,
"end": 5801
} | class ____:
"""Extracts chunks from a PNG image stream."""
def __init__(self, stream_rdr):
super(_ChunkParser, self).__init__()
self._stream_rdr = stream_rdr
@classmethod
def from_stream(cls, stream):
"""Return a |_ChunkParser| instance that can extract the chunks from the PNG
image in `stream`."""
stream_rdr = StreamReader(stream, BIG_ENDIAN)
return cls(stream_rdr)
def iter_chunks(self):
"""Generate a |_Chunk| subclass instance for each chunk in this parser's PNG
stream, in the order encountered in the stream."""
for chunk_type, offset in self._iter_chunk_offsets():
chunk = _ChunkFactory(chunk_type, self._stream_rdr, offset)
yield chunk
def _iter_chunk_offsets(self):
"""Generate a (chunk_type, chunk_offset) 2-tuple for each of the chunks in the
PNG image stream.
Iteration stops after the IEND chunk is returned.
"""
chunk_offset = 8
while True:
chunk_data_len = self._stream_rdr.read_long(chunk_offset)
chunk_type = self._stream_rdr.read_str(4, chunk_offset, 4)
data_offset = chunk_offset + 8
yield chunk_type, data_offset
if chunk_type == "IEND":
break
# incr offset for chunk len long, chunk type, chunk data, and CRC
chunk_offset += 4 + 4 + chunk_data_len + 4
def _ChunkFactory(chunk_type, stream_rdr, offset):
"""Return a |_Chunk| subclass instance appropriate to `chunk_type` parsed from
`stream_rdr` at `offset`."""
chunk_cls_map = {
PNG_CHUNK_TYPE.IHDR: _IHDRChunk,
PNG_CHUNK_TYPE.pHYs: _pHYsChunk,
}
chunk_cls = chunk_cls_map.get(chunk_type, _Chunk)
return chunk_cls.from_offset(chunk_type, stream_rdr, offset)
| _ChunkParser |
python | scipy__scipy | scipy/stats/tests/test_hypotests.py | {
"start": 8707,
"end": 41078
} | class ____:
# All magic numbers are from R wilcox.test unless otherwise specified
# https://rdrr.io/r/stats/wilcox.test.html
# --- Test Input Validation ---
@pytest.mark.skip_xp_backends("jax.numpy", reason="lazy -> no _axis_nan_policy")
def test_empty(self, xp):
x = xp.asarray([1, 2]) # generic, valid inputs
y = xp.asarray([3, 4])
empty = xp.asarray([], dtype=x.dtype)
nan = xp.asarray(xp.nan)
with pytest.warns(SmallSampleWarning, match=too_small_1d_not_omit):
res = mannwhitneyu(x, empty)
xp_assert_close(res.statistic, nan)
xp_assert_close(res.pvalue, nan)
with pytest.warns(SmallSampleWarning, match=too_small_1d_not_omit):
res = mannwhitneyu(empty, y)
xp_assert_close(res.statistic, nan)
xp_assert_close(res.pvalue, nan)
with pytest.warns(SmallSampleWarning, match=too_small_1d_not_omit):
res = mannwhitneyu(empty, empty)
xp_assert_close(res.statistic, nan)
xp_assert_close(res.pvalue, nan)
def test_input_validation(self, xp):
x = xp.asarray([1, 2]) # generic, valid inputs
y = xp.asarray([3, 4])
with assert_raises(ValueError, match="`use_continuity` must be one"):
mannwhitneyu(x, y, use_continuity='ekki')
with assert_raises(ValueError, match="`alternative` must be one of"):
mannwhitneyu(x, y, alternative='ekki')
with assert_raises(ValueError, match="`axis` must be an integer"):
mannwhitneyu(x, y, axis=1.5)
with assert_raises(ValueError, match="`method` must be one of"):
mannwhitneyu(x, y, method='ekki')
def test_auto(self, xp):
# Test that default method ('auto') chooses intended method
rng = np.random.default_rng(923823782530925934)
n = 8 # threshold to switch from exact to asymptotic
# both inputs are smaller than threshold; should use exact
x = xp.asarray(rng.random(n-1))
y = xp.asarray(rng.random(n-1))
auto = mannwhitneyu(x, y)
asymptotic = mannwhitneyu(x, y, method='asymptotic')
exact = mannwhitneyu(x, y, method='exact')
assert auto.pvalue == exact.pvalue
assert auto.pvalue != asymptotic.pvalue
# one input is smaller than threshold; should use exact
x = xp.asarray(rng.random(n-1))
y = xp.asarray(rng.random(n+1))
auto = mannwhitneyu(x, y)
asymptotic = mannwhitneyu(x, y, method='asymptotic')
exact = mannwhitneyu(x, y, method='exact')
assert auto.pvalue == exact.pvalue
assert auto.pvalue != asymptotic.pvalue
# other input is smaller than threshold; should use exact
auto = mannwhitneyu(y, x)
asymptotic = mannwhitneyu(x, y, method='asymptotic')
exact = mannwhitneyu(x, y, method='exact')
assert auto.pvalue == exact.pvalue
assert auto.pvalue != asymptotic.pvalue
# both inputs are larger than threshold; should use asymptotic
x = xp.asarray(rng.random(n+1))
y = xp.asarray(rng.random(n+1))
auto = mannwhitneyu(x, y)
asymptotic = mannwhitneyu(x, y, method='asymptotic')
exact = mannwhitneyu(x, y, method='exact')
assert auto.pvalue != exact.pvalue
assert auto.pvalue == asymptotic.pvalue
# both inputs are smaller than threshold, but there is a tie
# should use asymptotic
x = xp.asarray(rng.random(n-1))
y = xp.asarray(rng.random(n-1))
y = xpx.at(y)[3].set(x[3])
auto = mannwhitneyu(x, y)
asymptotic = mannwhitneyu(x, y, method='asymptotic')
exact = mannwhitneyu(x, y, method='exact')
assert auto.pvalue != exact.pvalue
assert auto.pvalue == asymptotic.pvalue
# --- Test Basic Functionality ---
x = [210.052110, 110.190630, 307.918612]
y = [436.08811482466416, 416.37397329768191, 179.96975939463582,
197.8118754228619, 34.038757281225756, 138.54220550921517,
128.7769351470246, 265.92721427951852, 275.6617533155341,
592.34083395416258, 448.73177590617018, 300.61495185038905,
187.97508449019588]
# This test was written for mann_whitney_u in gh-4933.
# Originally, the p-values for alternatives were swapped;
# this has been corrected and the tests have been refactored for
# compactness, but otherwise the tests are unchanged.
# R code for comparison, e.g.:
# options(digits = 16)
# x = c(210.052110, 110.190630, 307.918612)
# y = c(436.08811482466416, 416.37397329768191, 179.96975939463582,
# 197.8118754228619, 34.038757281225756, 138.54220550921517,
# 128.7769351470246, 265.92721427951852, 275.6617533155341,
# 592.34083395416258, 448.73177590617018, 300.61495185038905,
# 187.97508449019588)
# wilcox.test(x, y, alternative="g", exact=TRUE)
cases_basic = [[{"alternative": 'two-sided', "method": "asymptotic"},
(16., 0.6865041817876)],
[{"alternative": 'less', "method": "asymptotic"},
(16., 0.3432520908938)],
[{"alternative": 'greater', "method": "asymptotic"},
(16., 0.7047591913255)],
[{"alternative": 'two-sided', "method": "exact"},
(16., 0.7035714285714)],
[{"alternative": 'less', "method": "exact"},
(16., 0.3517857142857)],
[{"alternative": 'greater', "method": "exact"},
(16., 0.6946428571429)]]
@pytest.mark.parametrize(("kwds", "expected"), cases_basic)
@pytest.mark.parametrize("dtype", [None, 'float32', 'float64'])
def test_basic(self, kwds, expected, dtype, xp):
if is_numpy(xp) and xp.__version__ < "2.0" and dtype == 'float32':
pytest.skip("Scalar dtypes only respected after NEP 50.")
dtype = xp_default_dtype(xp) if dtype is None else getattr(xp, dtype)
x, y = xp.asarray(self.x, dtype=dtype), xp.asarray(self.y, dtype=dtype)
res = mannwhitneyu(x, y, **kwds)
xp_assert_close(res.statistic, xp.asarray(expected[0], dtype=dtype))
xp_assert_close(res.pvalue, xp.asarray(expected[1], dtype=dtype))
cases_continuity = [[{"alternative": 'two-sided', "use_continuity": True},
(23., 0.6865041817876)],
[{"alternative": 'less', "use_continuity": True},
(23., 0.7047591913255)],
[{"alternative": 'greater', "use_continuity": True},
(23., 0.3432520908938)],
[{"alternative": 'two-sided', "use_continuity": False},
(23., 0.6377328900502)],
[{"alternative": 'less', "use_continuity": False},
(23., 0.6811335549749)],
[{"alternative": 'greater', "use_continuity": False},
(23., 0.3188664450251)]]
@pytest.mark.parametrize(("kwds", "expected"), cases_continuity)
def test_continuity(self, kwds, expected, xp):
# When x and y are interchanged, less and greater p-values should
# swap (compare to above). This wouldn't happen if the continuity
# correction were applied in the wrong direction. Note that less and
# greater p-values do not sum to 1 when continuity correction is on,
# which is what we'd expect. Also check that results match R when
# continuity correction is turned off.
# Note that method='asymptotic' -> exact=FALSE
# and use_continuity=False -> correct=FALSE, e.g.:
# wilcox.test(x, y, alternative="t", exact=FALSE, correct=FALSE)
x, y = xp.asarray(self.x), xp.asarray(self.y)
res = mannwhitneyu(y, x, method='asymptotic', **kwds)
xp_assert_close(res.statistic, xp.asarray(expected[0]))
xp_assert_close(res.pvalue, xp.asarray(expected[1]))
def test_tie_correct(self, xp):
# Test tie correction against R's wilcox.test
# options(digits = 16)
# x = c(1, 2, 3, 4)
# y = c(1, 2, 3, 4, 5)
# wilcox.test(x, y, exact=FALSE)
x = xp.asarray([1., 2., 3., 4.])
y0 = xp.asarray([1., 2., 3., 4., 5.])
dy = xp.asarray([0., 1., 0., 1., 0.])*0.01
dy2 = xp.asarray([0., 0., 1., 0., 0.])*0.01
y = xp.stack([y0-0.01, y0-dy, y0-dy2, y0, y0+dy2, y0+dy, y0+0.01])
res = mannwhitneyu(x, y, axis=-1, method="asymptotic")
U_expected = [10, 9, 8.5, 8, 7.5, 7, 6]
p_expected = [1, 0.9017048037317, 0.804080657472, 0.7086240584439,
0.6197963884941, 0.5368784563079, 0.3912672792826]
xp_assert_equal(res.statistic, xp.asarray(U_expected))
xp_assert_close(res.pvalue, xp.asarray(p_expected))
# --- Test Exact Distribution of U ---
# These are tabulated values of the CDF of the exact distribution of
# the test statistic from pg 52 of reference [1] (Mann-Whitney Original)
pn3 = {1: [0.25, 0.5, 0.75], 2: [0.1, 0.2, 0.4, 0.6],
3: [0.05, .1, 0.2, 0.35, 0.5, 0.65]}
pn4 = {1: [0.2, 0.4, 0.6], 2: [0.067, 0.133, 0.267, 0.4, 0.6],
3: [0.028, 0.057, 0.114, 0.2, .314, 0.429, 0.571],
4: [0.014, 0.029, 0.057, 0.1, 0.171, 0.243, 0.343, 0.443, 0.557]}
pm5 = {1: [0.167, 0.333, 0.5, 0.667],
2: [0.047, 0.095, 0.19, 0.286, 0.429, 0.571],
3: [0.018, 0.036, 0.071, 0.125, 0.196, 0.286, 0.393, 0.5, 0.607],
4: [0.008, 0.016, 0.032, 0.056, 0.095, 0.143,
0.206, 0.278, 0.365, 0.452, 0.548],
5: [0.004, 0.008, 0.016, 0.028, 0.048, 0.075, 0.111,
0.155, 0.21, 0.274, 0.345, .421, 0.5, 0.579]}
pm6 = {1: [0.143, 0.286, 0.428, 0.571],
2: [0.036, 0.071, 0.143, 0.214, 0.321, 0.429, 0.571],
3: [0.012, 0.024, 0.048, 0.083, 0.131,
0.19, 0.274, 0.357, 0.452, 0.548],
4: [0.005, 0.01, 0.019, 0.033, 0.057, 0.086, 0.129,
0.176, 0.238, 0.305, 0.381, 0.457, 0.543], # the last element
# of the previous list, 0.543, has been modified from 0.545;
# I assume it was a typo
5: [0.002, 0.004, 0.009, 0.015, 0.026, 0.041, 0.063, 0.089,
0.123, 0.165, 0.214, 0.268, 0.331, 0.396, 0.465, 0.535],
6: [0.001, 0.002, 0.004, 0.008, 0.013, 0.021, 0.032, 0.047,
0.066, 0.09, 0.12, 0.155, 0.197, 0.242, 0.294, 0.350,
0.409, 0.469, 0.531]}
def test_exact_distribution(self):
# I considered parametrize. I decided against it.
setattr(_mwu_state, 's', _MWU(0, 0))
p_tables = {3: self.pn3, 4: self.pn4, 5: self.pm5, 6: self.pm6}
for n, table in p_tables.items():
for m, p in table.items():
# check p-value against table
u = np.arange(0, len(p))
_mwu_state.s.set_shapes(m, n)
assert_allclose(_mwu_state.s.cdf(k=u), p, atol=1e-3)
# check identity CDF + SF - PMF = 1
# ( In this implementation, SF(U) includes PMF(U) )
u2 = np.arange(0, m*n+1)
assert_allclose(_mwu_state.s.cdf(k=u2)
+ _mwu_state.s.sf(k=u2)
- _mwu_state.s.pmf(k=u2), 1)
# check symmetry about mean of U, i.e. pmf(U) = pmf(m*n-U)
pmf = _mwu_state.s.pmf(k=u2)
assert_allclose(pmf, pmf[::-1])
# check symmetry w.r.t. interchange of m, n
_mwu_state.s.set_shapes(n, m)
pmf2 = _mwu_state.s.pmf(k=u2)
assert_allclose(pmf, pmf2)
def test_asymptotic_behavior(self, xp):
rng = np.random.default_rng(12543)
# for small samples, the asymptotic test is not very accurate
x = xp.asarray(rng.random(5))
y = xp.asarray(rng.random(5))
res1 = mannwhitneyu(x, y, method="exact")
res2 = mannwhitneyu(x, y, method="asymptotic")
assert res1.statistic == res2.statistic
assert xp.abs(res1.pvalue - res2.pvalue) > 1e-2
# for large samples, they agree reasonably well
x = xp.asarray(rng.random(40))
y = xp.asarray(rng.random(40))
res1 = mannwhitneyu(x, y, method="exact")
res2 = mannwhitneyu(x, y, method="asymptotic")
assert res1.statistic == res2.statistic
assert xp.abs(res1.pvalue - res2.pvalue) < 1e-3
# --- Test Corner Cases ---
def test_exact_U_equals_mean(self, xp):
# Test U == m*n/2 with exact method
# Without special treatment, two-sided p-value > 1 because both
# one-sided p-values are > 0.5
x, y = xp.asarray([1., 2., 3.]), xp.asarray([1.5, 2.5])
res_l = mannwhitneyu(x, y, alternative="less", method="exact")
res_g = mannwhitneyu(x, y, alternative="greater", method="exact")
xp_assert_equal(res_l.pvalue, res_g.pvalue)
assert res_l.pvalue > 0.5
res = mannwhitneyu(x, y, alternative="two-sided", method="exact")
xp_assert_equal(res.statistic, xp.asarray(3.))
xp_assert_equal(res.pvalue, xp.asarray(1.))
# U == m*n/2 for asymptotic case tested in test_gh_2118
# The reason it's tricky for the asymptotic test has to do with
# continuity correction.
cases_scalar = [[{"alternative": 'two-sided', "method": "asymptotic"},
(0., 1.)],
[{"alternative": 'less', "method": "asymptotic"},
(0., 0.5)],
[{"alternative": 'greater', "method": "asymptotic"},
(0., 0.977249868052)],
[{"alternative": 'two-sided', "method": "exact"}, (0., 1)],
[{"alternative": 'less', "method": "exact"}, (0., 0.5)],
[{"alternative": 'greater', "method": "exact"}, (0., 1)]]
@pytest.mark.parametrize(("kwds", "result"), cases_scalar)
def test_scalar_data(self, kwds, result): # not important to preserve w/ array API
# just making sure scalars work
assert_allclose(mannwhitneyu(1, 2, **kwds), result)
def test_equal_scalar_data(self): # not important to preserve w/ array API
# when two scalars are equal, there is an -0.5/0 in the asymptotic
# approximation. R gives pvalue=1.0 for alternatives 'less' and
# 'greater' but NA for 'two-sided'. I don't see why, so I don't
# see a need for a special case to match that behavior.
assert_equal(mannwhitneyu(1, 1, method="exact"), (0.5, 1))
assert_equal(mannwhitneyu(1, 1, method="asymptotic"), (0.5, 1))
# without continuity correction, this becomes 0/0, which really
# is undefined
assert_equal(mannwhitneyu(1, 1, method="asymptotic",
use_continuity=False), (0.5, np.nan))
# --- Test Enhancements / Bug Reports ---
@pytest.mark.skip_xp_backends("jax.numpy", reason="lazy -> no _axis_nan_policy")
@pytest.mark.parametrize("method", ["asymptotic", "exact"])
def test_gh_12837_11113(self, method, xp):
# Test that behavior for broadcastable nd arrays is appropriate:
# output shape is correct and all values are equal to when the test
# is performed on one pair of samples at a time.
# Tests that gh-12837 and gh-11113 (requests for n-d input)
# are resolved
rng = np.random.default_rng(6083743794)
# arrays are broadcastable except for axis = -3
axis = -3
m, n = 7, 10 # sample sizes
x = rng.random((m, 3, 8))
y = rng.random((6, n, 1, 8)) + 0.1
res = mannwhitneyu(xp.asarray(x), xp.asarray(y), method=method, axis=axis)
shape = (6, 3, 8) # appropriate shape of outputs, given inputs
assert res.pvalue.shape == shape
assert res.statistic.shape == shape
# move axis of test to end for simplicity
x, y = np.moveaxis(x, axis, -1), np.moveaxis(y, axis, -1)
x = x[None, ...] # give x a zeroth dimension
assert x.ndim == y.ndim
x = np.broadcast_to(x, shape + (m,))
y = np.broadcast_to(y, shape + (n,))
assert x.shape[:-1] == shape
assert y.shape[:-1] == shape
# loop over pairs of samples
statistics = np.zeros(shape)
pvalues = np.zeros(shape)
for indices in product(*[range(i) for i in shape]):
xi = x[indices]
yi = y[indices]
temp = mannwhitneyu(xi, yi, method=method)
statistics[indices] = temp.statistic
pvalues[indices] = temp.pvalue
xp_assert_close(res.pvalue, xp.asarray(pvalues), atol=1e-16)
xp_assert_close(res.statistic, xp.asarray(statistics), atol=1e-16)
def test_gh_11355(self, xp):
# Test for correct behavior with NaN/Inf in input
x = [1, 2, 3, 4]
y = [3, 6, 7, 8, 9, 3, 2, 1, 4, 4, 5]
res1 = mannwhitneyu(xp.asarray(x), xp.asarray(y))
# Inf is not a problem. This is a rank test, and it's the largest value
x[0] = 1. # ensure floating point
y[4] = np.inf
res2 = mannwhitneyu(xp.asarray(x), xp.asarray(y))
xp_assert_equal(res1.statistic, res2.statistic)
xp_assert_equal(res1.pvalue, res2.pvalue)
@pytest.mark.skip_xp_backends("jax.numpy", reason="lazy -> no _axis_nan_policy")
def test_gh11355_nan(self, xp):
# NaNs should propagate by default.
x = [1., 2., 3., 4.]
y = [3, 6, 7, np.nan, 9, 3, 2, 1, 4, 4, 5]
res3 = mannwhitneyu(xp.asarray(x), xp.asarray(y))
xp_assert_equal(res3.statistic, xp.asarray(xp.nan))
xp_assert_equal(res3.pvalue, xp.asarray(xp.nan))
cases_11355 = [([1., 2, 3, 4],
[3, 6, 7, 8, np.inf, 3, 2, 1, 4, 4, 5],
10., 0.1297704873477),
([1., 2, 3, 4],
[3, 6, 7, 8, np.inf, np.inf, 2, 1, 4, 4, 5],
8.5, 0.08735617507695),
([1, 2, np.inf, 4],
[3, 6, 7, 8, np.inf, 3, 2, 1, 4, 4, 5],
17.5, 0.5988856695752),
([1, 2, np.inf, 4],
[3, 6, 7, 8, np.inf, np.inf, 2, 1, 4, 4, 5],
16., 0.4687165824462),
([1, np.inf, np.inf, 4],
[3, 6, 7, 8, np.inf, np.inf, 2, 1, 4, 4, 5],
24.5, 0.7912517950119)]
@pytest.mark.parametrize(("x", "y", "statistic", "pvalue"), cases_11355)
def test_gh_11355b(self, x, y, statistic, pvalue, xp):
# Test for correct behavior with NaN/Inf in input
res = mannwhitneyu(xp.asarray(x), xp.asarray(y), method='asymptotic')
xp_assert_close(res.statistic, xp.asarray(statistic), atol=1e-12)
xp_assert_close(res.pvalue, xp.asarray(pvalue), atol=1e-12)
cases_9184 = [[True, "less", "asymptotic", 0.900775348204],
[True, "greater", "asymptotic", 0.1223118025635],
[True, "two-sided", "asymptotic", 0.244623605127],
[False, "less", "asymptotic", 0.8896643190401],
[False, "greater", "asymptotic", 0.1103356809599],
[False, "two-sided", "asymptotic", 0.2206713619198],
[True, "less", "exact", 0.8967698967699],
[True, "greater", "exact", 0.1272061272061],
[True, "two-sided", "exact", 0.2544122544123]]
@pytest.mark.parametrize(("use_continuity", "alternative",
"method", "pvalue_exp"), cases_9184)
def test_gh_9184(self, use_continuity, alternative, method, pvalue_exp, xp):
# gh-9184 might be considered a doc-only bug. Please see the
# documentation to confirm that mannwhitneyu correctly notes
# that the output statistic is that of the first sample (x). In any
# case, check the case provided there against output from R.
# R code:
# options(digits=16)
# x <- c(0.80, 0.83, 1.89, 1.04, 1.45, 1.38, 1.91, 1.64, 0.73, 1.46)
# y <- c(1.15, 0.88, 0.90, 0.74, 1.21)
# wilcox.test(x, y, alternative = "less", exact = FALSE)
# wilcox.test(x, y, alternative = "greater", exact = FALSE)
# wilcox.test(x, y, alternative = "two.sided", exact = FALSE)
# wilcox.test(x, y, alternative = "less", exact = FALSE,
# correct=FALSE)
# wilcox.test(x, y, alternative = "greater", exact = FALSE,
# correct=FALSE)
# wilcox.test(x, y, alternative = "two.sided", exact = FALSE,
# correct=FALSE)
# wilcox.test(x, y, alternative = "less", exact = TRUE)
# wilcox.test(x, y, alternative = "greater", exact = TRUE)
# wilcox.test(x, y, alternative = "two.sided", exact = TRUE)
statistic_exp = 35.
x = xp.asarray([0.80, 0.83, 1.89, 1.04, 1.45, 1.38, 1.91, 1.64, 0.73, 1.46])
y = xp.asarray([1.15, 0.88, 0.90, 0.74, 1.21])
res = mannwhitneyu(x, y, use_continuity=use_continuity,
alternative=alternative, method=method)
xp_assert_equal(res.statistic, xp.asarray(statistic_exp))
xp_assert_close(res.pvalue, xp.asarray(pvalue_exp))
@pytest.mark.skip_xp_backends("jax.numpy", reason="lazy -> no _axis_nan_policy")
def test_gh_4067(self, xp):
# Test for correct behavior with all NaN input - default is propagate
nan = xp.asarray(xp.nan)
a = xp.stack([nan, nan, nan, nan, nan])
b = xp.stack([nan, nan, nan, nan, nan])
res = mannwhitneyu(a, b)
xp_assert_equal(res.statistic, xp.asarray(nan))
xp_assert_equal(res.pvalue, nan)
# All cases checked against R wilcox.test, e.g.
# options(digits=16)
# x = c(1, 2, 3)
# y = c(1.5, 2.5)
# wilcox.test(x, y, exact=FALSE, alternative='less')
cases_2118 = [[[1., 2., 3.], [1.5, 2.5], "greater", (3., 0.6135850036578)],
[[1., 2., 3.], [1.5, 2.5], "less", (3., 0.6135850036578)],
[[1., 2., 3.], [1.5, 2.5], "two-sided", (3., 1.0)],
[[1, 2, 3], [2], "greater", (1.5, 0.681324055883)],
[[1, 2, 3], [2], "less", (1.5, 0.681324055883)],
[[1, 2, 3], [2], "two-sided", (1.5, 1.)],
[[1, 2], [1, 2], "greater", (2., 0.667497228949)],
[[1, 2], [1, 2], "less", (2., 0.667497228949)],
[[1, 2], [1, 2], "two-sided", (2., 1.)]]
@pytest.mark.parametrize(["x", "y", "alternative", "expected"], cases_2118)
def test_gh_2118(self, x, y, alternative, expected, xp):
# test cases in which U == m*n/2 when method is asymptotic
# applying continuity correction could result in p-value > 1
res = mannwhitneyu(xp.asarray(x), xp.asarray(y), use_continuity=True,
alternative=alternative, method="asymptotic")
rtol = 1e-6 if xp_default_dtype(xp) == xp.float32 else 1e-12
xp_assert_close(res.statistic, xp.asarray(expected[0]), rtol=rtol)
xp_assert_close(res.pvalue, xp.asarray(expected[1]), rtol=rtol)
def test_gh19692_smaller_table(self):
# In gh-19692, we noted that the shape of the cache used in calculating
# p-values was dependent on the order of the inputs because the sample
# sizes n1 and n2 changed. This was indicative of unnecessary cache
# growth and redundant calculation. Check that this is resolved.
rng = np.random.default_rng(7600451795963068007)
m, n = 5, 11
x = rng.random(size=m)
y = rng.random(size=n)
setattr(_mwu_state, 's', _MWU(0, 0))
_mwu_state.s.reset() # reset cache
res = stats.mannwhitneyu(x, y, method='exact')
shape = _mwu_state.s.configurations.shape
assert shape[-1] == min(res.statistic, m*n - res.statistic) + 1
stats.mannwhitneyu(y, x, method='exact')
assert shape == _mwu_state.s.configurations.shape # same with reversed sizes
# Also, we weren't exploiting the symmetry of the null distribution
# to its full potential. Ensure that the null distribution is not
# evaluated explicitly for `k > m*n/2`.
_mwu_state.s.reset() # reset cache
stats.mannwhitneyu(x, 0*y, method='exact', alternative='greater')
shape = _mwu_state.s.configurations.shape
assert shape[-1] == 1 # k is smallest possible
stats.mannwhitneyu(0*x, y, method='exact', alternative='greater')
assert shape == _mwu_state.s.configurations.shape
@pytest.mark.parametrize('alternative', ['less', 'greater', 'two-sided'])
def test_permutation_method(self, alternative):
rng = np.random.default_rng(7600451795963068007)
x = rng.random(size=(2, 5))
y = rng.random(size=(2, 6))
res = stats.mannwhitneyu(x, y, method=stats.PermutationMethod(),
alternative=alternative, axis=1)
res2 = stats.mannwhitneyu(x, y, method='exact',
alternative=alternative, axis=1)
assert_allclose(res.statistic, res2.statistic, rtol=1e-15)
assert_allclose(res.pvalue, res2.pvalue, rtol=1e-15)
# Old tests moved from test_stats. Source of magic numbers unknown.
X = [19.8958398126694, 19.5452691647182, 19.0577309166425, 21.716543054589,
20.3269502208702, 20.0009273294025, 19.3440043632957, 20.4216806548105,
19.0649894736528, 18.7808043120398, 19.3680942943298, 19.4848044069953,
20.7514611265663, 19.0894948874598, 19.4975522356628, 18.9971170734274,
20.3239606288208, 20.6921298083835, 19.0724259532507, 18.9825187935021,
19.5144462609601, 19.8256857844223, 20.5174677102032, 21.1122407995892,
17.9490854922535, 18.2847521114727, 20.1072217648826, 18.6439891962179,
20.4970638083542, 19.5567594734914]
Y = [19.2790668029091, 16.993808441865, 18.5416338448258, 17.2634018833575,
19.1577183624616, 18.5119655377495, 18.6068455037221, 18.8358343362655,
19.0366413269742, 18.1135025515417, 19.2201873866958, 17.8344909022841,
18.2894380745856, 18.6661374133922, 19.9688601693252, 16.0672254617636,
19.00596360572, 19.201561539032, 19.0487501090183, 19.0847908674356]
rtol = 1e-14
def test_mannwhitneyu_one_sided(self, xp):
X, Y = xp.asarray(self.X), xp.asarray(self.Y)
u1, p1 = stats.mannwhitneyu(X, Y, alternative='less')
u2, p2 = stats.mannwhitneyu(Y, X, alternative='greater')
u3, p3 = stats.mannwhitneyu(X, Y, alternative='greater')
u4, p4 = stats.mannwhitneyu(Y, X, alternative='less')
xp_assert_equal(p1, p2)
xp_assert_equal(p3, p4)
assert p1 != p3
xp_assert_equal(u1, xp.asarray(498.))
xp_assert_equal(u2, xp.asarray(102.))
xp_assert_equal(u3, xp.asarray(498.))
xp_assert_equal(u4, xp.asarray(102.))
assert_allclose(p1, xp.asarray(0.999957683256589), rtol=self.rtol)
rtol = self.rtol if X.dtype == xp.float64 else 5e-4
assert_allclose(p3, xp.asarray(4.5941632666275e-05), rtol=rtol, atol=1e-16)
def test_mannwhitneyu_two_sided(self, xp):
X, Y = xp.asarray(self.X), xp.asarray(self.Y)
u1, p1 = stats.mannwhitneyu(X, Y, alternative='two-sided')
u2, p2 = stats.mannwhitneyu(Y, X, alternative='two-sided')
xp_assert_equal(p1, p2)
xp_assert_equal(u1, xp.asarray(498.))
xp_assert_equal(u2, xp.asarray(102.))
rtol = self.rtol if X.dtype == xp.float64 else 5e-4
xp_assert_close(p1, xp.asarray(9.188326533255e-05), rtol=rtol, atol=1e-16)
def test_mannwhitneyu_no_correct_one_sided(self, xp):
X, Y = xp.asarray(self.X), xp.asarray(self.Y)
u1, p1 = stats.mannwhitneyu(X, Y, False, alternative='less')
u2, p2 = stats.mannwhitneyu(Y, X, False, alternative='greater')
u3, p3 = stats.mannwhitneyu(X, Y, False, alternative='greater')
u4, p4 = stats.mannwhitneyu(Y, X, False, alternative='less')
xp_assert_equal(p1, p2)
xp_assert_equal(p3, p4)
assert p1 != p3
xp_assert_equal(u1, xp.asarray(498.))
xp_assert_equal(u2, xp.asarray(102.))
xp_assert_equal(u3, xp.asarray(498.))
xp_assert_equal(u4, xp.asarray(102.))
rtol = self.rtol if X.dtype == xp.float64 else 5e-4
xp_assert_close(p1, xp.asarray(0.999955905990004), rtol=rtol, atol=1e-16)
xp_assert_close(p3, xp.asarray(4.40940099958089e-05), rtol=rtol, atol=1e-16)
def test_mannwhitneyu_no_correct_two_sided(self, xp):
X, Y = xp.asarray(self.X), xp.asarray(self.Y)
u1, p1 = stats.mannwhitneyu(X, Y, False, alternative='two-sided')
u2, p2 = stats.mannwhitneyu(Y, X, False, alternative='two-sided')
xp_assert_equal(p1, p2)
xp_assert_equal(u1, xp.asarray(498.))
xp_assert_equal(u2, xp.asarray(102.))
rtol = self.rtol if X.dtype == xp.float64 else 5e-4
xp_assert_close(p1, xp.asarray(8.81880199916178e-05), rtol=rtol, atol=1e-16)
def test_mannwhitneyu_ones(self, xp):
# test for gh-1428
x = xp.asarray([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1.])
y = xp.asarray([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1.,
2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1.,
1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2.,
2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2.,
2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1.,
1., 1., 1., 1.])
# p-value from R, e.g. wilcox.test(x, y, alternative="g")
res = stats.mannwhitneyu(x, y, alternative='less')
xp_assert_close(res.statistic, xp.asarray(16980.5))
xp_assert_close(res.pvalue, xp.asarray(2.8214327656317373e-5))
res = stats.mannwhitneyu(x, y, alternative='greater')
xp_assert_close(res.statistic, xp.asarray(16980.5))
xp_assert_close(res.pvalue, xp.asarray(0.9999719954296))
res = stats.mannwhitneyu(x, y, alternative='two-sided')
xp_assert_close(res.statistic, xp.asarray(16980.5))
xp_assert_close(res.pvalue, xp.asarray(5.642865531266e-5))
| TestMannWhitneyU |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/operator1.py | {
"start": 173,
"end": 234
} | class ____:
def __eq__(self, Foo):
return "equal"
| A |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1137669,
"end": 1138283
} | class ____(ScaleInvalidDataShowAsopacity):
"""
ScaleInvalidDataShowAsValueopacity schema wrapper.
Parameters
----------
value : float
The overall opacity (value between [0,1]).
**Default value:** ``0.7`` for non-aggregate plots with ``point``, ``tick``,
``circle``, or ``square`` marks or layered ``bar`` charts and ``1`` otherwise.
"""
_schema = {"$ref": '#/definitions/ScaleInvalidDataShowAsValue<"opacity">'}
def __init__(self, value: Optional[float] = Undefined, **kwds):
super().__init__(value=value, **kwds)
| ScaleInvalidDataShowAsValueopacity |
python | prabhupant__python-ds | data_structures/graphs/bfs.py | {
"start": 37,
"end": 751
} | class ____:
def __init__(self, vertices):
self.graph = defaultdict(list)
self.vertices = vertices
def add_edge(self, u, v):
self.graph[u].append(v)
def bfs(self, s):
visited = [False] * self.vertices
queue = []
queue.append(s)
visited[s] = True
bfs = []
while queue:
s = queue.pop(0)
print(s, end=' ')
for i in self.graph[s]:
if visited[i] == False:
queue.append(i)
visited[i] = True
g = Graph(6)
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 3)
g.add_edge(0, 3)
g.add_edge(2, 4)
g.add_edge(3, 4)
g.add_edge(3, 5)
g.bfs(0)
| Graph |
python | crytic__slither | slither/printers/guidance/echidna.py | {
"start": 16554,
"end": 19193
} | class ____(AbstractPrinter):
ARGUMENT = "echidna"
HELP = "Export Echidna guiding information"
WIKI = "https://github.com/trailofbits/slither/wiki/Printer-documentation#echidna"
def output(self, filename: str) -> Output: # pylint: disable=too-many-locals
"""
Output the inheritance relation
_filename is not used
Args:
_filename(string)
"""
contracts = self.slither.contracts
payable = _extract_payable(contracts)
timestamp = _extract_solidity_variable_usage(
contracts, SolidityVariableComposed("block.timestamp")
)
block_number = _extract_solidity_variable_usage(
contracts, SolidityVariableComposed("block.number")
)
msg_sender = _extract_solidity_variable_usage(
contracts, SolidityVariableComposed("msg.sender")
)
msg_gas = _extract_solidity_variable_usage(contracts, SolidityVariableComposed("msg.gas"))
assert_usage = _extract_assert(contracts)
cst_functions = _extract_constant_functions(contracts)
(cst_used, cst_used_in_binary) = _extract_constants(contracts)
functions_relations = _extract_function_relations(contracts)
constructors = {
contract.name: contract.constructor.full_name
for contract in contracts
if contract.constructor
}
external_calls = _have_external_calls(contracts)
# call_parameters = _call_a_parameter(self.slither, contracts)
use_balance = _use_balance(contracts)
with_fallback = list(_with_fallback(contracts))
with_receive = list(_with_receive(contracts))
d = {
"payable": payable,
"timestamp": timestamp,
"block_number": block_number,
"msg_sender": msg_sender,
"msg_gas": msg_gas,
"assert": assert_usage,
"constant_functions": cst_functions,
"constants_used": cst_used,
"constants_used_in_binary": cst_used_in_binary,
"functions_relations": functions_relations,
"constructors": constructors,
"have_external_calls": external_calls,
# "call_a_parameter": call_parameters,
"use_balance": use_balance,
"solc_versions": [unit.solc_version for unit in self.slither.compilation_units],
"with_fallback": with_fallback,
"with_receive": with_receive,
}
self.info(json.dumps(d, indent=4))
res = self.generate_output(json.dumps(d, indent=4))
return res
| Echidna |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/ext/asyncio/base.py | {
"start": 3720,
"end": 8459
} | class ____(StartableContext[_T_co]):
__slots__ = ("gen",)
gen: AsyncGenerator[_T_co, Any]
def __init__(
self,
func: Callable[..., AsyncIterator[_T_co]],
args: Tuple[Any, ...],
kwds: Dict[str, Any],
):
self.gen = func(*args, **kwds) # type: ignore
async def start(self, is_ctxmanager: bool = False) -> _T_co:
try:
start_value = await anext(self.gen)
except StopAsyncIteration:
raise RuntimeError("generator didn't yield") from None
# if not a context manager, then interrupt the generator, don't
# let it complete. this step is technically not needed, as the
# generator will close in any case at gc time. not clear if having
# this here is a good idea or not (though it helps for clarity IMO)
if not is_ctxmanager:
await self.gen.aclose()
return start_value
async def __aexit__(
self, typ: Any, value: Any, traceback: Any
) -> Optional[bool]:
# vendored from contextlib.py
if typ is None:
try:
await anext(self.gen)
except StopAsyncIteration:
return False
else:
raise RuntimeError("generator didn't stop")
else:
if value is None:
# Need to force instantiation so we can reliably
# tell if we get the same exception back
value = typ()
try:
await self.gen.athrow(value)
except StopAsyncIteration as exc:
# Suppress StopIteration *unless* it's the same exception that
# was passed to throw(). This prevents a StopIteration
# raised inside the "with" statement from being suppressed.
return exc is not value
except RuntimeError as exc:
# Don't re-raise the passed in exception. (issue27122)
if exc is value:
return False
# Avoid suppressing if a Stop(Async)Iteration exception
# was passed to athrow() and later wrapped into a RuntimeError
# (see PEP 479 for sync generators; async generators also
# have this behavior). But do this only if the exception
# wrapped
# by the RuntimeError is actully Stop(Async)Iteration (see
# issue29692).
if (
isinstance(value, (StopIteration, StopAsyncIteration))
and exc.__cause__ is value
):
return False
raise
except BaseException as exc:
# only re-raise if it's *not* the exception that was
# passed to throw(), because __exit__() must not raise
# an exception unless __exit__() itself failed. But throw()
# has to raise the exception to signal propagation, so this
# fixes the impedance mismatch between the throw() protocol
# and the __exit__() protocol.
if exc is not value:
raise
return False
raise RuntimeError("generator didn't stop after athrow()")
def asyncstartablecontext(
func: Callable[..., AsyncIterator[_T_co]],
) -> Callable[..., GeneratorStartableContext[_T_co]]:
"""@asyncstartablecontext decorator.
the decorated function can be called either as ``async with fn()``, **or**
``await fn()``. This is decidedly different from what
``@contextlib.asynccontextmanager`` supports, and the usage pattern
is different as well.
Typical usage:
.. sourcecode:: text
@asyncstartablecontext
async def some_async_generator(<arguments>):
<setup>
try:
yield <value>
except GeneratorExit:
# return value was awaited, no context manager is present
# and caller will .close() the resource explicitly
pass
else:
<context manager cleanup>
Above, ``GeneratorExit`` is caught if the function were used as an
``await``. In this case, it's essential that the cleanup does **not**
occur, so there should not be a ``finally`` block.
If ``GeneratorExit`` is not invoked, this means we're in ``__aexit__``
and we were invoked as a context manager, and cleanup should proceed.
"""
@functools.wraps(func)
def helper(*args: Any, **kwds: Any) -> GeneratorStartableContext[_T_co]:
return GeneratorStartableContext(func, args, kwds)
return helper
| GeneratorStartableContext |
python | dagster-io__dagster | python_modules/libraries/dagster-looker/dagster_looker/lkml/liquid_utils.py | {
"start": 1671,
"end": 1728
} | class ____(DateTag):
name = TAG_DATE_START
| DateStartTag |
python | tensorflow__tensorflow | tensorflow/python/distribute/parameter_server_strategy.py | {
"start": 2305,
"end": 7341
} | class ____(distribute_lib.StrategyV1):
"""An asynchronous multi-worker parameter server tf.distribute strategy.
This strategy requires two roles: workers and parameter servers. Variables and
updates to those variables will be assigned to parameter servers and other
operations are assigned to workers.
When each worker has more than one GPU, operations will be replicated on all
GPUs. Even though operations may be replicated, variables are not and each
worker shares a common view for which parameter server a variable is assigned
to.
By default it uses `TFConfigClusterResolver` to detect configurations for
multi-worker training. This requires a 'TF_CONFIG' environment variable and
the 'TF_CONFIG' must have a cluster spec.
This class assumes each worker is running the same code independently, but
parameter servers are running a standard server. This means that while each
worker will synchronously compute a single gradient update across all GPUs,
updates between workers proceed asynchronously. Operations that occur only on
the first replica (such as incrementing the global step), will occur on the
first replica *of every worker*.
It is expected to call `call_for_each_replica(fn, ...)` for any
operations which potentially can be replicated across replicas (i.e. multiple
GPUs) even if there is only CPU or one GPU. When defining the `fn`, extra
caution needs to be taken:
1) It is generally not recommended to open a device scope under the strategy's
scope. A device scope (i.e. calling `tf.device`) will be merged with or
override the device for operations but will not change the device for
variables.
2) It is also not recommended to open a colocation scope (i.e. calling
`tf.compat.v1.colocate_with`) under the strategy's scope. For colocating
variables, use `strategy.extended.colocate_vars_with` instead. Colocation of
ops will possibly create device assignment conflicts.
Note: This strategy only works with the Estimator API. Pass an instance of
this strategy to the `experimental_distribute` argument when you create the
`RunConfig`. This instance of `RunConfig` should then be passed to the
`Estimator` instance on which `train_and_evaluate` is called.
For Example:
```
strategy = tf.distribute.experimental.ParameterServerStrategy()
run_config = tf.estimator.RunConfig(
experimental_distribute.train_distribute=strategy)
estimator = tf.estimator.Estimator(config=run_config)
tf.estimator.train_and_evaluate(estimator,...)
```
"""
def __init__(self, cluster_resolver=None):
"""Initializes this strategy with an optional `cluster_resolver`.
Args:
cluster_resolver: Optional
`tf.distribute.cluster_resolver.ClusterResolver` object. Defaults to a
`tf.distribute.cluster_resolver.TFConfigClusterResolver`.
"""
if cluster_resolver is None:
cluster_resolver = tfconfig_cluster_resolver.TFConfigClusterResolver()
super(ParameterServerStrategyV1, self).__init__(
ParameterServerStrategyExtended(
self, cluster_resolver=cluster_resolver))
distribute_lib.distribution_strategy_gauge.get_cell("V1").set(
"ParameterServerStrategy")
def experimental_distribute_dataset(self, dataset, options=None):
if (options and options.experimental_replication_mode ==
distribute_lib.InputReplicationMode.PER_REPLICA):
raise NotImplementedError(
"InputReplicationMode.PER_REPLICA "
"is only supported in "
"`experimental_distribute_datasets_from_function`."
)
self._raise_pss_error_if_eager()
super(ParameterServerStrategyV1,
self).experimental_distribute_dataset(dataset=dataset,
options=options)
def distribute_datasets_from_function(self, dataset_fn, options=None):
if (options and options.experimental_replication_mode ==
distribute_lib.InputReplicationMode.PER_REPLICA):
raise NotImplementedError(
"InputReplicationMode.PER_REPLICA "
"is only supported in "
"`experimental_distribute_datasets_from_function` "
"of tf.distribute.MirroredStrategy")
self._raise_pss_error_if_eager()
super(ParameterServerStrategyV1, self).distribute_datasets_from_function(
dataset_fn=dataset_fn, options=options)
def run(self, fn, args=(), kwargs=None, options=None):
self._raise_pss_error_if_eager()
super(ParameterServerStrategyV1, self).run(
fn, args=args, kwargs=kwargs, options=options)
def scope(self):
self._raise_pss_error_if_eager()
return super(ParameterServerStrategyV1, self).scope()
def _raise_pss_error_if_eager(self):
if context.executing_eagerly():
raise NotImplementedError(
"`tf.compat.v1.distribute.experimental.ParameterServerStrategy` "
"currently only works with the tf.Estimator API")
# TODO(josh11b): Switch to V2 when we no longer need to support tf.compat.v1.
| ParameterServerStrategyV1 |
python | facebook__pyre-check | client/commands/commands.py | {
"start": 938,
"end": 1152
} | class ____(Exception):
exit_code: ExitCode
def __init__(self, message: str, exit_code: ExitCode = ExitCode.FAILURE) -> None:
super().__init__(message)
self.exit_code = exit_code
| ClientException |
python | spyder-ide__spyder | external-deps/qtconsole/examples/embed_qtconsole.py | {
"start": 934,
"end": 1580
} | class ____(QtWidgets.QMainWindow):
"""A window that contains a single Qt console."""
def __init__(self):
super().__init__()
self.jupyter_widget = make_jupyter_widget_with_kernel()
self.setCentralWidget(self.jupyter_widget)
def shutdown_kernel(self):
print('Shutting down kernel...')
self.jupyter_widget.kernel_client.stop_channels()
self.jupyter_widget.kernel_manager.shutdown_kernel()
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
window = MainWindow()
window.show()
app.aboutToQuit.connect(window.shutdown_kernel)
sys.exit(app.exec_())
| MainWindow |
python | getsentry__sentry | src/sentry/rules/conditions/event_attribute.py | {
"start": 733,
"end": 2652
} | class ____(ABC):
minimum_path_length: int
@classmethod
def handle(cls, path: list[str], event: GroupEvent) -> list[str]:
if len(path) < cls.minimum_path_length:
return []
return cls._handle(path, event)
@classmethod
@abstractmethod
def _handle(cls, path: list[str], event: GroupEvent) -> list[str]:
raise NotImplementedError
attribute_registry = Registry[type[AttributeHandler]]()
# Maps attributes to snuba columns
ATTR_CHOICES: dict[str, Columns | None] = {
"message": Columns.MESSAGE,
"platform": Columns.PLATFORM,
"environment": Columns.ENVIRONMENT,
"type": Columns.TYPE,
"error.handled": Columns.ERROR_HANDLED,
"error.unhandled": Columns.ERROR_HANDLED,
"error.main_thread": Columns.ERROR_MAIN_THREAD,
"exception.type": Columns.ERROR_TYPE,
"exception.value": Columns.ERROR_VALUE,
"user.id": Columns.USER_ID,
"user.email": Columns.USER_EMAIL,
"user.username": Columns.USER_USERNAME,
"user.ip_address": Columns.USER_IP_ADDRESS,
"http.method": Columns.HTTP_METHOD,
"http.url": Columns.HTTP_URL,
"http.status_code": Columns.HTTP_STATUS_CODE,
"sdk.name": Columns.SDK_NAME,
"stacktrace.code": None,
"stacktrace.module": Columns.STACK_MODULE,
"stacktrace.filename": Columns.STACK_FILENAME,
"stacktrace.abs_path": Columns.STACK_ABS_PATH,
"stacktrace.package": Columns.STACK_PACKAGE,
"unreal.crash_type": Columns.UNREAL_CRASH_TYPE,
"app.in_foreground": Columns.APP_IN_FOREGROUND,
"os.distribution_name": Columns.OS_DISTRIBUTION_NAME,
"os.distribution_version": Columns.OS_DISTRIBUTION_VERSION,
"symbolicated_in_app": Columns.SYMBOLICATED_IN_APP,
"ota_updates.channel": Columns.OTA_UPDATES_CHANNEL,
"ota_updates.runtime_version": Columns.OTA_UPDATES_RUNTIME_VERSION,
"ota_updates.update_id": Columns.OTA_UPDATES_UPDATE_ID,
}
| AttributeHandler |
python | apache__airflow | providers/microsoft/azure/tests/unit/microsoft/azure/hooks/test_asb.py | {
"start": 1637,
"end": 12320
} | class ____:
@pytest.fixture(autouse=True)
def setup_test_cases(self, create_mock_connection):
self.queue_name = "test_queue"
self.conn_id = "azure_service_bus_default"
self.connection_string = (
"Endpoint=sb://test-service-bus-provider.servicebus.windows.net/;"
"SharedAccessKeyName=Test;SharedAccessKey=1234566acbc"
)
self.mock_conn = create_mock_connection(
Connection(
conn_id=self.conn_id,
conn_type="azure_service_bus",
schema=self.connection_string,
)
)
self.mock_conn_without_schema = Connection(
conn_id="azure_service_bus_default",
conn_type="azure_service_bus",
schema="",
extra={"fully_qualified_namespace": "fully_qualified_namespace"},
)
def test_get_conn(self):
hook = AdminClientHook(azure_service_bus_conn_id=self.conn_id)
assert isinstance(hook.get_conn(), ServiceBusAdministrationClient)
@mock.patch(f"{MODULE}.get_sync_default_azure_credential")
@mock.patch(f"{MODULE}.AdminClientHook.get_connection")
def test_get_conn_fallback_to_default_azure_credential_when_schema_is_not_provided(
self, mock_connection, mock_default_azure_credential
):
mock_connection.return_value = self.mock_conn_without_schema
hook = AdminClientHook(azure_service_bus_conn_id=self.conn_id)
assert isinstance(hook.get_conn(), ServiceBusAdministrationClient)
mock_default_azure_credential.assert_called_with(
managed_identity_client_id=None, workload_identity_tenant_id=None
)
@mock.patch("azure.servicebus.management.QueueProperties")
@mock.patch(f"{MODULE}.AdminClientHook.get_conn")
def test_create_queue(self, mock_sb_admin_client, mock_queue_properties):
"""
Test `create_queue` hook function with mocking connection, queue properties value and
the azure service bus `create_queue` function
"""
mock_queue_properties.name = self.queue_name
mock_sb_admin_client.return_value.__enter__.return_value.create_queue.return_value = (
mock_queue_properties
)
hook = AdminClientHook(azure_service_bus_conn_id=self.conn_id)
response = hook.create_queue(self.queue_name)
assert response == mock_queue_properties
@mock.patch(f"{MODULE}.ServiceBusAdministrationClient")
def test_create_queue_exception(self, mock_sb_admin_client):
"""Test `create_queue` functionality to raise ValueError by passing queue name as None"""
hook = AdminClientHook(azure_service_bus_conn_id=self.conn_id)
with pytest.raises(TypeError):
hook.create_queue(None)
@mock.patch(f"{MODULE}.AdminClientHook.get_conn")
def test_delete_queue(self, mock_sb_admin_client):
"""
Test Delete queue functionality by passing queue name, assert the function with values,
mock the azure service bus function `delete_queue`
"""
hook = AdminClientHook(azure_service_bus_conn_id=self.conn_id)
hook.delete_queue(self.queue_name)
expected_calls = [mock.call().__enter__().delete_queue(self.queue_name)]
mock_sb_admin_client.assert_has_calls(expected_calls)
@mock.patch(f"{MODULE}.ServiceBusAdministrationClient")
def test_delete_queue_exception(self, mock_sb_admin_client):
"""Test `delete_queue` functionality to raise ValueError, by passing queue name as None"""
hook = AdminClientHook(azure_service_bus_conn_id=self.conn_id)
with pytest.raises(TypeError):
hook.delete_queue(None)
# Test creating a topic using hook method `create_topic`
@mock.patch("azure.servicebus.management.TopicProperties")
@mock.patch(f"{MODULE}.AdminClientHook.get_conn")
def test_create_topic(self, mock_sb_admin_client, mock_topic_properties):
"""
Test `create_topic` hook function with mocking connection, topic properties value and
the azure service bus `create_topic` function
"""
topic_name = "test_topic_name"
mock_topic_properties.name = topic_name
mock_sb_admin_client.return_value.__enter__.return_value.create_topic.return_value = (
mock_topic_properties
)
hook = AdminClientHook(azure_service_bus_conn_id=self.conn_id)
with mock.patch.object(hook.log, "info") as mock_log_info:
hook.create_topic(topic_name)
assert mock_topic_properties.name == topic_name
mock_log_info.assert_called_with("Created Topic %s", topic_name)
# Test creating subscription with topic name and subscription name using hook method `create_subscription`
@mock.patch("azure.servicebus.management.SubscriptionProperties")
@mock.patch(f"{MODULE}.AdminClientHook.get_conn")
def test_create_subscription(self, mock_sb_admin_client, mock_subscription_properties):
"""
Test `create_subscription` hook function with mocking connection, subscription properties value and
the azure service bus `create_subscription` function
"""
topic_name = "test_topic_name"
subscription_name = "test_subscription_name"
mock_subscription_properties.name = subscription_name
mock_sb_admin_client.return_value.__enter__.return_value.create_subscription.return_value = (
mock_subscription_properties
)
hook = AdminClientHook(azure_service_bus_conn_id=self.conn_id)
hook.create_subscription(topic_name, subscription_name, dead_lettering_on_message_expiration=False)
mock_sb_admin_client.return_value.__enter__.return_value.create_subscription.assert_called_once_with(
topic_name=topic_name,
subscription_name=subscription_name,
lock_duration=None,
requires_session=None,
default_message_time_to_live=None,
dead_lettering_on_message_expiration=False,
dead_lettering_on_filter_evaluation_exceptions=None,
max_delivery_count=10,
enable_batched_operations=True,
forward_to=None,
user_metadata=None,
forward_dead_lettered_messages_to=None,
auto_delete_on_idle=None,
)
assert mock_subscription_properties.name == subscription_name
# Test creating subscription with topic name, subscription name, correlation rule and rule naame
# using hook method `create_subscription`
@mock.patch("azure.servicebus.management.RuleProperties")
@mock.patch("azure.servicebus.management.SubscriptionProperties")
@mock.patch(f"{MODULE}.AdminClientHook.get_conn")
def test_create_subscription_with_rule(
self, mock_sb_admin_client, mock_subscription_properties, mock_rule_properties
):
"""
Test `create_subscription` hook function with mocking connection, subscription properties value and
the azure service bus `create_subscription` function
"""
subscription_name = "test_subscription_name"
mock_rule_name = "test_rule_name"
mock_subscription_properties.name = subscription_name
mock_rule_properties.name = mock_rule_name
mock_sb_admin_client.return_value.__enter__.return_value.create_subscription.return_value = (
mock_subscription_properties
)
mock_sb_admin_client.return_value.__enter__.return_value.create_rule.return_value = (
mock_rule_properties
)
hook = AdminClientHook(azure_service_bus_conn_id=self.conn_id)
hook.create_subscription("test_topic_name", subscription_name)
assert mock_subscription_properties.name == subscription_name
assert mock_rule_properties.name == mock_rule_name
@mock.patch("azure.servicebus.management.SubscriptionProperties")
@mock.patch(f"{MODULE}.AdminClientHook.get_conn")
def test_modify_subscription(self, mock_sb_admin_client, mock_subscription_properties):
"""
Test modify subscription functionality by ensuring correct data is copied into properties
and passed to update_subscription method of connection mocking the azure service bus function
`update_subscription`
"""
subscription_name = "test_subscription_name"
topic_name = "test_topic_name"
hook = AdminClientHook(azure_service_bus_conn_id=self.conn_id)
mock_sb_admin_client.return_value.__enter__.return_value.get_subscription.return_value = (
mock_subscription_properties
)
hook.update_subscription(
topic_name,
subscription_name,
max_delivery_count=3,
dead_lettering_on_message_expiration=True,
enable_batched_operations=True,
)
expected_calls = [
mock.call().__enter__().get_subscription(topic_name, subscription_name),
mock.call().__enter__().update_subscription(topic_name, mock_subscription_properties),
mock.call().__enter__().get_subscription(topic_name, subscription_name),
]
mock_sb_admin_client.assert_has_calls(expected_calls)
@mock.patch(f"{MODULE}.AdminClientHook.get_conn")
def test_delete_subscription(self, mock_sb_admin_client):
"""
Test Delete subscription functionality by passing subscription name and topic name,
assert the function with values, mock the azure service bus function `delete_subscription`
"""
subscription_name = "test_subscription_name"
topic_name = "test_topic_name"
hook = AdminClientHook(azure_service_bus_conn_id=self.conn_id)
hook.delete_subscription(subscription_name, topic_name)
expected_calls = [mock.call().__enter__().delete_subscription(topic_name, subscription_name)]
mock_sb_admin_client.assert_has_calls(expected_calls)
@pytest.mark.parametrize(
("mock_subscription_name", "mock_topic_name"),
[("subscription_1", None), (None, "topic_1")],
)
@mock.patch(f"{MODULE}.AdminClientHook")
def test_delete_subscription_exception(
self, mock_sb_admin_client, mock_subscription_name, mock_topic_name
):
"""
Test `delete_subscription` functionality to raise AirflowException,
by passing subscription name and topic name as None and pytest raise Airflow Exception
"""
hook = AdminClientHook(azure_service_bus_conn_id=self.conn_id)
with pytest.raises(TypeError):
hook.delete_subscription(mock_subscription_name, mock_topic_name)
| TestAdminClientHook |
python | django__django | tests/serializers/test_json.py | {
"start": 9069,
"end": 9750
} | class ____(
SerializersTransactionTestBase, TransactionTestCase
):
serializer_name = "json"
fwd_ref_str = """[
{
"pk": 1,
"model": "serializers.article",
"fields": {
"headline": "Forward references pose no problem",
"pub_date": "2006-06-16T15:00:00",
"categories": [1],
"author": 1
}
},
{
"pk": 1,
"model": "serializers.category",
"fields": {
"name": "Reference"
}
},
{
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}]"""
| JsonSerializerTransactionTestCase |
python | huggingface__transformers | src/transformers/modeling_outputs.py | {
"start": 51012,
"end": 55202
} | class ____(ModelOutput):
"""
Base class for sequence-to-sequence language models outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`EncoderDecoderCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.EncoderDecoderCache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[EncoderDecoderCache] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
encoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
| Seq2SeqLMOutput |
python | doocs__leetcode | solution/1300-1399/1372.Longest ZigZag Path in a Binary Tree/Solution.py | {
"start": 192,
"end": 534
} | class ____:
def longestZigZag(self, root: TreeNode) -> int:
def dfs(root, l, r):
if root is None:
return
nonlocal ans
ans = max(ans, l, r)
dfs(root.left, r + 1, 0)
dfs(root.right, 0, l + 1)
ans = 0
dfs(root, 0, 0)
return ans
| Solution |
python | ray-project__ray | python/ray/data/tests/test_predicate_pushdown.py | {
"start": 13087,
"end": 15103
} | class ____:
"""Tests for pushing predicates into Read operators.
When a data source supports predicate pushdown (like Parquet),
the filter should be absorbed into the Read operator itself.
"""
@pytest.fixture
def parquet_ds(self, ray_start_regular_shared):
return ray.data.read_parquet("example://iris.parquet")
def test_complex_pipeline_all_filters_push_to_read(self, parquet_ds):
"""Complex pipeline: filters should push through all operators into Read.
Pipeline: Read -> Filter -> Rename -> Filter -> Sort -> Repartition
-> Filter -> Limit -> Filter
All filters should fuse, push through all operators, rebind through rename,
and be absorbed into the Read operator.
"""
ds = (
parquet_ds.filter(expr=col("sepal.length") > 4.0)
.rename_columns({"sepal.length": "len", "sepal.width": "width"})
.filter(expr=col("len") < 7.0)
.sort("len")
.repartition(3)
.filter(expr=col("width") > 2.5)
.limit(100)
.filter(expr=col("len") > 4.5)
)
# Verify correctness: should apply all filters correctly
expected = (
parquet_ds.filter(
expr=(col("sepal.length") > 4.0)
& (col("sepal.length") < 7.0)
& (col("sepal.width") > 2.5)
& (col("sepal.length") > 4.5)
)
.rename_columns({"sepal.length": "len", "sepal.width": "width"})
.sort("len")
.repartition(3)
.limit(100)
)
assert rows_same(ds.to_pandas(), expected.to_pandas())
# Verify plan: all filters pushed into Read, passthrough ops remain
optimized_plan = LogicalOptimizer().optimize(ds._plan._logical_plan)
assert not plan_has_operator(
optimized_plan, Filter
), "No Filter operators should remain after pushdown into Read"
| TestPredicatePushdownIntoRead |
python | getsentry__sentry | tests/apidocs/endpoints/organizations/test_event_id_lookup.py | {
"start": 136,
"end": 672
} | class ____(APIDocsTestCase):
def setUp(self) -> None:
event = self.create_event("a", message="oh no")
self.url = reverse(
"sentry-api-0-event-id-lookup",
kwargs={"organization_id_or_slug": self.organization.slug, "event_id": event.event_id},
)
self.login_as(user=self.user)
def test_get(self) -> None:
response = self.client.get(self.url)
request = RequestFactory().get(self.url)
self.validate_schema(request, response)
| OrganizationEventIDLookupDocs |
python | huggingface__transformers | src/transformers/models/janus/modular_janus.py | {
"start": 24796,
"end": 24866
} | class ____(ChameleonVQVAEEncoderAttnBlock):
pass
| JanusVQVAEAttnBlock |
python | neetcode-gh__leetcode | python/0091-decode-ways.py | {
"start": 0,
"end": 870
} | class ____:
def numDecodings(self, s: str) -> int:
# Memoization
dp = {len(s): 1}
def dfs(i):
if i in dp:
return dp[i]
if s[i] == "0":
return 0
res = dfs(i + 1)
if i + 1 < len(s) and (
s[i] == "1" or s[i] == "2" and s[i + 1] in "0123456"
):
res += dfs(i + 2)
dp[i] = res
return res
return dfs(0)
# Dynamic Programming
dp = {len(s): 1}
for i in range(len(s) - 1, -1, -1):
if s[i] == "0":
dp[i] = 0
else:
dp[i] = dp[i + 1]
if i + 1 < len(s) and (
s[i] == "1" or s[i] == "2" and s[i + 1] in "0123456"
):
dp[i] += dp[i + 2]
return dp[0]
| Solution |
python | neetcode-gh__leetcode | python/0108-convert-sorted-array-to-binary-search-tree.py | {
"start": 192,
"end": 517
} | class ____:
def sortedArrayToBST(self, nums: List[int]) -> Optional[TreeNode]:
if not nums:
return None
mid = len(nums)//2
root = TreeNode(nums[mid])
root.left = self.sortedArrayToBST(nums[:mid])
root.right = self.sortedArrayToBST(nums[mid+1:])
return root
| Solution |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_S.py | {
"start": 13569,
"end": 14749
} | class ____(Benchmark):
r"""
Schwefel 20 objective function.
This class defines the Schwefel 20 [1]_ global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\text{Schwefel20}}(x) = \sum_{i=1}^n \lvert x_i \rvert
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-100, 100]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO: Jamil #122 is incorrect. There shouldn't be a leading minus sign.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-100.0] * self.N,
[100.0] * self.N))
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return sum(abs(x))
| Schwefel20 |
python | jazzband__pip-tools | piptools/repositories/pypi.py | {
"start": 1731,
"end": 19557
} | class ____(BaseRepository):
HASHABLE_PACKAGE_TYPES = {"bdist_wheel", "sdist"}
"""
The PyPIRepository will use the provided Finder instance to lookup
packages. Typically, it looks up packages on PyPI (the default implicit
config), but any other PyPI mirror can be used if index_urls is
changed/configured on the Finder.
"""
def __init__(self, pip_args: list[str], cache_dir: str):
# Use pip's parser for pip.conf management and defaults.
# General options (find_links, index_url, extra_index_url, trusted_host,
# and pre) are deferred to pip.
self._command: InstallCommand = create_command("install")
options, _ = self.command.parse_args(pip_args)
if options.cache_dir:
options.cache_dir = normalize_path(options.cache_dir)
options.require_hashes = False
options.ignore_dependencies = False
self._options: optparse.Values = options
self._session = self.command._build_session(options)
self._finder = self.command._build_package_finder(
options=options, session=self.session
)
# Caches
# stores project_name => InstallationCandidate mappings for all
# versions reported by PyPI, so we only have to ask once for each
# project
self._available_candidates_cache: dict[str, list[InstallationCandidate]] = {}
# stores InstallRequirement => list(InstallRequirement) mappings
# of all secondary dependencies for the given requirement, so we
# only have to go to disk once for each requirement
self._dependencies_cache: dict[InstallRequirement, set[InstallRequirement]] = {}
# Setup file paths
self._cache_dir = normalize_path(str(cache_dir))
self._download_dir = os.path.join(self._cache_dir, "pkgs")
# Default pip's logger is noisy, so decrease it's verbosity
setup_logging(
verbosity=log.verbosity - 1,
no_color=self.options.no_color,
user_log_file=self.options.log,
)
def clear_caches(self) -> None:
rmtree(self._download_dir, ignore_errors=True)
@property
def options(self) -> optparse.Values:
return self._options
@property
def session(self) -> PipSession:
return self._session
@property
def finder(self) -> PackageFinder:
return self._finder
@property
def command(self) -> InstallCommand:
"""Return an install command instance."""
return self._command
def find_all_candidates(self, req_name: str) -> list[InstallationCandidate]:
if req_name not in self._available_candidates_cache:
candidates = self.finder.find_all_candidates(req_name)
self._available_candidates_cache[req_name] = candidates
return self._available_candidates_cache[req_name]
def find_best_match(
self, ireq: InstallRequirement, prereleases: bool | None = None
) -> InstallRequirement:
"""
Returns a pinned InstallRequirement object that indicates the best match
for the given InstallRequirement according to the external repository.
"""
if ireq.editable or is_url_requirement(ireq):
return ireq # return itself as the best match
all_candidates = self.find_all_candidates(ireq.name)
candidates_by_version = lookup_table(all_candidates, key=candidate_version)
matching_versions = ireq.specifier.filter(
(candidate.version for candidate in all_candidates), prereleases=prereleases
)
matching_candidates = list(
itertools.chain.from_iterable(
candidates_by_version[ver] for ver in matching_versions
)
)
if not matching_candidates:
raise NoCandidateFound(ireq, all_candidates, self.finder)
evaluator = self.finder.make_candidate_evaluator(ireq.name)
best_candidate_result = evaluator.compute_best_candidate(matching_candidates)
best_candidate = best_candidate_result.best_candidate
# Turn the candidate into a pinned InstallRequirement
return make_install_requirement(
best_candidate.name,
best_candidate.version,
ireq,
)
def resolve_reqs(
self,
download_dir: str | None,
ireq: InstallRequirement,
wheel_cache: WheelCache,
) -> set[InstallationCandidate]:
with (
get_build_tracker() as build_tracker,
TempDirectory(kind="resolver") as temp_dir,
indent_log(),
):
preparer_kwargs = {
"temp_build_dir": temp_dir,
"options": self.options,
"session": self.session,
"finder": self.finder,
"use_user_site": False,
"download_dir": download_dir,
"build_tracker": build_tracker,
}
preparer = self.command.make_requirement_preparer(**preparer_kwargs)
reqset = RequirementSet()
ireq.user_supplied = True
if getattr(ireq, "name", None):
reqset.add_named_requirement(ireq)
else:
reqset.add_unnamed_requirement(ireq)
resolver = self.command.make_resolver(
preparer=preparer,
finder=self.finder,
options=self.options,
wheel_cache=wheel_cache,
use_user_site=False,
ignore_installed=True,
ignore_requires_python=False,
force_reinstall=False,
upgrade_strategy="to-satisfy-only",
)
results = resolver._resolve_one(reqset, ireq)
if not ireq.prepared:
# If still not prepared, e.g. a constraint, do enough to assign
# the ireq a name:
resolver._get_dist_for(ireq)
return set(results)
def get_dependencies(self, ireq: InstallRequirement) -> set[InstallRequirement]:
"""
Given a pinned, URL, or editable InstallRequirement, returns a set of
dependencies (also InstallRequirements, but not necessarily pinned).
They indicate the secondary dependencies for the given requirement.
"""
if not (
ireq.editable or is_url_requirement(ireq) or is_pinned_requirement(ireq)
):
raise TypeError(
f"Expected url, pinned or editable InstallRequirement, got {ireq}"
)
if ireq not in self._dependencies_cache:
if ireq.editable and (ireq.source_dir and os.path.exists(ireq.source_dir)):
# No download_dir for locally available editable requirements.
# If a download_dir is passed, pip will unnecessarily archive
# the entire source directory
download_dir = None
elif ireq.link and ireq.link.is_vcs:
# No download_dir for VCS sources. This also works around pip
# using git-checkout-index, which gets rid of the .git dir.
download_dir = None
else:
download_dir = self._get_download_path(ireq)
os.makedirs(download_dir, exist_ok=True)
with global_tempdir_manager():
wheel_cache = create_wheel_cache(
cache_dir=self._cache_dir,
format_control=self.options.format_control,
)
self._dependencies_cache[ireq] = self.resolve_reqs(
download_dir, ireq, wheel_cache
)
return self._dependencies_cache[ireq]
def _get_project(self, ireq: InstallRequirement) -> _t.Any:
"""
Return a dict of a project info from PyPI JSON API for a given
InstallRequirement. Return None on HTTP/JSON error or if a package
is not found on PyPI server.
API reference: https://warehouse.readthedocs.io/api-reference/json/
"""
package_indexes = (
PackageIndex(url=index_url, file_storage_domain="")
for index_url in self.finder.search_scope.index_urls
)
for package_index in package_indexes:
url = f"{package_index.pypi_url}/{ireq.name}/json"
try:
response = self.session.get(url)
except RequestException as e:
log.debug(f"Fetch package info from PyPI failed: {url}: {e}")
continue
# Skip this PyPI server, because there is no package
# or JSON API might be not supported
if response.status_code == 404:
continue
try:
data = response.json()
except ValueError as e:
log.debug(f"Cannot parse JSON response from PyPI: {url}: {e}")
continue
return data
return None
def _get_download_path(self, ireq: InstallRequirement) -> str:
"""
Determine the download dir location in a way which avoids name
collisions.
"""
if ireq.link:
salt = hashlib.sha224(ireq.link.url_without_fragment.encode()).hexdigest()
# Nest directories to avoid running out of top level dirs on some FS
# (see pypi _get_cache_path_parts, which inspired this)
return os.path.join(
self._download_dir, salt[:2], salt[2:4], salt[4:6], salt[6:]
)
else:
return self._download_dir
def get_hashes(self, ireq: InstallRequirement) -> set[str]:
"""
Given an InstallRequirement, return a set of hashes that represent all
of the files for a given requirement. Unhashable requirements return an
empty set. Unpinned requirements raise a TypeError.
"""
if ireq.link:
link = ireq.link
if link.is_vcs or (link.is_file and link.is_existing_dir()):
# Return empty set for unhashable requirements.
# Unhashable logic modeled on pip's
# RequirementPreparer.prepare_linked_requirement
return set()
if is_url_requirement(ireq):
# Directly hash URL requirements.
# URL requirements may have been previously downloaded and cached
# locally by self.resolve_reqs()
cached_path = os.path.join(self._get_download_path(ireq), link.filename)
if os.path.exists(cached_path):
cached_link = Link(path_to_url(cached_path))
else:
cached_link = link
return {self._get_file_hash(cached_link)}
if not is_pinned_requirement(ireq):
raise TypeError(f"Expected pinned requirement, got {ireq}")
log.debug(ireq.name)
with log.indentation():
return self._get_req_hashes(ireq)
def _get_req_hashes(self, ireq: InstallRequirement) -> set[str]:
"""
Collects the hashes for all candidates satisfying the given InstallRequirement. Computes
the hashes for the candidates that don't have one reported by their index.
"""
matching_candidates = self._get_matching_candidates(ireq)
pypi_hashes_by_link = self._get_hashes_from_pypi(ireq)
pypi_hashes = {
pypi_hashes_by_link[candidate.link.url]
for candidate in matching_candidates
if candidate.link.url in pypi_hashes_by_link
}
local_hashes = {
self._get_file_hash(candidate.link)
for candidate in matching_candidates
if candidate.link.url not in pypi_hashes_by_link
}
return pypi_hashes | local_hashes
def _get_hashes_from_pypi(self, ireq: InstallRequirement) -> dict[str, str]:
"""
Builds a mapping from the release URLs to their hashes as reported by the PyPI JSON API
for a given InstallRequirement.
"""
project = self._get_project(ireq)
if project is None:
return {}
_, version, _ = as_tuple(ireq)
try:
release_files = project["releases"][version]
except KeyError:
log.debug("Missing release files on PyPI")
return {}
try:
hashes = {
file_["url"]: f"{FAVORITE_HASH}:{file_['digests'][FAVORITE_HASH]}"
for file_ in release_files
if file_["packagetype"] in self.HASHABLE_PACKAGE_TYPES
}
except KeyError:
log.debug("Missing digests of release files on PyPI")
return {}
return hashes
def _get_matching_candidates(
self, ireq: InstallRequirement
) -> set[InstallationCandidate]:
"""
Returns all candidates that satisfy the given InstallRequirement.
"""
# We need to get all of the candidates that match our current version
# pin, these will represent all of the files that could possibly
# satisfy this constraint.
all_candidates = self.find_all_candidates(ireq.name)
candidates_by_version = lookup_table(all_candidates, key=candidate_version)
matching_versions = list(
ireq.specifier.filter(candidate.version for candidate in all_candidates)
)
return candidates_by_version[matching_versions[0]]
def _get_file_hash(self, link: Link) -> str:
log.debug(f"Hashing {link.show_url}")
h = hashlib.new(FAVORITE_HASH)
with open_local_or_remote_file(link, self.session) as f:
# Chunks to iterate
chunks = iter(lambda: f.stream.read(FILE_CHUNK_SIZE), b"")
# Choose a context manager depending on verbosity
context_manager: _t.ContextManager[Iterator[bytes]]
if log.verbosity >= 1:
iter_length = int(f.size / FILE_CHUNK_SIZE) if f.size else None
bar_template = f"{' ' * log.current_indent} |%(bar)s| %(info)s"
context_manager = progressbar(
chunks,
length=iter_length,
# Make it look like default pip progress bar
fill_char="█",
empty_char=" ",
bar_template=bar_template,
width=32,
)
else:
context_manager = contextlib.nullcontext(chunks)
# Iterate over the chosen context manager
with context_manager as bar:
for chunk in bar:
h.update(chunk)
return ":".join([FAVORITE_HASH, h.hexdigest()])
@contextmanager
def allow_all_wheels(self) -> Iterator[None]:
"""
Monkey patches pip.Wheel to allow wheels from all platforms and Python versions.
This also saves the candidate cache and set a new one, or else the results from
the previous non-patched calls will interfere.
"""
def _wheel_supported(self: Wheel, tags: list[Tag]) -> bool:
# Ignore current platform. Support everything.
return True
def _wheel_support_index_min(self: Wheel, tags: list[Tag]) -> int:
# All wheels are equal priority for sorting.
return 0
original_wheel_supported = Wheel.supported
original_support_index_min = Wheel.support_index_min
original_cache = self._available_candidates_cache
Wheel.supported = _wheel_supported
Wheel.support_index_min = _wheel_support_index_min
self._available_candidates_cache = {}
# Finder internally caches results, and there is no public method to
# clear the cache, so we re-create the object here. If we don't clear
# this cache then it can contain results from an earlier call when
# allow_all_wheels wasn't active. See GH-1532
self._finder = self.command._build_package_finder(
options=self.options, session=self.session
)
try:
yield
finally:
Wheel.supported = original_wheel_supported
Wheel.support_index_min = original_support_index_min
self._available_candidates_cache = original_cache
@contextmanager
def open_local_or_remote_file(link: Link, session: Session) -> Iterator[FileStream]:
"""
Open local or remote file for reading.
:type link: pip.index.Link
:type session: requests.Session
:raises ValueError: If link points to a local directory.
:return: a context manager to a FileStream with the opened file-like object
"""
url = link.url_without_fragment
if link.is_file:
# Local URL
local_path = url_to_path(url)
if os.path.isdir(local_path):
raise ValueError(f"Cannot open directory for read: {url}")
else:
st = os.stat(local_path)
with open(local_path, "rb") as local_file:
yield FileStream(stream=local_file, size=st.st_size)
else:
# Remote URL
headers = {"Accept-Encoding": "identity"}
response = session.get(url, headers=headers, stream=True)
# Content length must be int or None
content_length: int | None
try:
content_length = int(response.headers["content-length"])
except (ValueError, KeyError, TypeError):
content_length = None
try:
yield FileStream(stream=response.raw, size=content_length)
finally:
response.close()
def candidate_version(candidate: InstallationCandidate) -> _BaseVersion:
return candidate.version
| PyPIRepository |
python | numpy__numpy | numpy/linalg/tests/test_linalg.py | {
"start": 26657,
"end": 29691
} | class ____(CondCases):
@pytest.mark.parametrize('is_complex', [False, True])
def test_basic_nonsvd(self, is_complex):
# Smoketest the non-svd norms
A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]])
if is_complex:
# Since A is linearly scaled, the condition number should not change
A = A * (1 + 1j)
assert_almost_equal(linalg.cond(A, inf), 4)
assert_almost_equal(linalg.cond(A, -inf), 2 / 3)
assert_almost_equal(linalg.cond(A, 1), 4)
assert_almost_equal(linalg.cond(A, -1), 0.5)
assert_almost_equal(linalg.cond(A, 'fro'), np.sqrt(265 / 12))
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
@pytest.mark.parametrize('norm_ord', [1, -1, 2, -2, 'fro', np.inf, -np.inf])
def test_cond_dtypes(self, dtype, norm_ord):
# Check that the condition number is computed in the same dtype
# as the input matrix
A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]], dtype=dtype)
out_type = get_real_dtype(dtype)
assert_equal(linalg.cond(A, p=norm_ord).dtype, out_type)
def test_singular(self):
# Singular matrices have infinite condition number for
# positive norms, and negative norms shouldn't raise
# exceptions
As = [np.zeros((2, 2)), np.ones((2, 2))]
p_pos = [None, 1, 2, 'fro']
p_neg = [-1, -2]
for A, p in itertools.product(As, p_pos):
# Inversion may not hit exact infinity, so just check the
# number is large
assert_(linalg.cond(A, p) > 1e15)
for A, p in itertools.product(As, p_neg):
linalg.cond(A, p)
@pytest.mark.xfail(True, run=False,
reason="Platform/LAPACK-dependent failure, "
"see gh-18914")
def test_nan(self):
# nans should be passed through, not converted to infs
ps = [None, 1, -1, 2, -2, 'fro']
p_pos = [None, 1, 2, 'fro']
A = np.ones((2, 2))
A[0, 1] = np.nan
for p in ps:
c = linalg.cond(A, p)
assert_(isinstance(c, np.float64))
assert_(np.isnan(c))
A = np.ones((3, 2, 2))
A[1, 0, 1] = np.nan
for p in ps:
c = linalg.cond(A, p)
assert_(np.isnan(c[1]))
if p in p_pos:
assert_(c[0] > 1e15)
assert_(c[2] > 1e15)
else:
assert_(not np.isnan(c[0]))
assert_(not np.isnan(c[2]))
def test_stacked_singular(self):
# Check behavior when only some of the stacked matrices are
# singular
np.random.seed(1234)
A = np.random.rand(2, 2, 2, 2)
A[0, 0] = 0
A[1, 1] = 0
for p in (None, 1, 2, 'fro', -1, -2):
c = linalg.cond(A, p)
assert_equal(c[0, 0], np.inf)
assert_equal(c[1, 1], np.inf)
assert_(np.isfinite(c[0, 1]))
assert_(np.isfinite(c[1, 0]))
| TestCond |
python | Textualize__textual | src/textual/events.py | {
"start": 6285,
"end": 6755
} | class ____(Event, bubble=False):
"""Mouse has been released.
- [ ] Bubbles
- [ ] Verbose
Args:
mouse_position: The position of the mouse when released.
"""
def __init__(self, mouse_position: Offset) -> None:
super().__init__()
self.mouse_position = mouse_position
"""The position of the mouse when released."""
def __rich_repr__(self) -> rich.repr.Result:
yield None, self.mouse_position
| MouseRelease |
python | plotly__plotly.py | plotly/graph_objs/pie/_title.py | {
"start": 233,
"end": 3655
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "pie"
_path_str = "pie.title"
_valid_props = {"font", "position", "text"}
@property
def font(self):
"""
Sets the font used for `title`.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.pie.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.pie.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def position(self):
"""
Specifies the location of the `title`.
The 'position' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top left', 'top center', 'top right', 'middle center',
'bottom left', 'bottom center', 'bottom right']
Returns
-------
Any
"""
return self["position"]
@position.setter
def position(self, val):
self["position"] = val
@property
def text(self):
"""
Sets the title of the chart. If it is empty, no title is
displayed.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets the font used for `title`.
position
Specifies the location of the `title`.
text
Sets the title of the chart. If it is empty, no title
is displayed.
"""
def __init__(self, arg=None, font=None, position=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.pie.Title`
font
Sets the font used for `title`.
position
Specifies the location of the `title`.
text
Sets the title of the chart. If it is empty, no title
is displayed.
Returns
-------
Title
"""
super().__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.pie.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.pie.Title`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("position", arg, position)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Title |
python | Textualize__textual | docs/examples/guide/actions/actions05.py | {
"start": 250,
"end": 377
} | class ____(Static):
def action_set_background(self, color: str) -> None:
self.styles.background = color
| ColorSwitcher |
python | doocs__leetcode | solution/3700-3799/3702.Longest Subsequence With Non-Zero Bitwise XOR/Solution.py | {
"start": 0,
"end": 296
} | class ____:
def longestSubsequence(self, nums: List[int]) -> int:
n = len(nums)
xor = cnt0 = 0
for x in nums:
xor ^= x
cnt0 += int(x == 0)
if xor:
return n
if cnt0 == n:
return 0
return n - 1
| Solution |
python | scikit-learn__scikit-learn | sklearn/datasets/tests/test_openml.py | {
"start": 884,
"end": 54649
} | class ____:
def __init__(self, data, is_gzip):
self.data = data
self.is_gzip = is_gzip
def read(self, amt=-1):
return self.data.read(amt)
def close(self):
self.data.close()
def info(self):
if self.is_gzip:
return {"Content-Encoding": "gzip"}
return {}
def __iter__(self):
return iter(self.data)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
# Disable the disk-based cache when testing `fetch_openml`:
# the mock data in sklearn/datasets/tests/data/openml/ is not always consistent
# with the version on openml.org. If one were to load the dataset outside of
# the tests, it may result in data that does not represent openml.org.
fetch_openml = partial(fetch_openml_orig, data_home=None)
def _monkey_patch_webbased_functions(context, data_id, gzip_response):
    """Monkeypatch ``urlopen`` so OpenML requests are served from local fixtures.

    ``context`` is a pytest ``monkeypatch`` fixture. Responses are read from
    the gzipped files under ``OPENML_TEST_DATA_MODULE`` for the given
    ``data_id``. ``gzip_response`` controls whether the mocked responses keep
    their gzip encoding (and advertise it via the Content-Encoding header) or
    are decompressed up front.
    """
    # monkey patches the urlopen function. Important note: Do NOT use this
    # in combination with a regular cache directory, as the files that are
    # stored as cache should not be mixed up with real openml datasets
    url_prefix_data_description = "https://api.openml.org/api/v1/json/data/"
    url_prefix_data_features = "https://api.openml.org/api/v1/json/data/features/"
    url_prefix_download_data = "https://www.openml.org/data/v1/download"
    url_prefix_data_list = "https://api.openml.org/api/v1/json/data/list/"

    path_suffix = ".gz"
    read_fn = gzip.open

    data_module = OPENML_TEST_DATA_MODULE + "." + f"id_{data_id}"

    def _file_name(url, suffix):
        # Map a mocked URL to the name of the corresponding fixture file.
        output = (
            re.sub(r"\W", "-", url[len("https://api.openml.org/") :])
            + suffix
            + path_suffix
        )
        # Shorten the filenames to have better compatibility with windows 10
        # and filenames > 260 characters
        return (
            output.replace("-json-data-list", "-jdl")
            .replace("-json-data-features", "-jdf")
            .replace("-json-data-qualities", "-jdq")
            .replace("-json-data", "-jd")
            .replace("-data_name", "-dn")
            .replace("-download", "-dl")
            .replace("-limit", "-l")
            .replace("-data_version", "-dv")
            .replace("-status", "-s")
            .replace("-deactivated", "-dact")
            .replace("-active", "-act")
        )

    def _mock_urlopen_shared(url, has_gzip_header, expected_prefix, suffix):
        # Serve the fixture matching `url`, honouring the gzip negotiation:
        # the payload stays compressed only when both the request asked for
        # gzip and `gzip_response` is enabled.
        assert url.startswith(expected_prefix), (
            f"{expected_prefix!r} does not match {url!r}"
        )
        data_file_name = _file_name(url, suffix)
        data_file_path = resources.files(data_module) / data_file_name
        with data_file_path.open("rb") as f:
            if has_gzip_header and gzip_response:
                fp = BytesIO(f.read())
                return _MockHTTPResponse(fp, True)
            else:
                decompressed_f = read_fn(f, "rb")
                fp = BytesIO(decompressed_f.read())
                return _MockHTTPResponse(fp, False)

    def _mock_urlopen_data_description(url, has_gzip_header):
        return _mock_urlopen_shared(
            url=url,
            has_gzip_header=has_gzip_header,
            expected_prefix=url_prefix_data_description,
            suffix=".json",
        )

    def _mock_urlopen_data_features(url, has_gzip_header):
        return _mock_urlopen_shared(
            url=url,
            has_gzip_header=has_gzip_header,
            expected_prefix=url_prefix_data_features,
            suffix=".json",
        )

    def _mock_urlopen_download_data(url, has_gzip_header):
        # For simplicity the mock filenames don't contain the filename, i.e.
        # the last part of the data description url after the last /.
        # For example for id_1, data description download url is:
        # gunzip -c sklearn/datasets/tests/data/openml/id_1/api-v1-jd-1.json.gz | grep '"url"  # noqa: E501
        # "https:\/\/www.openml.org\/data\/v1\/download\/1\/anneal.arff"
        # but the mock filename does not contain anneal.arff and is:
        # sklearn/datasets/tests/data/openml/id_1/data-v1-dl-1.arff.gz.
        # We only keep the part of the url before the last /
        url_without_filename = url.rsplit("/", 1)[0]
        return _mock_urlopen_shared(
            url=url_without_filename,
            has_gzip_header=has_gzip_header,
            expected_prefix=url_prefix_download_data,
            suffix=".arff",
        )

    def _mock_urlopen_data_list(url, has_gzip_header):
        assert url.startswith(url_prefix_data_list), (
            f"{url_prefix_data_list!r} does not match {url!r}"
        )
        data_file_name = _file_name(url, ".json")
        data_file_path = resources.files(data_module) / data_file_name
        # load the file itself, to simulate a http error
        with data_file_path.open("rb") as f:
            decompressed_f = read_fn(f, "rb")
            decoded_s = decompressed_f.read().decode("utf-8")
            json_data = json.loads(decoded_s)
        if "error" in json_data:
            raise HTTPError(
                url=None, code=412, msg="Simulated mock error", hdrs=None, fp=BytesIO()
            )
        with data_file_path.open("rb") as f:
            if has_gzip_header:
                fp = BytesIO(f.read())
                return _MockHTTPResponse(fp, True)
            else:
                decompressed_f = read_fn(f, "rb")
                fp = BytesIO(decompressed_f.read())
                return _MockHTTPResponse(fp, False)

    def _mock_urlopen(request, *args, **kwargs):
        # Dispatch on the URL prefix to the dedicated mock above.
        url = request.get_full_url()
        has_gzip_header = request.get_header("Accept-encoding") == "gzip"
        if url.startswith(url_prefix_data_list):
            return _mock_urlopen_data_list(url, has_gzip_header)
        elif url.startswith(url_prefix_data_features):
            return _mock_urlopen_data_features(url, has_gzip_header)
        elif url.startswith(url_prefix_download_data):
            return _mock_urlopen_download_data(url, has_gzip_header)
        elif url.startswith(url_prefix_data_description):
            return _mock_urlopen_data_description(url, has_gzip_header)
        else:
            raise ValueError("Unknown mocking URL pattern: %s" % url)

    # XXX: Global variable
    if test_offline:
        context.setattr(sklearn.datasets._openml, "urlopen", _mock_urlopen)
###############################################################################
# Test the behaviour of `fetch_openml` depending of the input parameters.
@pytest.mark.parametrize(
    "data_id, dataset_params, n_samples, n_features, n_targets",
    [
        # iris
        (61, {"data_id": 61}, 150, 4, 1),
        (61, {"name": "iris", "version": 1}, 150, 4, 1),
        # anneal
        (2, {"data_id": 2}, 11, 38, 1),
        (2, {"name": "anneal", "version": 1}, 11, 38, 1),
        # cpu
        (561, {"data_id": 561}, 209, 7, 1),
        (561, {"name": "cpu", "version": 1}, 209, 7, 1),
        # emotions
        (40589, {"data_id": 40589}, 13, 72, 6),
        # adult-census
        (1119, {"data_id": 1119}, 10, 14, 1),
        (1119, {"name": "adult-census"}, 10, 14, 1),
        # miceprotein
        (40966, {"data_id": 40966}, 7, 77, 1),
        (40966, {"name": "MiceProtein"}, 7, 77, 1),
        # titanic
        (40945, {"data_id": 40945}, 1309, 13, 1),
    ],
)
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_as_frame_true(
    monkeypatch,
    data_id,
    dataset_params,
    n_samples,
    n_features,
    n_targets,
    parser,
    gzip_response,
):
    """Check the behaviour of `fetch_openml` with `as_frame=True`.

    Fetch by ID and/or name (depending if the file was previously cached).
    """
    pd = pytest.importorskip("pandas")

    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=gzip_response)
    bunch = fetch_openml(
        as_frame=True,
        cache=False,
        parser=parser,
        **dataset_params,
    )

    assert int(bunch.details["id"]) == data_id
    assert isinstance(bunch, Bunch)

    # `.frame` holds features and target(s) together
    assert isinstance(bunch.frame, pd.DataFrame)
    assert bunch.frame.shape == (n_samples, n_features + n_targets)

    assert isinstance(bunch.data, pd.DataFrame)
    assert bunch.data.shape == (n_samples, n_features)

    # a single target is returned as a Series, multiple targets as a DataFrame
    if n_targets == 1:
        assert isinstance(bunch.target, pd.Series)
        assert bunch.target.shape == (n_samples,)
    else:
        assert isinstance(bunch.target, pd.DataFrame)
        assert bunch.target.shape == (n_samples, n_targets)

    # with `as_frame=True`, no separate category mapping is returned
    assert bunch.categories is None
@pytest.mark.parametrize(
    "data_id, dataset_params, n_samples, n_features, n_targets",
    [
        # iris
        (61, {"data_id": 61}, 150, 4, 1),
        (61, {"name": "iris", "version": 1}, 150, 4, 1),
        # anneal
        (2, {"data_id": 2}, 11, 38, 1),
        (2, {"name": "anneal", "version": 1}, 11, 38, 1),
        # cpu
        (561, {"data_id": 561}, 209, 7, 1),
        (561, {"name": "cpu", "version": 1}, 209, 7, 1),
        # emotions
        (40589, {"data_id": 40589}, 13, 72, 6),
        # adult-census
        (1119, {"data_id": 1119}, 10, 14, 1),
        (1119, {"name": "adult-census"}, 10, 14, 1),
        # miceprotein
        (40966, {"data_id": 40966}, 7, 77, 1),
        (40966, {"name": "MiceProtein"}, 7, 77, 1),
    ],
)
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
def test_fetch_openml_as_frame_false(
    monkeypatch,
    data_id,
    dataset_params,
    n_samples,
    n_features,
    n_targets,
    parser,
):
    """Check the behaviour of `fetch_openml` with `as_frame=False`.

    Fetch both by ID and/or name + version.
    """
    pytest.importorskip("pandas")

    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)
    bunch = fetch_openml(
        as_frame=False,
        cache=False,
        parser=parser,
        **dataset_params,
    )
    assert int(bunch.details["id"]) == data_id
    assert isinstance(bunch, Bunch)

    # no dataframe is materialised with `as_frame=False`
    assert bunch.frame is None

    assert isinstance(bunch.data, np.ndarray)
    assert bunch.data.shape == (n_samples, n_features)

    assert isinstance(bunch.target, np.ndarray)
    if n_targets == 1:
        assert bunch.target.shape == (n_samples,)
    else:
        assert bunch.target.shape == (n_samples, n_targets)

    # the mapping of categorical levels is exposed instead of a frame
    assert isinstance(bunch.categories, dict)
@pytest.mark.parametrize("data_id", [61, 1119, 40945])
def test_fetch_openml_consistency_parser(monkeypatch, data_id):
    """Check the consistency of the LIAC-ARFF and pandas parsers."""
    pd = pytest.importorskip("pandas")

    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)
    bunch_liac = fetch_openml(
        data_id=data_id,
        as_frame=True,
        cache=False,
        parser="liac-arff",
    )
    bunch_pandas = fetch_openml(
        data_id=data_id,
        as_frame=True,
        cache=False,
        parser="pandas",
    )

    # The data frames for the input features should match up to some numerical
    # dtype conversions (e.g. float64 <=> Int64) due to limitations of the
    # LIAC-ARFF parser.
    data_liac, data_pandas = bunch_liac.data, bunch_pandas.data

    def convert_numerical_dtypes(series):
        # Cast a LIAC-ARFF numeric column to the dtype the pandas parser chose.
        pandas_series = data_pandas[series.name]
        if pd.api.types.is_numeric_dtype(pandas_series):
            return series.astype(pandas_series.dtype)
        else:
            return series

    data_liac_with_fixed_dtypes = data_liac.apply(convert_numerical_dtypes)
    pd.testing.assert_frame_equal(data_liac_with_fixed_dtypes, data_pandas)

    # Let's also check that the .frame attributes also match
    frame_liac, frame_pandas = bunch_liac.frame, bunch_pandas.frame

    # Note that the .frame attribute is a superset of the .data attribute:
    pd.testing.assert_frame_equal(frame_pandas[bunch_pandas.feature_names], data_pandas)

    # However the remaining columns, typically the target(s), are not necessarily
    # dtyped similarly by both parsers due to limitations of the LIAC-ARFF parser.
    # Therefore, extra dtype conversions are required for those columns:
    def convert_numerical_and_categorical_dtypes(series):
        pandas_series = frame_pandas[series.name]
        if pd.api.types.is_numeric_dtype(pandas_series):
            return series.astype(pandas_series.dtype)
        elif isinstance(pandas_series.dtype, pd.CategoricalDtype):
            # Compare categorical features by converting categorical liac uses
            # strings to denote the categories, we rename the categories to make
            # them comparable to the pandas parser. Fixing this behavior in
            # LIAC-ARFF would allow to check the consistency in the future but
            # we do not plan to maintain the LIAC-ARFF on the long term.
            return series.cat.rename_categories(pandas_series.cat.categories)
        else:
            return series

    frame_liac_with_fixed_dtypes = frame_liac.apply(
        convert_numerical_and_categorical_dtypes
    )
    pd.testing.assert_frame_equal(frame_liac_with_fixed_dtypes, frame_pandas)
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
def test_fetch_openml_equivalence_array_dataframe(monkeypatch, parser):
    """Check the equivalence of the dataset when using `as_frame=False` and
    `as_frame=True`: the underlying values must be identical.
    """
    pytest.importorskip("pandas")
    data_id = 61
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)

    common_kwargs = {"data_id": data_id, "cache": False, "parser": parser}
    frame_bunch = fetch_openml(as_frame=True, **common_kwargs)
    array_bunch = fetch_openml(as_frame=False, **common_kwargs)

    assert_allclose(array_bunch.data, frame_bunch.data)
    assert_array_equal(array_bunch.target, frame_bunch.target)
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
def test_fetch_openml_iris_pandas(monkeypatch, parser):
    """Check fetching on a numerical only dataset with string labels."""
    pd = pytest.importorskip("pandas")
    CategoricalDtype = pd.api.types.CategoricalDtype

    data_id = 61
    expected_data_shape = (150, 4)
    expected_target_shape = (150,)
    expected_frame_shape = (150, 5)
    expected_target_dtype = CategoricalDtype(
        ["Iris-setosa", "Iris-versicolor", "Iris-virginica"]
    )
    expected_data_dtypes = [np.float64] * 4
    expected_data_names = ["sepallength", "sepalwidth", "petallength", "petalwidth"]
    expected_target_name = "class"

    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    bunch = fetch_openml(
        data_id=data_id,
        as_frame=True,
        cache=False,
        parser=parser,
    )

    # features: four float columns with the expected names
    assert isinstance(bunch.data, pd.DataFrame)
    assert np.all(bunch.data.dtypes == expected_data_dtypes)
    assert bunch.data.shape == expected_data_shape
    assert np.all(bunch.data.columns == expected_data_names)
    assert np.all(bunch.feature_names == expected_data_names)
    assert bunch.target_names == [expected_target_name]

    # target: a categorical Series with the three iris species
    assert isinstance(bunch.target, pd.Series)
    assert bunch.target.dtype == expected_target_dtype
    assert bunch.target.shape == expected_target_shape
    assert bunch.target.name == expected_target_name
    assert bunch.target.index.is_unique

    # frame: features plus target, same unique index
    assert isinstance(bunch.frame, pd.DataFrame)
    assert bunch.frame.shape == expected_frame_shape
    assert np.all(bunch.frame.dtypes == expected_data_dtypes + [expected_target_dtype])
    assert bunch.frame.index.is_unique
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
@pytest.mark.parametrize("target_column", ["petalwidth", ["petalwidth", "petallength"]])
def test_fetch_openml_forcing_targets(monkeypatch, parser, target_column):
    """Check that we can force the target to not be the default target."""
    pd = pytest.importorskip("pandas")
    data_id = 61
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)

    shared_kwargs = {
        "data_id": data_id,
        "as_frame": True,
        "cache": False,
        "parser": parser,
    }
    bunch_forcing_target = fetch_openml(target_column=target_column, **shared_kwargs)
    bunch_default = fetch_openml(**shared_kwargs)

    # the full frame is identical whatever the target selection
    pd.testing.assert_frame_equal(bunch_forcing_target.frame, bunch_default.frame)

    if isinstance(target_column, list):
        # multi-target: both requested columns move from data to target
        pd.testing.assert_index_equal(
            bunch_forcing_target.target.columns, pd.Index(target_column)
        )
        assert bunch_forcing_target.data.shape == (150, 3)
    else:
        assert bunch_forcing_target.target.name == target_column
        assert bunch_forcing_target.data.shape == (150, 4)
@pytest.mark.parametrize("data_id", [61, 2, 561, 40589, 1119])
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
def test_fetch_openml_equivalence_frame_return_X_y(monkeypatch, data_id, parser):
    """Check the behaviour of `return_X_y=True` when `as_frame=True`."""
    pd = pytest.importorskip("pandas")
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)

    shared_kwargs = {
        "data_id": data_id,
        "as_frame": True,
        "cache": False,
        "parser": parser,
    }
    bunch = fetch_openml(return_X_y=False, **shared_kwargs)
    X, y = fetch_openml(return_X_y=True, **shared_kwargs)

    pd.testing.assert_frame_equal(bunch.data, X)
    if isinstance(y, pd.Series):
        pd.testing.assert_series_equal(bunch.target, y)
    else:
        pd.testing.assert_frame_equal(bunch.target, y)
@pytest.mark.parametrize("data_id", [61, 561, 40589, 1119])
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
def test_fetch_openml_equivalence_array_return_X_y(monkeypatch, data_id, parser):
    """Check the behaviour of `return_X_y=True` when `as_frame=False`."""
    pytest.importorskip("pandas")
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)

    shared_kwargs = {
        "data_id": data_id,
        "as_frame": False,
        "cache": False,
        "parser": parser,
    }
    bunch = fetch_openml(return_X_y=False, **shared_kwargs)
    X, y = fetch_openml(return_X_y=True, **shared_kwargs)

    assert_array_equal(bunch.data, X)
    assert_array_equal(bunch.target, y)
def test_fetch_openml_difference_parsers(monkeypatch):
    """Check the difference between liac-arff and pandas parser."""
    pytest.importorskip("pandas")
    data_id = 1119
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True)

    # When `as_frame=False`, the categories will be ordinally encoded with
    # liac-arff parser (hence a float array) while the pandas parser keeps
    # the original values as objects.
    shared_kwargs = {"data_id": data_id, "as_frame": False, "cache": False}
    bunch_liac_arff = fetch_openml(parser="liac-arff", **shared_kwargs)
    bunch_pandas = fetch_openml(parser="pandas", **shared_kwargs)

    assert bunch_liac_arff.data.dtype.kind == "f"
    assert bunch_pandas.data.dtype == "O"
###############################################################################
# Test the ARFF parsing on several dataset to check if detect the correct
# types (categories, integers, floats).
@pytest.fixture(scope="module")
def datasets_column_names():
    """Returns the columns names for each dataset, keyed by OpenML data id."""
    return {
        # iris
        61: ["sepallength", "sepalwidth", "petallength", "petalwidth", "class"],
        # anneal
        2: [
            "family",
            "product-type",
            "steel",
            "carbon",
            "hardness",
            "temper_rolling",
            "condition",
            "formability",
            "strength",
            "non-ageing",
            "surface-finish",
            "surface-quality",
            "enamelability",
            "bc",
            "bf",
            "bt",
            "bw%2Fme",
            "bl",
            "m",
            "chrom",
            "phos",
            "cbond",
            "marvi",
            "exptl",
            "ferro",
            "corr",
            "blue%2Fbright%2Fvarn%2Fclean",
            "lustre",
            "jurofm",
            "s",
            "p",
            "shape",
            "thick",
            "width",
            "len",
            "oil",
            "bore",
            "packing",
            "class",
        ],
        # cpu
        561: ["vendor", "MYCT", "MMIN", "MMAX", "CACH", "CHMIN", "CHMAX", "class"],
        # emotions
        40589: [
            "Mean_Acc1298_Mean_Mem40_Centroid",
            "Mean_Acc1298_Mean_Mem40_Rolloff",
            "Mean_Acc1298_Mean_Mem40_Flux",
            "Mean_Acc1298_Mean_Mem40_MFCC_0",
            "Mean_Acc1298_Mean_Mem40_MFCC_1",
            "Mean_Acc1298_Mean_Mem40_MFCC_2",
            "Mean_Acc1298_Mean_Mem40_MFCC_3",
            "Mean_Acc1298_Mean_Mem40_MFCC_4",
            "Mean_Acc1298_Mean_Mem40_MFCC_5",
            "Mean_Acc1298_Mean_Mem40_MFCC_6",
            "Mean_Acc1298_Mean_Mem40_MFCC_7",
            "Mean_Acc1298_Mean_Mem40_MFCC_8",
            "Mean_Acc1298_Mean_Mem40_MFCC_9",
            "Mean_Acc1298_Mean_Mem40_MFCC_10",
            "Mean_Acc1298_Mean_Mem40_MFCC_11",
            "Mean_Acc1298_Mean_Mem40_MFCC_12",
            "Mean_Acc1298_Std_Mem40_Centroid",
            "Mean_Acc1298_Std_Mem40_Rolloff",
            "Mean_Acc1298_Std_Mem40_Flux",
            "Mean_Acc1298_Std_Mem40_MFCC_0",
            "Mean_Acc1298_Std_Mem40_MFCC_1",
            "Mean_Acc1298_Std_Mem40_MFCC_2",
            "Mean_Acc1298_Std_Mem40_MFCC_3",
            "Mean_Acc1298_Std_Mem40_MFCC_4",
            "Mean_Acc1298_Std_Mem40_MFCC_5",
            "Mean_Acc1298_Std_Mem40_MFCC_6",
            "Mean_Acc1298_Std_Mem40_MFCC_7",
            "Mean_Acc1298_Std_Mem40_MFCC_8",
            "Mean_Acc1298_Std_Mem40_MFCC_9",
            "Mean_Acc1298_Std_Mem40_MFCC_10",
            "Mean_Acc1298_Std_Mem40_MFCC_11",
            "Mean_Acc1298_Std_Mem40_MFCC_12",
            "Std_Acc1298_Mean_Mem40_Centroid",
            "Std_Acc1298_Mean_Mem40_Rolloff",
            "Std_Acc1298_Mean_Mem40_Flux",
            "Std_Acc1298_Mean_Mem40_MFCC_0",
            "Std_Acc1298_Mean_Mem40_MFCC_1",
            "Std_Acc1298_Mean_Mem40_MFCC_2",
            "Std_Acc1298_Mean_Mem40_MFCC_3",
            "Std_Acc1298_Mean_Mem40_MFCC_4",
            "Std_Acc1298_Mean_Mem40_MFCC_5",
            "Std_Acc1298_Mean_Mem40_MFCC_6",
            "Std_Acc1298_Mean_Mem40_MFCC_7",
            "Std_Acc1298_Mean_Mem40_MFCC_8",
            "Std_Acc1298_Mean_Mem40_MFCC_9",
            "Std_Acc1298_Mean_Mem40_MFCC_10",
            "Std_Acc1298_Mean_Mem40_MFCC_11",
            "Std_Acc1298_Mean_Mem40_MFCC_12",
            "Std_Acc1298_Std_Mem40_Centroid",
            "Std_Acc1298_Std_Mem40_Rolloff",
            "Std_Acc1298_Std_Mem40_Flux",
            "Std_Acc1298_Std_Mem40_MFCC_0",
            "Std_Acc1298_Std_Mem40_MFCC_1",
            "Std_Acc1298_Std_Mem40_MFCC_2",
            "Std_Acc1298_Std_Mem40_MFCC_3",
            "Std_Acc1298_Std_Mem40_MFCC_4",
            "Std_Acc1298_Std_Mem40_MFCC_5",
            "Std_Acc1298_Std_Mem40_MFCC_6",
            "Std_Acc1298_Std_Mem40_MFCC_7",
            "Std_Acc1298_Std_Mem40_MFCC_8",
            "Std_Acc1298_Std_Mem40_MFCC_9",
            "Std_Acc1298_Std_Mem40_MFCC_10",
            "Std_Acc1298_Std_Mem40_MFCC_11",
            "Std_Acc1298_Std_Mem40_MFCC_12",
            "BH_LowPeakAmp",
            "BH_LowPeakBPM",
            "BH_HighPeakAmp",
            "BH_HighPeakBPM",
            "BH_HighLowRatio",
            "BHSUM1",
            "BHSUM2",
            "BHSUM3",
            "amazed.suprised",
            "happy.pleased",
            "relaxing.calm",
            "quiet.still",
            "sad.lonely",
            "angry.aggresive",
        ],
        # adult-census
        1119: [
            "age",
            "workclass",
            "fnlwgt:",
            "education:",
            "education-num:",
            "marital-status:",
            "occupation:",
            "relationship:",
            "race:",
            "sex:",
            "capital-gain:",
            "capital-loss:",
            "hours-per-week:",
            "native-country:",
            "class",
        ],
        # miceprotein
        40966: [
            "DYRK1A_N",
            "ITSN1_N",
            "BDNF_N",
            "NR1_N",
            "NR2A_N",
            "pAKT_N",
            "pBRAF_N",
            "pCAMKII_N",
            "pCREB_N",
            "pELK_N",
            "pERK_N",
            "pJNK_N",
            "PKCA_N",
            "pMEK_N",
            "pNR1_N",
            "pNR2A_N",
            "pNR2B_N",
            "pPKCAB_N",
            "pRSK_N",
            "AKT_N",
            "BRAF_N",
            "CAMKII_N",
            "CREB_N",
            "ELK_N",
            "ERK_N",
            "GSK3B_N",
            "JNK_N",
            "MEK_N",
            "TRKA_N",
            "RSK_N",
            "APP_N",
            "Bcatenin_N",
            "SOD1_N",
            "MTOR_N",
            "P38_N",
            "pMTOR_N",
            "DSCR1_N",
            "AMPKA_N",
            "NR2B_N",
            "pNUMB_N",
            "RAPTOR_N",
            "TIAM1_N",
            "pP70S6_N",
            "NUMB_N",
            "P70S6_N",
            "pGSK3B_N",
            "pPKCG_N",
            "CDK5_N",
            "S6_N",
            "ADARB1_N",
            "AcetylH3K9_N",
            "RRP1_N",
            "BAX_N",
            "ARC_N",
            "ERBB4_N",
            "nNOS_N",
            "Tau_N",
            "GFAP_N",
            "GluR3_N",
            "GluR4_N",
            "IL1B_N",
            "P3525_N",
            "pCASP9_N",
            "PSD95_N",
            "SNCA_N",
            "Ubiquitin_N",
            "pGSK3B_Tyr216_N",
            "SHH_N",
            "BAD_N",
            "BCL2_N",
            "pS6_N",
            "pCFOS_N",
            "SYP_N",
            "H3AcK18_N",
            "EGR1_N",
            "H3MeK4_N",
            "CaNA_N",
            "class",
        ],
        # titanic
        40945: [
            "pclass",
            "survived",
            "name",
            "sex",
            "age",
            "sibsp",
            "parch",
            "ticket",
            "fare",
            "cabin",
            "embarked",
            "boat",
            "body",
            "home.dest",
        ],
    }
@pytest.fixture(scope="module")
def datasets_missing_values():
    """Expected number of missing values per column, keyed by OpenML data id.

    Columns that are not listed are expected to be complete (0 missing).
    """
    return {
        # iris: no missing values
        61: {},
        # anneal
        2: {
            "family": 11,
            "temper_rolling": 9,
            "condition": 2,
            "formability": 4,
            "non-ageing": 10,
            "surface-finish": 11,
            "enamelability": 11,
            "bc": 11,
            "bf": 10,
            "bt": 11,
            "bw%2Fme": 8,
            "bl": 9,
            "m": 11,
            "chrom": 11,
            "phos": 11,
            "cbond": 10,
            "marvi": 11,
            "exptl": 11,
            "ferro": 11,
            "corr": 11,
            "blue%2Fbright%2Fvarn%2Fclean": 11,
            "lustre": 8,
            "jurofm": 11,
            "s": 11,
            "p": 11,
            "oil": 10,
            "packing": 11,
        },
        # cpu
        561: {},
        # emotions
        40589: {},
        # adult-census
        1119: {},
        # miceprotein
        40966: {"BCL2_N": 7},
        # titanic
        40945: {
            "age": 263,
            "fare": 1,
            "cabin": 1014,
            "embarked": 2,
            "boat": 823,
            "body": 1188,
            "home.dest": 564,
        },
    }
@pytest.mark.parametrize(
    "data_id, parser, expected_n_categories, expected_n_floats, expected_n_ints",
    [
        # iris dataset
        (61, "liac-arff", 1, 4, 0),
        (61, "pandas", 1, 4, 0),
        # anneal dataset
        (2, "liac-arff", 33, 6, 0),
        (2, "pandas", 33, 2, 4),
        # cpu dataset
        (561, "liac-arff", 1, 7, 0),
        (561, "pandas", 1, 0, 7),
        # emotions dataset
        (40589, "liac-arff", 6, 72, 0),
        (40589, "pandas", 6, 69, 3),
        # adult-census dataset
        (1119, "liac-arff", 9, 6, 0),
        (1119, "pandas", 9, 0, 6),
        # miceprotein
        (40966, "liac-arff", 1, 77, 0),
        (40966, "pandas", 1, 77, 0),
        # titanic
        (40945, "liac-arff", 3, 6, 0),
        (40945, "pandas", 3, 3, 3),
    ],
)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_types_inference(
    monkeypatch,
    data_id,
    parser,
    expected_n_categories,
    expected_n_floats,
    expected_n_ints,
    gzip_response,
    datasets_column_names,
    datasets_missing_values,
):
    """Check that `fetch_openml` infer the right number of categories, integers, and
    floats."""
    pd = pytest.importorskip("pandas")
    CategoricalDtype = pd.api.types.CategoricalDtype

    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=gzip_response)

    bunch = fetch_openml(
        data_id=data_id,
        as_frame=True,
        cache=False,
        parser=parser,
    )
    frame = bunch.frame

    # count the columns of each inferred kind
    n_categories = len(
        [dtype for dtype in frame.dtypes if isinstance(dtype, CategoricalDtype)]
    )
    n_floats = len([dtype for dtype in frame.dtypes if dtype.kind == "f"])
    n_ints = len([dtype for dtype in frame.dtypes if dtype.kind == "i"])

    assert n_categories == expected_n_categories
    assert n_floats == expected_n_floats
    assert n_ints == expected_n_ints

    assert frame.columns.tolist() == datasets_column_names[data_id]

    # every column must carry exactly the expected number of missing values
    frame_feature_to_n_nan = frame.isna().sum().to_dict()
    for name, n_missing in frame_feature_to_n_nan.items():
        expected_missing = datasets_missing_values[data_id].get(name, 0)
        assert n_missing == expected_missing
###############################################################################
# Test some more specific behaviour
@pytest.mark.parametrize(
    "params, err_msg",
    [
        (
            {"parser": "unknown"},
            "The 'parser' parameter of fetch_openml must be a str among",
        ),
        (
            {"as_frame": "unknown"},
            "The 'as_frame' parameter of fetch_openml must be an instance",
        ),
    ],
)
def test_fetch_openml_validation_parameter(monkeypatch, params, err_msg):
    """Invalid `parser` / `as_frame` values must raise a `ValueError`."""
    data_id = 1119
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    with pytest.raises(ValueError, match=err_msg):
        fetch_openml(data_id=data_id, **params)
@pytest.mark.parametrize(
    "params",
    [
        {"as_frame": True, "parser": "auto"},
        {"as_frame": "auto", "parser": "auto"},
        {"as_frame": False, "parser": "pandas"},
        {"as_frame": False, "parser": "auto"},
    ],
)
def test_fetch_openml_requires_pandas_error(monkeypatch, params):
    """Check that we raise the proper errors when we require pandas."""
    data_id = 1119
    try:
        check_pandas_support("test_fetch_openml_requires_pandas")
    except ImportError:
        # pandas is absent: fetching with these parameters must fail loudly
        _monkey_patch_webbased_functions(monkeypatch, data_id, True)
        err_msg = "requires pandas to be installed. Alternatively, explicitly"
        with pytest.raises(ImportError, match=err_msg):
            fetch_openml(data_id=data_id, **params)
    else:
        # the test is only meaningful when pandas is NOT installed
        raise SkipTest("This test requires pandas to not be installed.")
@pytest.mark.filterwarnings("ignore:Version 1 of dataset Australian is inactive")
@pytest.mark.parametrize(
    "params, err_msg",
    [
        (
            {"parser": "pandas"},
            "Sparse ARFF datasets cannot be loaded with parser='pandas'",
        ),
        (
            {"as_frame": True},
            "Sparse ARFF datasets cannot be loaded with as_frame=True.",
        ),
        (
            {"parser": "pandas", "as_frame": True},
            "Sparse ARFF datasets cannot be loaded with as_frame=True.",
        ),
    ],
)
def test_fetch_openml_sparse_arff_error(monkeypatch, params, err_msg):
    """Check that we raise the expected error for sparse ARFF datasets and
    a wrong set of incompatible parameters.
    """
    pytest.importorskip("pandas")
    data_id = 292  # Australian dataset, stored as sparse ARFF

    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    with pytest.raises(ValueError, match=err_msg):
        fetch_openml(
            data_id=data_id,
            cache=False,
            **params,
        )
@pytest.mark.filterwarnings("ignore:Version 1 of dataset Australian is inactive")
@pytest.mark.parametrize(
    "data_id, data_type",
    [
        (61, "dataframe"),  # iris dataset version 1
        (292, "sparse"),  # Australian dataset version 1
    ],
)
def test_fetch_openml_auto_mode(monkeypatch, data_id, data_type):
    """Check the auto mode of `fetch_openml`."""
    pd = pytest.importorskip("pandas")
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)

    data = fetch_openml(data_id=data_id, as_frame="auto", cache=False)

    # dense datasets come back as a dataframe, sparse ones as a CSR matrix
    if data_type == "dataframe":
        expected_klass = pd.DataFrame
    else:
        expected_klass = scipy.sparse.csr_matrix
    assert isinstance(data.data, expected_klass)
def test_convert_arff_data_dataframe_warning_low_memory_pandas(monkeypatch):
    """Check that we raise a warning regarding the working memory when using
    LIAC-ARFF parser."""
    pytest.importorskip("pandas")
    data_id = 1119
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)

    expected_warning = "Could not adhere to working_memory config."
    # an absurdly small working memory budget must trigger the warning
    with config_context(working_memory=1e-6), pytest.warns(
        UserWarning, match=expected_warning
    ):
        fetch_openml(
            data_id=data_id,
            as_frame=True,
            cache=False,
            parser="liac-arff",
        )
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_iris_warn_multiple_version(monkeypatch, gzip_response):
    """Check that a warning is raised when multiple versions exist and no version is
    requested."""
    data_id = 61
    data_name = "iris"

    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)

    # the full warning text is matched verbatim, hence the `re.escape`
    msg = re.escape(
        "Multiple active versions of the dataset matching the name"
        " iris exist. Versions may be fundamentally different, "
        "returning version 1. Available versions:\n"
        "- version 1, status: active\n"
        "  url: https://www.openml.org/search?type=data&id=61\n"
        "- version 3, status: active\n"
        "  url: https://www.openml.org/search?type=data&id=969\n"
    )
    with pytest.warns(UserWarning, match=msg):
        fetch_openml(
            name=data_name,
            as_frame=False,
            cache=False,
            parser="liac-arff",
        )
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_no_target(monkeypatch, gzip_response):
    """Check that we can get a dataset without target."""
    data_id = 61
    n_observations, n_features = 150, 5
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)

    bunch = fetch_openml(
        data_id=data_id,
        target_column=None,
        cache=False,
        as_frame=False,
        parser="liac-arff",
    )

    # with no target requested, all 5 columns end up in `.data`
    assert bunch.data.shape == (n_observations, n_features)
    assert bunch.target is None
@pytest.mark.parametrize("gzip_response", [True, False])
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
def test_missing_values_pandas(monkeypatch, gzip_response, parser):
    """check that missing values in categories are compatible with pandas
    categorical"""
    pytest.importorskip("pandas")

    data_id = 42585
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=gzip_response)
    penguins = fetch_openml(
        data_id=data_id,
        cache=False,
        as_frame=True,
        parser=parser,
    )

    sex_column = penguins.data["sex"]
    # missing entries are represented as NaN, not as an extra category
    assert sex_column.isna().any()
    assert_array_equal(sex_column.dtype.categories, ["FEMALE", "MALE", "_"])
@pytest.mark.parametrize("gzip_response", [True, False])
@pytest.mark.parametrize(
    "dataset_params",
    [
        {"data_id": 40675},
        {"data_id": None, "name": "glass2", "version": 1},
    ],
)
def test_fetch_openml_inactive(monkeypatch, gzip_response, dataset_params):
    """Check that we raise a warning when the dataset is inactive."""
    data_id = 40675
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)

    expected_msg = "Version 1 of dataset glass2 is inactive,"
    with pytest.warns(UserWarning, match=expected_msg):
        glass2 = fetch_openml(
            cache=False, as_frame=False, parser="liac-arff", **dataset_params
        )

    # the inactive dataset is still returned, with its expected shape
    assert glass2.data.shape == (163, 9)
    assert glass2.details["id"] == "40675"
@pytest.mark.parametrize("gzip_response", [True, False])
@pytest.mark.parametrize(
    "data_id, params, err_type, err_msg",
    [
        (40675, {"name": "glass2"}, ValueError, "No active dataset glass2 found"),
        (
            61,
            {"data_id": 61, "target_column": ["sepalwidth", "class"]},
            ValueError,
            "Can only handle homogeneous multi-target datasets",
        ),
        (
            40945,
            {"data_id": 40945, "as_frame": False},
            ValueError,
            (
                "STRING attributes are not supported for array representation. Try"
                " as_frame=True"
            ),
        ),
        (
            2,
            {"data_id": 2, "target_column": "family", "as_frame": True},
            ValueError,
            "Target column 'family'",
        ),
        (
            2,
            {"data_id": 2, "target_column": "family", "as_frame": False},
            ValueError,
            "Target column 'family'",
        ),
        (
            61,
            {"data_id": 61, "target_column": "undefined"},
            KeyError,
            "Could not find target_column='undefined'",
        ),
        (
            61,
            {"data_id": 61, "target_column": ["undefined", "class"]},
            KeyError,
            "Could not find target_column='undefined'",
        ),
    ],
)
@pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
def test_fetch_openml_error(
    monkeypatch, gzip_response, data_id, params, err_type, err_msg, parser
):
    """Invalid dataset/parameter combinations must raise the expected error."""
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    # pandas is needed either for frame output or for the pandas parser
    if params.get("as_frame", True) or parser == "pandas":
        pytest.importorskip("pandas")
    with pytest.raises(err_type, match=err_msg):
        fetch_openml(cache=False, parser=parser, **params)
@pytest.mark.parametrize(
    "params, err_type, err_msg",
    [
        (
            {"data_id": -1, "name": None, "version": "version"},
            ValueError,
            "The 'version' parameter of fetch_openml must be an int in the range",
        ),
        (
            {"data_id": -1, "name": "nAmE"},
            ValueError,
            "The 'data_id' parameter of fetch_openml must be an int in the range",
        ),
        (
            {"data_id": -1, "name": "nAmE", "version": "version"},
            ValueError,
            "The 'version' parameter of fetch_openml must be an int",
        ),
        (
            {},
            ValueError,
            "Neither name nor data_id are provided. Please provide name or data_id.",
        ),
    ],
)
def test_fetch_openml_raises_illegal_argument(params, err_type, err_msg):
    """Illegal `data_id` / `name` / `version` combinations must raise."""
    with pytest.raises(err_type, match=err_msg):
        fetch_openml(**params)
@pytest.mark.parametrize("gzip_response", [True, False])
def test_warn_ignore_attribute(monkeypatch, gzip_response):
    """Targeting a column flagged `is_row_identifier` or `is_ignore` must warn.

    Both the single-target and the multi-target code paths are exercised for
    each flagged column. The four original copy-pasted blocks are collapsed
    into loops, preserving the original order of the calls.
    """
    data_id = 40966
    expected_row_id_msg = "target_column='{}' has flag is_row_identifier."
    expected_ignore_msg = "target_column='{}' has flag is_ignore."
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)

    # (flagged column, warning-message template) pairs
    flagged_columns = [
        ("MouseID", expected_row_id_msg),
        ("Genotype", expected_ignore_msg),
    ]
    # first check each column as a single target, then as part of a
    # multi-column target (same order as the original explicit blocks)
    for multi_target in (False, True):
        for target_col, msg_template in flagged_columns:
            msg = msg_template.format(target_col)
            target_column = [target_col, "class"] if multi_target else target_col
            with pytest.warns(UserWarning, match=msg):
                fetch_openml(
                    data_id=data_id,
                    target_column=target_column,
                    cache=False,
                    as_frame=False,
                    parser="liac-arff",
                )
@pytest.mark.parametrize("gzip_response", [True, False])
def test_dataset_with_openml_error(monkeypatch, gzip_response):
    """An error registered by OpenML for the dataset must surface as a warning."""
    data_id = 1
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    expected_msg = (
        "OpenML registered a problem with the dataset. It might be unusable. Error:"
    )
    with pytest.warns(UserWarning, match=expected_msg):
        fetch_openml(data_id=data_id, cache=False, as_frame=False, parser="liac-arff")
@pytest.mark.parametrize("gzip_response", [True, False])
def test_dataset_with_openml_warning(monkeypatch, gzip_response):
    """A warning reported by OpenML for the dataset must surface as a warning."""
    data_id = 3
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    expected_msg = (
        "OpenML raised a warning on the dataset. It might be unusable. Warning:"
    )
    with pytest.warns(UserWarning, match=expected_msg):
        fetch_openml(data_id=data_id, cache=False, as_frame=False, parser="liac-arff")
def test_fetch_openml_overwrite_default_params_read_csv(monkeypatch):
    """Check that we can overwrite the default parameters of `read_csv`."""
    pytest.importorskip("pandas")
    data_id = 1590
    _monkey_patch_webbased_functions(monkeypatch, data_id=data_id, gzip_response=False)

    shared_kwargs = {
        "data_id": data_id,
        "as_frame": True,
        "cache": False,
        "parser": "pandas",
    }
    # By default, the pandas parser strips initial spaces; forwarding
    # `skipinitialspace=False` through `read_csv_kwargs` must disable that.
    adult_without_spaces = fetch_openml(**shared_kwargs)
    adult_with_spaces = fetch_openml(
        read_csv_kwargs={"skipinitialspace": False}, **shared_kwargs
    )

    categories_with_spaces = adult_with_spaces.frame["class"].cat.categories
    categories_without_spaces = adult_without_spaces.frame["class"].cat.categories
    assert all(cat.startswith(" ") for cat in categories_with_spaces)
    assert not any(cat.startswith(" ") for cat in categories_without_spaces)
###############################################################################
# Test cache, retry mechanisms, checksum, etc.
@pytest.mark.parametrize("gzip_response", [True, False])
def test_open_openml_url_cache(monkeypatch, gzip_response, tmpdir):
    """A first download fills the cache; a second call is served from it."""
    data_id = 61

    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    openml_path = _MONKEY_PATCH_LOCAL_OPENML_PATH.format(data_id) + "/filename.arff"
    url = f"https://www.openml.org/{openml_path}"
    cache_directory = str(tmpdir.mkdir("scikit_learn_data"))
    # first fill the cache
    response1 = _open_openml_url(url, cache_directory)
    # assert file exists
    location = _get_local_path(openml_path, cache_directory)
    assert os.path.isfile(location)
    # redownload, to utilize cache
    response2 = _open_openml_url(url, cache_directory)
    assert response1.read() == response2.read()
    # Close both handles explicitly to avoid a ResourceWarning (the original
    # left them open).
    response1.close()
    response2.close()
@pytest.mark.parametrize("write_to_disk", [True, False])
def test_open_openml_url_unlinks_local_path(monkeypatch, tmpdir, write_to_disk):
    # If the download fails, any partially written cache file must be removed.
    data_id = 61
    openml_path = _MONKEY_PATCH_LOCAL_OPENML_PATH.format(data_id) + "/filename.arff"
    url = f"https://www.openml.org/{openml_path}"
    cache_directory = str(tmpdir.mkdir("scikit_learn_data"))
    location = _get_local_path(openml_path, cache_directory)

    def _mock_urlopen(request, *args, **kwargs):
        # Optionally simulate a partial write happening before the failure.
        if write_to_disk:
            with open(location, "w") as f:
                f.write("")
        raise ValueError("Invalid request")

    monkeypatch.setattr(sklearn.datasets._openml, "urlopen", _mock_urlopen)

    with pytest.raises(ValueError, match="Invalid request"):
        _open_openml_url(url, cache_directory)

    # The cache entry must not survive a failed download.
    assert not os.path.exists(location)
def test_retry_with_clean_cache(tmpdir):
    # A corrupted/stale cache entry should be dropped and the load retried once.
    data_id = 61
    openml_path = _MONKEY_PATCH_LOCAL_OPENML_PATH.format(data_id)
    cache_directory = str(tmpdir.mkdir("scikit_learn_data"))
    location = _get_local_path(openml_path, cache_directory)
    os.makedirs(os.path.dirname(location))

    with open(location, "w") as f:
        f.write("")

    @_retry_with_clean_cache(openml_path, cache_directory)
    def _load_data():
        # The first call will raise an error since location exists
        if os.path.exists(location):
            raise Exception("File exist!")
        return 1

    warn_msg = "Invalid cache, redownloading file"
    with pytest.warns(RuntimeWarning, match=warn_msg):
        result = _load_data()
    # The retry (after the cache file was removed) must succeed.
    assert result == 1
def test_retry_with_clean_cache_http_error(tmpdir):
    # HTTP errors are not cache-related: they must propagate without a retry.
    data_id = 61
    openml_path = _MONKEY_PATCH_LOCAL_OPENML_PATH.format(data_id)
    cache_directory = str(tmpdir.mkdir("scikit_learn_data"))

    @_retry_with_clean_cache(openml_path, cache_directory)
    def _load_data():
        raise HTTPError(
            url=None, code=412, msg="Simulated mock error", hdrs=None, fp=BytesIO()
        )

    error_msg = "Simulated mock error"
    with pytest.raises(HTTPError, match=error_msg):
        _load_data()
@pytest.mark.parametrize("gzip_response", [True, False])
def test_fetch_openml_cache(monkeypatch, gzip_response, tmpdir):
    # After a first fetch fills the cache, no network access may happen again.
    def _mock_urlopen_raise(request, *args, **kwargs):
        raise ValueError(
            "This mechanism intends to test correct cache"
            "handling. As such, urlopen should never be "
            "accessed. URL: %s" % request.get_full_url()
        )

    data_id = 61
    cache_directory = str(tmpdir.mkdir("scikit_learn_data"))
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    X_fetched, y_fetched = fetch_openml(
        data_id=data_id,
        cache=True,
        data_home=cache_directory,
        return_X_y=True,
        as_frame=False,
        parser="liac-arff",
    )

    # From this point on, any urlopen call would raise.
    monkeypatch.setattr(sklearn.datasets._openml, "urlopen", _mock_urlopen_raise)

    X_cached, y_cached = fetch_openml(
        data_id=data_id,
        cache=True,
        data_home=cache_directory,
        return_X_y=True,
        as_frame=False,
        parser="liac-arff",
    )
    # Cached data must be identical to the originally fetched data.
    np.testing.assert_array_equal(X_fetched, X_cached)
    np.testing.assert_array_equal(y_fetched, y_cached)
@pytest.mark.parametrize(
    "as_frame, parser",
    [
        (True, "liac-arff"),
        (False, "liac-arff"),
        (True, "pandas"),
        (False, "pandas"),
    ],
)
def test_fetch_openml_verify_checksum(monkeypatch, as_frame, tmpdir, parser):
    """Check that the checksum is working as expected."""
    if as_frame or parser == "pandas":
        pytest.importorskip("pandas")

    data_id = 2
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)

    # create a temporary modified arff file
    original_data_module = OPENML_TEST_DATA_MODULE + "." + f"id_{data_id}"
    original_data_file_name = "data-v1-dl-1666876.arff.gz"
    original_data_path = resources.files(original_data_module) / original_data_file_name
    corrupt_copy_path = tmpdir / "test_invalid_checksum.arff"
    with original_data_path.open("rb") as orig_file:
        # Use a context manager so the gzip handle is closed; the original code
        # never closed it and could emit a ResourceWarning.
        with gzip.open(orig_file, "rb") as orig_gzip:
            data = bytearray(orig_gzip.read())
        # Flip the last byte so the checksum no longer matches.
        data[len(data) - 1] = 37

    with gzip.GzipFile(corrupt_copy_path, "wb") as modified_gzip:
        modified_gzip.write(data)

    # Requests are already mocked by monkey_patch_webbased_functions.
    # We want to reuse that mock for all requests except file download,
    # hence creating a thin mock over the original mock
    mocked_openml_url = sklearn.datasets._openml.urlopen

    def swap_file_mock(request, *args, **kwargs):
        url = request.get_full_url()
        if url.endswith("data/v1/download/1666876/anneal.arff"):
            with open(corrupt_copy_path, "rb") as f:
                corrupted_data = f.read()
            return _MockHTTPResponse(BytesIO(corrupted_data), is_gzip=True)
        else:
            return mocked_openml_url(request)

    monkeypatch.setattr(sklearn.datasets._openml, "urlopen", swap_file_mock)

    # validate failed checksum
    with pytest.raises(ValueError) as exc:
        sklearn.datasets.fetch_openml(
            data_id=data_id, cache=False, as_frame=as_frame, parser=parser
        )
    # exception message should have file-path
    assert exc.match("1666876")
def test_open_openml_url_retry_on_network_error(monkeypatch):
    # Transient network errors should trigger retries (with a warning each
    # time) before the error finally propagates.
    def _mock_urlopen_network_error(request, *args, **kwargs):
        raise HTTPError(
            url=None, code=404, msg="Simulated network error", hdrs=None, fp=BytesIO()
        )

    monkeypatch.setattr(
        sklearn.datasets._openml, "urlopen", _mock_urlopen_network_error
    )

    invalid_openml_url = "https://api.openml.org/invalid-url"

    with pytest.warns(
        UserWarning,
        match=re.escape(
            "A network error occurred while downloading"
            f" {invalid_openml_url}. Retrying..."
        ),
    ) as record:
        with pytest.raises(HTTPError, match="Simulated network error") as exc_info:
            _open_openml_url(invalid_openml_url, None, delay=0)
        # Three retries -> three warnings before giving up.
        assert len(record) == 3
    # Avoid a ResourceWarning on Python 3.14 and later.
    exc_info.value.close()
###############################################################################
# Non-regressiont tests
@pytest.mark.parametrize("gzip_response", [True, False])
@pytest.mark.parametrize("parser", ("liac-arff", "pandas"))
def test_fetch_openml_with_ignored_feature(monkeypatch, gzip_response, parser):
    """Check that we can load the "zoo" dataset.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/14340
    """
    if parser == "pandas":
        pytest.importorskip("pandas")
    data_id = 62
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)

    dataset = sklearn.datasets.fetch_openml(
        data_id=data_id, cache=False, as_frame=False, parser=parser
    )
    assert dataset is not None
    # The dataset has 17 features, including 1 ignored (animal),
    # so we assert that we don't have the ignored feature in the final Bunch
    assert dataset["data"].shape == (101, 16)
    assert "animal" not in dataset["feature_names"]
def test_fetch_openml_strip_quotes(monkeypatch):
    """Check that we strip the single quotes when used as a string delimiter.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/23381
    """
    pd = pytest.importorskip("pandas")
    data_id = 40966
    _monkey_patch_webbased_functions(monkeypatch, data_id=data_id, gzip_response=False)

    common_params = {"as_frame": True, "cache": False, "data_id": data_id}
    mice_pandas = fetch_openml(parser="pandas", **common_params)
    mice_liac_arff = fetch_openml(parser="liac-arff", **common_params)
    # Both parsers must agree and neither may leave quote characters behind.
    pd.testing.assert_series_equal(mice_pandas.target, mice_liac_arff.target)
    assert not mice_pandas.target.str.startswith("'").any()
    assert not mice_pandas.target.str.endswith("'").any()
    # similar behaviour should be observed when the column is not the target
    mice_pandas = fetch_openml(parser="pandas", target_column="NUMB_N", **common_params)
    mice_liac_arff = fetch_openml(
        parser="liac-arff", target_column="NUMB_N", **common_params
    )
    pd.testing.assert_series_equal(
        mice_pandas.frame["class"], mice_liac_arff.frame["class"]
    )
    assert not mice_pandas.frame["class"].str.startswith("'").any()
    assert not mice_pandas.frame["class"].str.endswith("'").any()
def test_fetch_openml_leading_whitespace(monkeypatch):
    """Leading whitespace must be handled identically by both parsers.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/25311
    """
    pd = pytest.importorskip("pandas")
    data_id = 1590
    _monkey_patch_webbased_functions(monkeypatch, data_id=data_id, gzip_response=False)

    shared_kwargs = dict(as_frame=True, cache=False, data_id=data_id)
    frame_pandas = fetch_openml(parser="pandas", **shared_kwargs)
    frame_liac = fetch_openml(parser="liac-arff", **shared_kwargs)
    pd.testing.assert_series_equal(
        frame_pandas.frame["class"], frame_liac.frame["class"]
    )
def test_fetch_openml_quotechar_escapechar(monkeypatch):
    """Check that we can handle escapechar and single/double quotechar.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/25478
    """
    pd = pytest.importorskip("pandas")
    data_id = 42074
    _monkey_patch_webbased_functions(monkeypatch, data_id=data_id, gzip_response=False)

    common_params = {"as_frame": True, "cache": False, "data_id": data_id}
    adult_pandas = fetch_openml(parser="pandas", **common_params)
    adult_liac_arff = fetch_openml(parser="liac-arff", **common_params)
    # Both parsers must produce identical frames despite quoting/escaping.
    pd.testing.assert_frame_equal(adult_pandas.frame, adult_liac_arff.frame)
| _MockHTTPResponse |
python | sanic-org__sanic | guide/webapp/display/layouts/home.py | {
"start": 268,
"end": 2362
} | class ____(BaseLayout):
@contextmanager
def layout(
    self, request: Request, full: bool = True
) -> Generator[None, None, None]:
    # Home-page layout: sponsors banner and hero above the body, footer below.
    # NOTE(review): `full` appears unused here — presumably kept for interface
    # parity with other layouts; confirm before removing.
    self._sponsors()
    self._hero(request.ctx.language)
    with self.builder.div(class_="home container"):
        # Caller-supplied page content is emitted at this point.
        yield
    self._footer(request)
def _hero(self, language: str) -> None:
    # Large centered hero section: title, subtitle, tagline and CTA buttons.
    with self.builder.section(class_="hero is-large has-text-centered"):
        self.builder.div(
            E.h1(E.span("Sanic"), class_="title"),
            E.h2(class_="subtitle")("Build fast. Run fast."),
            E.h3(class_="tagline")("Accelerate your web app development"),
            self._do_buttons(language),
            class_="hero-body",
        )
def _do_buttons(self, language: str) -> Builder:
    """Render the hero call-to-action button row and return its builder."""
    # Data-driven list of (label, attributes) pairs for each button.
    button_specs = (
        (
            "Get Started",
            {
                "class_": "button is-primary",
                "href": f"/{language}/guide/getting-started.html",
            },
        ),
        (
            "Help",
            {
                "class_": "button is-outlined",
                "href": f"/{language}/help.html",
            },
        ),
        (
            "GitHub",
            {
                "class_": "button is-outlined",
                "href": "https://github.com/sanic-org/sanic",
                "target": "_blank",
            },
        ),
    )
    builder = E.div(class_="buttons is-centered")
    with builder:
        for label, attrs in button_specs:
            builder.a(label, **attrs)
    return builder
def _sponsors(self) -> None:
    # Sponsor banner rendered above the hero section.
    with self.builder.section(class_="sponsors"):
        self.builder(
            "Secure, auto-document, and monetize "
            "your Sanic API with Zuplo",
            E.a(
                "Start free",
                href="https://zuplo.com",
                target="_blank",
                class_="button is-primary is-small",
            ),
        )
def _footer(self, request: Request) -> None:
    # Home-page footer: pagination disabled, vertical margins tightened.
    do_footer(
        self.builder,
        request,
        extra_classes="mb-0 mt-6",
        with_pagination=False,
    )
| HomeLayout |
python | django__django | tests/test_utils/tests.py | {
"start": 86757,
"end": 88282
} | class ____(SimpleTestCase):
def test_installed_apps(self):
    # Only the isolated registry's single app should be visible.
    self.assertEqual(
        [app_config.label for app_config in self.class_apps.get_app_configs()],
        ["test_utils"],
    )
def test_class_decoration(self):
    # Models defined in a class-decorated test bind to the class registry.
    class ClassDecoration(models.Model):
        pass

    self.assertEqual(ClassDecoration._meta.apps, self.class_apps)
@isolate_apps("test_utils", kwarg_name="method_apps")
def test_method_decoration(self, method_apps):
class MethodDecoration(models.Model):
pass
self.assertEqual(MethodDecoration._meta.apps, method_apps)
def test_context_manager(self):
    # isolate_apps also works as a context manager yielding the registry.
    with isolate_apps("test_utils") as context_apps:

        class ContextManager(models.Model):
            pass

    self.assertEqual(ContextManager._meta.apps, context_apps)
@isolate_apps("test_utils", kwarg_name="method_apps")
def test_nested(self, method_apps):
class MethodDecoration(models.Model):
pass
with isolate_apps("test_utils") as context_apps:
class ContextManager(models.Model):
pass
with isolate_apps("test_utils") as nested_context_apps:
class NestedContextManager(models.Model):
pass
self.assertEqual(MethodDecoration._meta.apps, method_apps)
self.assertEqual(ContextManager._meta.apps, context_apps)
self.assertEqual(NestedContextManager._meta.apps, nested_context_apps)
| IsolatedAppsTests |
python | cython__cython | Cython/Compiler/Tests/TestBuffer.py | {
"start": 205,
"end": 1574
} | class ____(CythonTest):
# First, we only test the raw parser, i.e.
# the number and contents of arguments are NOT checked.
# However "dtype"/the first positional argument is special-cased
# to parse a type argument rather than an expression
def parse(self, s):
    # Compile fragment `s` (failure fails the test) and return the AST root.
    return self.should_not_fail(lambda: self.fragment(s)).root
def not_parseable(self, expected_error, s):
    # Fragment `s` must fail to compile with exactly `expected_error`.
    e = self.should_fail(lambda: self.fragment(s), Errors.CompileError)
    self.assertEqual(expected_error, e.message_only)
def test_basic(self):
    t = self.parse("cdef object[float, 4, ndim=2, foo=foo] x")
    bufnode = t.stats[0].base_type
    self.assertTrue(isinstance(bufnode, TemplatedTypeNode))
    # Only the two positional args (dtype and 4); keywords are kept separately.
    self.assertEqual(2, len(bufnode.positional_args))
    # print bufnode.dump()
    # should put more here...
def test_type_pos(self):
    # dtype given positionally as a multi-word C type.
    self.parse("cdef object[short unsigned int, 3] x")
def test_type_keyword(self):
    # dtype given as a keyword argument, after another keyword argument.
    self.parse("cdef object[foo=foo, dtype=short unsigned int] x")
def test_pos_after_key(self):
    # Positional arguments may not follow keyword arguments.
    self.not_parseable("Non-keyword arg following keyword arg",
                       "cdef object[foo=1, 2] x")
# See also tests/error/e_bufaccess.pyx and tets/run/bufaccess.pyx
# THESE TESTS ARE NOW DISABLED, the code they test was pretty much
# refactored away
| TestBufferParsing |
python | apache__airflow | providers/cncf/kubernetes/tests/unit/cncf/kubernetes/operators/test_job.py | {
"start": 40609,
"end": 41472
} | class ____:
@pytest.fixture(autouse=True)
def setup_tests(self):
    # Patch the hook's default client for every test in this class and undo
    # all active patches on teardown.
    self._default_client_patch = patch(f"{HOOK_CLASS}._get_default_client")
    self._default_client_mock = self._default_client_patch.start()

    yield

    patch.stopall()
@pytest.mark.db_test
@patch("kubernetes.config.load_kube_config")
@patch("kubernetes.client.api.BatchV1Api.patch_namespaced_job")
def test_update_execute(self, mock_patch_namespaced_job, mock_load_kube_config):
    # Executing the operator must issue a patch_namespaced_job API call.
    op = KubernetesPatchJobOperator(
        kubernetes_conn_id="kubernetes_default",
        task_id="test_update_job",
        name="test_job_name",
        namespace="test_job_namespace",
        body={"spec": {"suspend": False}},
    )
    op.execute(None)
    mock_patch_namespaced_job.assert_called()
| TestKubernetesPatchJobOperator |
python | qdrant__qdrant-client | tests/congruence_tests/test_sparse_recommend.py | {
"start": 607,
"end": 14857
} | class ____:
# Prevent pytest from collecting this helper class as a test case.
__test__ = False

def __init__(self):
    # Random sparse query vector reused by the raw-vector recommendation tests.
    self.query_image = random_sparse_vectors({"sparse-image": sparse_image_vector_size})[
        "sparse-image"
    ]
@classmethod
def simple_recommend_image(cls, client: QdrantBase) -> list[models.ScoredPoint]:
    # Single positive example, no negatives, sparse image space.
    return client.query_points(
        collection_name=COLLECTION_NAME,
        query=models.RecommendQuery(
            recommend=models.RecommendInput(positive=[10], negative=[])
        ),
        with_payload=True,
        limit=10,
        using="sparse-image",
    ).points
@classmethod
def many_recommend(cls, client: QdrantBase) -> list[models.ScoredPoint]:
    # Two positive examples, negatives omitted entirely.
    return client.query_points(
        collection_name=COLLECTION_NAME,
        query=models.RecommendQuery(recommend=models.RecommendInput(positive=[10, 19])),
        with_payload=True,
        limit=10,
        using="sparse-image",
    ).points
@classmethod
def simple_recommend_negative(cls, client: QdrantBase) -> list[models.ScoredPoint]:
    # One positive and two negative examples.
    return client.query_points(
        collection_name=COLLECTION_NAME,
        query=models.RecommendQuery(
            recommend=models.RecommendInput(positive=[10], negative=[15, 7])
        ),
        with_payload=True,
        limit=10,
        using="sparse-image",
    ).points
@classmethod
def recommend_from_another_collection(cls, client: QdrantBase) -> list[models.ScoredPoint]:
    # Example vectors are looked up in the secondary collection via lookup_from.
    return client.query_points(
        collection_name=COLLECTION_NAME,
        query=models.RecommendQuery(
            recommend=models.RecommendInput(positive=[10], negative=[15, 7])
        ),
        with_payload=True,
        limit=10,
        using="sparse-image",
        lookup_from=models.LookupLocation(
            collection=secondary_collection_name,
            vector="sparse-image",
        ),
    ).points
@classmethod
def filter_recommend_text(
    cls, client: QdrantBase, query_filter: models.Filter
) -> list[models.ScoredPoint]:
    # Positive-only recommendation on the text space, restricted by a payload filter.
    return client.query_points(
        collection_name=COLLECTION_NAME,
        query=models.RecommendQuery(recommend=models.RecommendInput(positive=[10])),
        query_filter=query_filter,
        with_payload=True,
        limit=10,
        using="sparse-text",
    ).points
@classmethod
def best_score_recommend(cls, client: QdrantBase) -> list[models.ScoredPoint]:
    # BEST_SCORE strategy, positives only.
    return client.query_points(
        collection_name=COLLECTION_NAME,
        query=models.RecommendQuery(
            recommend=models.RecommendInput(
                positive=[10, 20], negative=[], strategy=models.RecommendStrategy.BEST_SCORE
            )
        ),
        with_payload=True,
        limit=10,
        using="sparse-image",
    ).points
@classmethod
def best_score_recommend_euclid(cls, client: QdrantBase) -> list[models.ScoredPoint]:
    # BEST_SCORE strategy with positives and negatives, on the code space.
    return client.query_points(
        collection_name=COLLECTION_NAME,
        query=models.RecommendQuery(
            recommend=models.RecommendInput(
                positive=[10, 20],
                negative=[11, 21],
                strategy=models.RecommendStrategy.BEST_SCORE,
            )
        ),
        with_payload=True,
        limit=10,
        using="sparse-code",
    ).points
@classmethod
def only_negatives_best_score_recommend(cls, client: QdrantBase) -> list[models.ScoredPoint]:
    # BEST_SCORE supports queries with negative examples only.
    return client.query_points(
        collection_name=COLLECTION_NAME,
        query=models.RecommendQuery(
            recommend=models.RecommendInput(
                positive=None, negative=[10, 12], strategy=models.RecommendStrategy.BEST_SCORE
            )
        ),
        with_payload=True,
        limit=10,
        using="sparse-image",
    ).points
@classmethod
def only_negatives_best_score_recommend_euclid(
    cls, client: QdrantBase
) -> list[models.ScoredPoint]:
    # Same negatives-only BEST_SCORE query, but on the code space.
    return client.query_points(
        collection_name=COLLECTION_NAME,
        query=models.RecommendQuery(
            recommend=models.RecommendInput(
                positive=None, negative=[10, 12], strategy=models.RecommendStrategy.BEST_SCORE
            )
        ),
        with_payload=True,
        limit=10,
        using="sparse-code",
    ).points
@classmethod
def sum_scores_recommend(cls, client: QdrantBase) -> list[models.ScoredPoint]:
    # SUM_SCORES strategy, positives only.
    return client.query_points(
        collection_name=COLLECTION_NAME,
        query=models.RecommendQuery(
            recommend=models.RecommendInput(
                positive=[10, 20], negative=[], strategy=models.RecommendStrategy.SUM_SCORES
            )
        ),
        with_payload=True,
        limit=10,
        using="sparse-image",
    ).points
@classmethod
def sum_scores_recommend_euclid(cls, client: QdrantBase) -> list[models.ScoredPoint]:
    # SUM_SCORES strategy with positives and negatives, on the code space.
    return client.query_points(
        collection_name=COLLECTION_NAME,
        query=models.RecommendQuery(
            recommend=models.RecommendInput(
                positive=[10, 20],
                negative=[11, 21],
                strategy=models.RecommendStrategy.SUM_SCORES,
            )
        ),
        with_payload=True,
        limit=10,
        using="sparse-code",
    ).points
@classmethod
def only_negatives_sum_scores_recommend(cls, client: QdrantBase) -> list[models.ScoredPoint]:
    # SUM_SCORES supports queries with negative examples only.
    return client.query_points(
        collection_name=COLLECTION_NAME,
        query=models.RecommendQuery(
            recommend=models.RecommendInput(
                positive=None, negative=[10, 12], strategy=models.RecommendStrategy.SUM_SCORES
            )
        ),
        with_payload=True,
        limit=10,
        using="sparse-image",
    ).points
@classmethod
def only_negatives_sum_scores_recommend_euclid(
    cls, client: QdrantBase
) -> list[models.ScoredPoint]:
    # Same negatives-only SUM_SCORES query, but on the code space.
    return client.query_points(
        collection_name=COLLECTION_NAME,
        query=models.RecommendQuery(
            recommend=models.RecommendInput(
                positive=None, negative=[10, 12], strategy=models.RecommendStrategy.SUM_SCORES
            )
        ),
        with_payload=True,
        limit=10,
        using="sparse-code",
    ).points
@classmethod
def avg_vector_recommend(cls, client: QdrantBase) -> list[models.ScoredPoint]:
    # Classic AVERAGE_VECTOR strategy, positives only.
    return client.query_points(
        collection_name=COLLECTION_NAME,
        query=models.RecommendQuery(
            recommend=models.RecommendInput(
                positive=[10, 13],
                negative=[],
                strategy=models.RecommendStrategy.AVERAGE_VECTOR,
            )
        ),
        with_payload=True,
        limit=10,
        using="sparse-image",
    ).points
def recommend_from_raw_vectors(self, client: QdrantBase) -> list[models.ScoredPoint]:
    # Positive example given as a raw sparse vector instead of a point id.
    return client.query_points(
        collection_name=COLLECTION_NAME,
        query=models.RecommendQuery(
            recommend=models.RecommendInput(positive=[self.query_image], negative=[])
        ),
        with_payload=True,
        limit=10,
        using="sparse-image",
    ).points
def recommend_from_raw_vectors_and_ids(self, client: QdrantBase) -> list[models.ScoredPoint]:
    # Positives mix a raw sparse vector with a point id.
    return client.query_points(
        collection_name=COLLECTION_NAME,
        query=models.RecommendQuery(
            recommend=models.RecommendInput(positive=[self.query_image, 10], negative=[])
        ),
        with_payload=True,
        limit=10,
        using="sparse-image",
    ).points
@staticmethod
def recommend_batch(client: QdrantBase) -> list[models.QueryResponse]:
    # Two heterogeneous requests in one batch: AVERAGE_VECTOR, then BEST_SCORE
    # with examples looked up in the secondary collection.
    return client.query_batch_points(
        collection_name=COLLECTION_NAME,
        requests=[
            models.QueryRequest(
                query=models.RecommendQuery(
                    recommend=models.RecommendInput(
                        positive=[3],
                        negative=[],
                        strategy=models.RecommendStrategy.AVERAGE_VECTOR,
                    )
                ),
                limit=1,
                using="sparse-image",
            ),
            models.QueryRequest(
                query=models.RecommendQuery(
                    recommend=models.RecommendInput(
                        positive=[10],
                        negative=[],
                        strategy=models.RecommendStrategy.BEST_SCORE,
                    )
                ),
                limit=2,
                using="sparse-image",
                lookup_from=models.LookupLocation(
                    collection=secondary_collection_name,
                    vector="sparse-image",
                ),
            ),
        ],
    )
def test_simple_recommend() -> None:
    # Populate identical data in the local (in-memory) and remote clients, then
    # check that every recommendation flavour returns congruent results.
    fixture_points = generate_sparse_fixtures()
    secondary_collection_points = generate_sparse_fixtures(100)

    searcher = TestSimpleRecommendation()

    local_client = init_local()
    init_client(
        local_client,
        fixture_points,
        vectors_config={},
        sparse_vectors_config=sparse_vectors_config,
    )
    init_client(
        local_client,
        secondary_collection_points,
        secondary_collection_name,
        vectors_config={},
        sparse_vectors_config=sparse_vectors_config,
    )

    remote_client = init_remote()
    init_client(
        remote_client,
        fixture_points,
        vectors_config={},
        sparse_vectors_config=sparse_vectors_config,
    )
    init_client(
        remote_client,
        secondary_collection_points,
        secondary_collection_name,
        vectors_config={},
        sparse_vectors_config=sparse_vectors_config,
    )

    compare_client_results(local_client, remote_client, searcher.simple_recommend_image)
    compare_client_results(local_client, remote_client, searcher.many_recommend)
    compare_client_results(local_client, remote_client, searcher.simple_recommend_negative)
    compare_client_results(local_client, remote_client, searcher.recommend_from_another_collection)
    compare_client_results(local_client, remote_client, searcher.best_score_recommend)
    compare_client_results(local_client, remote_client, searcher.best_score_recommend_euclid)
    compare_client_results(
        local_client, remote_client, searcher.only_negatives_best_score_recommend
    )
    compare_client_results(
        local_client, remote_client, searcher.only_negatives_best_score_recommend_euclid
    )
    compare_client_results(local_client, remote_client, searcher.sum_scores_recommend)
    compare_client_results(local_client, remote_client, searcher.sum_scores_recommend_euclid)
    compare_client_results(
        local_client, remote_client, searcher.only_negatives_sum_scores_recommend
    )
    compare_client_results(
        local_client, remote_client, searcher.only_negatives_sum_scores_recommend_euclid
    )
    compare_client_results(local_client, remote_client, searcher.avg_vector_recommend)
    compare_client_results(local_client, remote_client, searcher.recommend_from_raw_vectors)
    compare_client_results(
        local_client, remote_client, searcher.recommend_from_raw_vectors_and_ids
    )
    compare_client_results(local_client, remote_client, searcher.recommend_batch)

    # Filtered recommendation is checked with several random filters; report the
    # offending filter on mismatch to keep failures reproducible.
    for _ in range(10):
        query_filter = one_random_filter_please()
        try:
            compare_client_results(
                local_client,
                remote_client,
                searcher.filter_recommend_text,
                query_filter=query_filter,
            )
        except AssertionError as e:
            print(f"\nFailed with filter {query_filter}")
            raise e
def test_query_with_nan():
    # A NaN inside a sparse query vector must be rejected: locally via an
    # AssertionError, remotely via an UnexpectedResponse from the server.
    fixture_points = generate_sparse_fixtures()
    sparse_vector_dict = random_sparse_vectors({"sparse-image": sparse_image_vector_size})
    sparse_vector = sparse_vector_dict["sparse-image"]
    sparse_vector.values[0] = np.nan
    using = "sparse-image"

    local_client = init_local()
    remote_client = init_remote()
    init_client(
        local_client,
        fixture_points,
        vectors_config={},
        sparse_vectors_config=sparse_vectors_config,
    )
    init_client(
        remote_client,
        fixture_points,
        vectors_config={},
        sparse_vectors_config=sparse_vectors_config,
    )

    # NaN in a positive example.
    with pytest.raises(AssertionError):
        local_client.query_points(
            collection_name=COLLECTION_NAME,
            query=models.RecommendQuery(
                recommend=models.RecommendInput(positive=[sparse_vector], negative=[])
            ),
            using=using,
        )
    with pytest.raises(UnexpectedResponse):
        remote_client.query_points(
            collection_name=COLLECTION_NAME,
            query=models.RecommendQuery(
                recommend=models.RecommendInput(positive=[sparse_vector], negative=[])
            ),
            using=using,
        )
    # NaN in a negative example.
    with pytest.raises(AssertionError):
        local_client.query_points(
            collection_name=COLLECTION_NAME,
            query=models.RecommendQuery(
                recommend=models.RecommendInput(positive=[1], negative=[sparse_vector])
            ),
            using=using,
        )
    with pytest.raises(UnexpectedResponse):
        remote_client.query_points(
            collection_name=COLLECTION_NAME,
            query=models.RecommendQuery(
                recommend=models.RecommendInput(positive=[1], negative=[sparse_vector])
            ),
            using=using,
        )
| TestSimpleRecommendation |
python | dagster-io__dagster | python_modules/libraries/dagster-pandas/dagster_pandas/constraints.py | {
"start": 353,
"end": 465
} | class ____(Exception):
"""Indicates that a constraint has been violated."""
@beta
| ConstraintViolationException |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/dataplex.py | {
"start": 2702,
"end": 86025
} | class ____(GoogleBaseHook, OperationHelper):
"""
Hook for Google Dataplex.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# Cached googleapiclient discovery Resource.
# NOTE(review): not referenced in the visible methods — confirm usage
# elsewhere in the class before removing.
_conn: Resource | None = None

def __init__(
    self,
    api_version: str = "v1",
    gcp_conn_id: str = "google_cloud_default",
    impersonation_chain: str | Sequence[str] | None = None,
    location: str | None = None,
    **kwargs,
) -> None:
    # Delegate connection/impersonation handling to GoogleBaseHook.
    super().__init__(
        gcp_conn_id=gcp_conn_id,
        impersonation_chain=impersonation_chain,
        **kwargs,
    )
    self.api_version = api_version
    self.location = location
def get_dataplex_client(self) -> DataplexServiceClient:
    """Return DataplexServiceClient."""
    # Endpoint is pinned to the Dataplex gRPC endpoint.
    client_options = ClientOptions(api_endpoint="dataplex.googleapis.com:443")

    return DataplexServiceClient(
        credentials=self.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
    )
def get_dataplex_data_scan_client(self) -> DataScanServiceClient:
    """Return DataScanServiceClient."""
    # Same pinned endpoint as the other Dataplex clients in this hook.
    client_options = ClientOptions(api_endpoint="dataplex.googleapis.com:443")

    return DataScanServiceClient(
        credentials=self.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
    )
def get_dataplex_catalog_client(self) -> CatalogServiceClient:
    """Return CatalogServiceClient."""
    # Same pinned endpoint as the other Dataplex clients in this hook.
    client_options = ClientOptions(api_endpoint="dataplex.googleapis.com:443")

    return CatalogServiceClient(
        credentials=self.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
    )
def wait_for_operation(self, operation: Operation, timeout: float | None = None):
    """Wait for long-lasting operation to complete."""
    try:
        return operation.result(timeout=timeout)
    except Exception:
        # Re-raise the operation's own recorded error (if any) as an
        # AirflowException instead of the raw client-library exception.
        error = operation.exception(timeout=timeout)
        raise AirflowException(error)
@GoogleBaseHook.fallback_to_default_project_id
def create_entry(
    self,
    location: str,
    entry_id: str,
    entry_group_id: str,
    entry_configuration: Entry | dict,
    project_id: str = PROVIDE_PROJECT_ID,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> Entry:
    """
    Create a Dataplex Catalog Entry inside the given Entry Group.

    :param location: Required. The ID of the Google Cloud location that the task belongs to.
    :param entry_id: Required. Entry identifier, unique within the Entry Group. For entries
        corresponding to Google Cloud resources, use the resource's full resource name
        without the leading double slashes (e.g. for
        ``//library.googleapis.com/shelves/shelf1/books/book2`` the suggested entry_id is
        ``library.googleapis.com/shelves/shelf1/books/book2``); the same convention is
        suggested for non-Google-Cloud resources. At most 4000 characters.
    :param entry_group_id: Required. EntryGroup resource name to which the created Entry
        belongs.
    :param entry_configuration: Required. Entry configuration body.
    :param project_id: Optional. The ID of the Google Cloud project that the task belongs to.
    :param retry: Optional. A retry object used to retry requests. If `None` is specified,
        requests will not be retried.
    :param timeout: Optional. The amount of time, in seconds, to wait for the request to
        complete. Note that if `retry` is specified, the timeout applies to each attempt.
    :param metadata: Optional. Additional metadata that is provided to the method.
    """
    catalog_client = self.get_dataplex_catalog_client()
    # Build the fully qualified parent path and the request body up front.
    parent = catalog_client.entry_group_path(project_id, location, entry_group_id)
    request = {
        "parent": parent,
        "entry_id": entry_id,
        "entry": entry_configuration,
    }
    return catalog_client.create_entry(
        request=request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
@GoogleBaseHook.fallback_to_default_project_id
def get_entry(
    self,
    location: str,
    entry_id: str,
    entry_group_id: str,
    view: EntryView | str | None = None,
    aspect_types: MutableSequence[str] | None = None,
    paths: MutableSequence[str] | None = None,
    project_id: str = PROVIDE_PROJECT_ID,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> Entry:
    """
    Retrieve a Dataplex Catalog Entry from the given Entry Group.

    :param location: Required. The ID of the Google Cloud location that the task belongs to.
    :param entry_id: Required. Entry identifier, unique within the Entry Group. For entries
        corresponding to Google Cloud resources, use the resource's full resource name
        without the leading double slashes (e.g. for
        ``//library.googleapis.com/shelves/shelf1/books/book2`` the suggested entry_id is
        ``library.googleapis.com/shelves/shelf1/books/book2``); the same convention is
        suggested for non-Google-Cloud resources. At most 4000 characters.
    :param entry_group_id: Required. EntryGroup resource name the Entry belongs to.
    :param view: Optional. View to control which parts of an entry the service should return.
    :param aspect_types: Optional. Limits the aspects returned to the provided aspect types.
        It only works for CUSTOM view.
    :param paths: Optional. Limits the aspects returned to those associated with the
        provided paths within the Entry. It only works for CUSTOM view.
    :param project_id: Optional. The ID of the Google Cloud project that the task belongs to.
    :param retry: Optional. A retry object used to retry requests. If `None` is specified,
        requests will not be retried.
    :param timeout: Optional. The amount of time, in seconds, to wait for the request to
        complete. Note that if `retry` is specified, the timeout applies to each attempt.
    :param metadata: Optional. Additional metadata that is provided to the method.
    """
    catalog_client = self.get_dataplex_catalog_client()
    # Build the fully qualified entry name and the request body up front.
    entry_name = catalog_client.entry_path(project_id, location, entry_group_id, entry_id)
    request = {
        "name": entry_name,
        "view": view,
        "aspect_types": aspect_types,
        "paths": paths,
    }
    return catalog_client.get_entry(
        request=request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
@GoogleBaseHook.fallback_to_default_project_id
def delete_entry(
self,
location: str,
entry_id: str,
entry_group_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Entry:
"""
Delete an AspectType resource.
:param location: Required. The ID of the Google Cloud location that the task belongs to.
:param entry_id: Required. Entry identifier. It has to be unique within an Entry Group.
Entries corresponding to Google Cloud resources use an Entry ID format based on `full resource
names <https://cloud.google.com/apis/design/resource_names#full_resource_name>`__.
The format is a full resource name of the resource without the prefix double slashes in the API
service name part of the full resource name. This allows retrieval of entries using their
associated resource name.
For example, if the full resource name of a resource is
``//library.googleapis.com/shelves/shelf1/books/book2``, then the suggested entry_id is
``library.googleapis.com/shelves/shelf1/books/book2``.
It is also suggested to follow the same convention for entries corresponding to resources from
providers or systems other than Google Cloud.
The maximum size of the field is 4000 characters.
:param entry_group_id: Required. EntryGroup resource name to which created Entry belongs to.
:param project_id: Optional. The ID of the Google Cloud project that the task belongs to.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
"""
client = self.get_dataplex_catalog_client()
return client.delete_entry(
request={
"name": client.entry_path(project_id, location, entry_group_id, entry_id),
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def list_entries(
self,
location: str,
entry_group_id: str,
filter_by: str | None = None,
page_size: int | None = None,
page_token: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListEntriesPager:
r"""
List Entries resources from specific location.
:param location: Required. The ID of the Google Cloud location that the task belongs to.
:param entry_group_id: Required. EntryGroup resource name to which created Entry belongs to.
:param filter_by: Optional. A filter on the entries to return. Filters are case-sensitive.
You can filter the request by the following fields:
- entry_type
- entry_source.display_name
The comparison operators are =, !=, <, >, <=, >=. The service compares strings according to
lexical order.
You can use the logical operators AND, OR, NOT in the filter. You can use Wildcard "*", but for
entry_type you need to provide the full project id or number.
Example filter expressions:
- "entry_source.display_name=AnExampleDisplayName"
- "entry_type=projects/example-project/locations/global/entryTypes/example-entry_type"
- "entry_type=projects/example-project/locations/us/entryTypes/a\*
OR entry_type=projects/another-project/locations/\*"
- "NOT entry_source.display_name=AnotherExampleDisplayName".
:param page_size: Optional. Number of items to return per page. If there are remaining results,
the service returns a next_page_token. If unspecified, the service returns at most 10 Entries.
The maximum value is 100; values above 100 will be coerced to 100.
:param page_token: Optional. Page token received from a previous ``ListEntries`` call. Provide
this to retrieve the subsequent page.
:param project_id: Optional. The ID of the Google Cloud project that the task belongs to.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
"""
client = self.get_dataplex_catalog_client()
return client.list_entries(
request={
"parent": client.entry_group_path(project_id, location, entry_group_id),
"filter": filter_by,
"page_size": page_size,
"page_token": page_token,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def search_entries(
self,
location: str,
query: str,
order_by: str | None = None,
scope: str | None = None,
page_size: int | None = None,
page_token: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> SearchEntriesPager:
"""
Search for Entries matching the given query and scope.
:param location: Required. The ID of the Google Cloud location that the task belongs to.
:param query: Required. The query against which entries in scope should be matched. The query
syntax is defined in `Search syntax for Dataplex Catalog
<https://cloud.google.com/dataplex/docs/search-syntax>`__.
:param order_by: Optional. Specifies the ordering of results. Supported values are:
- ``relevance`` (default)
- ``last_modified_timestamp``
- ``last_modified_timestamp asc``
:param scope: Optional. The scope under which the search should be operating. It must either be
``organizations/<org_id>`` or ``projects/<project_ref>``. If it is unspecified, it
defaults to the organization where the project provided in ``name`` is located.
:param page_size: Optional. Number of items to return per page. If there are remaining results,
the service returns a next_page_token. If unspecified, the service returns at most 10 Entries.
The maximum value is 100; values above 100 will be coerced to 100.
:param page_token: Optional. Page token received from a previous ``ListEntries`` call. Provide
this to retrieve the subsequent page.
:param project_id: Optional. The ID of the Google Cloud project that the task belongs to.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
"""
client = self.get_dataplex_catalog_client()
return client.search_entries(
request={
"name": client.common_location_path(project_id, location),
"query": query,
"order_by": order_by,
"page_size": page_size,
"page_token": page_token,
"scope": scope,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def lookup_entry(
self,
location: str,
entry_id: str,
entry_group_id: str,
view: EntryView | str | None = None,
aspect_types: MutableSequence[str] | None = None,
paths: MutableSequence[str] | None = None,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Entry:
"""
Look up a single Entry by name using the permission on the source system.
:param location: Required. The ID of the Google Cloud location that the task belongs to.
:param entry_id: Required. Entry identifier. It has to be unique within an Entry Group.
Entries corresponding to Google Cloud resources use an Entry ID format based on `full resource
names <https://cloud.google.com/apis/design/resource_names#full_resource_name>`__.
The format is a full resource name of the resource without the prefix double slashes in the API
service name part of the full resource name. This allows retrieval of entries using their
associated resource name.
For example, if the full resource name of a resource is
``//library.googleapis.com/shelves/shelf1/books/book2``, then the suggested entry_id is
``library.googleapis.com/shelves/shelf1/books/book2``.
It is also suggested to follow the same convention for entries corresponding to resources from
providers or systems other than Google Cloud.
The maximum size of the field is 4000 characters.
:param entry_group_id: Required. EntryGroup resource name to which created Entry belongs to.
:param view: Optional. View to control which parts of an entry the service should return.
:param aspect_types: Optional. Limits the aspects returned to the provided aspect types.
It only works for CUSTOM view.
:param paths: Optional. Limits the aspects returned to those associated with the provided paths
within the Entry. It only works for CUSTOM view.
:param project_id: Optional. The ID of the Google Cloud project that the task belongs to.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
"""
client = self.get_dataplex_catalog_client()
return client.lookup_entry(
request={
"name": client.common_location_path(project_id, location),
"entry": client.entry_path(project_id, location, entry_group_id, entry_id),
"view": view,
"aspect_types": aspect_types,
"paths": paths,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def update_entry(
self,
location: str,
entry_id: str,
entry_group_id: str,
entry_configuration: dict | Entry,
allow_missing: bool | None = False,
delete_missing_aspects: bool | None = False,
aspect_keys: MutableSequence[str] | None = None,
update_mask: list[str] | FieldMask | None = None,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Entry:
"""
Update an Entry resource.
:param entry_id: Required. Entry identifier. It has to be unique within an Entry Group.
Entries corresponding to Google Cloud resources use an Entry ID format based on `full resource
names <https://cloud.google.com/apis/design/resource_names#full_resource_name>`__.
The format is a full resource name of the resource without the prefix double slashes in the API
service name part of the full resource name. This allows retrieval of entries using their
associated resource name.
For example, if the full resource name of a resource is
``//library.googleapis.com/shelves/shelf1/books/book2``, then the suggested entry_id is
``library.googleapis.com/shelves/shelf1/books/book2``.
It is also suggested to follow the same convention for entries corresponding to resources from
providers or systems other than Google Cloud.
The maximum size of the field is 4000 characters.
:param entry_group_id: Required. EntryGroup resource name to which created Entry belongs to.
:param entry_configuration: Required. The updated configuration body of the Entry.
:param location: Required. The ID of the Google Cloud location that the task belongs to.
:param update_mask: Optional. Names of fields whose values to overwrite on an entry group.
If this parameter is absent or empty, all modifiable fields are overwritten. If such
fields are non-required and omitted in the request body, their values are emptied.
:param allow_missing: Optional. If set to true and entry doesn't exist, the service will create it.
:param delete_missing_aspects: Optional. If set to true and the aspect_keys specify aspect
ranges, the service deletes any existing aspects from that range that were not provided
in the request.
:param aspect_keys: Optional. The map keys of the Aspects which the service should modify.
It supports the following syntax:
- ``<aspect_type_reference>`` - matches an aspect of the given type and empty path.
- ``<aspect_type_reference>@path`` - matches an aspect of the given type and specified path.
For example, to attach an aspect to a field that is specified by the ``schema``
aspect, the path should have the format ``Schema.<field_name>``.
- ``<aspect_type_reference>@*`` - matches aspects of the given type for all paths.
- ``*@path`` - matches aspects of all types on the given path.
The service will not remove existing aspects matching the syntax unless ``delete_missing_aspects``
is set to true.
If this field is left empty, the service treats it as specifying exactly those Aspects present
in the request.
:param project_id: Optional. The ID of the Google Cloud project that the task belongs to.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
"""
client = self.get_dataplex_catalog_client()
_entry = (
deepcopy(entry_configuration)
if isinstance(entry_configuration, dict)
else Entry.to_dict(entry_configuration)
)
_entry["name"] = client.entry_path(project_id, location, entry_group_id, entry_id)
return client.update_entry(
request={
"entry": _entry,
"update_mask": FieldMask(paths=update_mask) if type(update_mask) is list else update_mask,
"allow_missing": allow_missing,
"delete_missing_aspects": delete_missing_aspects,
"aspect_keys": aspect_keys,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def create_aspect_type(
self,
location: str,
aspect_type_id: str,
aspect_type_configuration: AspectType | dict,
project_id: str = PROVIDE_PROJECT_ID,
validate_only: bool = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Create an EntryType resource.
:param location: Required. The ID of the Google Cloud location that the task belongs to.
:param aspect_type_id: Required. AspectType identifier.
:param aspect_type_configuration: Required. AspectType configuration body.
:param project_id: Optional. The ID of the Google Cloud project that the task belongs to.
:param validate_only: Optional. If set, performs request validation, but does not actually execute
the create request.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
"""
client = self.get_dataplex_catalog_client()
return client.create_aspect_type(
request={
"parent": client.common_location_path(project_id, location),
"aspect_type_id": aspect_type_id,
"aspect_type": aspect_type_configuration,
"validate_only": validate_only,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_aspect_type(
self,
location: str,
aspect_type_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> AspectType:
"""
Get an AspectType resource.
:param location: Required. The ID of the Google Cloud location that the task belongs to.
:param aspect_type_id: Required. AspectType identifier.
:param project_id: Optional. The ID of the Google Cloud project that the task belongs to.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
"""
client = self.get_dataplex_catalog_client()
return client.get_aspect_type(
request={
"name": client.aspect_type_path(project_id, location, aspect_type_id),
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def create_entry_type(
self,
location: str,
entry_type_id: str,
entry_type_configuration: EntryType | dict,
project_id: str = PROVIDE_PROJECT_ID,
validate_only: bool = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Create an EntryType resource.
:param location: Required. The ID of the Google Cloud location that the task belongs to.
:param entry_type_id: Required. EntryType identifier.
:param entry_type_configuration: Required. EntryType configuration body.
:param project_id: Optional. The ID of the Google Cloud project that the task belongs to.
:param validate_only: Optional. If set, performs request validation, but does not actually execute
the create request.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
"""
client = self.get_dataplex_catalog_client()
return client.create_entry_type(
request={
"parent": client.common_location_path(project_id, location),
"entry_type_id": entry_type_id,
"entry_type": entry_type_configuration,
"validate_only": validate_only,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_entry_type(
self,
location: str,
entry_type_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> EntryType:
"""
Get an EntryType resource.
:param location: Required. The ID of the Google Cloud location that the task belongs to.
:param entry_type_id: Required. EntryGroup identifier.
:param project_id: Optional. The ID of the Google Cloud project that the task belongs to.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
"""
client = self.get_dataplex_catalog_client()
return client.get_entry_type(
request={
"name": client.entry_type_path(project_id, location, entry_type_id),
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def delete_aspect_type(
self,
location: str,
aspect_type_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Delete an AspectType resource.
:param location: Required. The ID of the Google Cloud location that the task belongs to.
:param aspect_type_id: Required. AspectType identifier.
:param project_id: Optional. The ID of the Google Cloud project that the task belongs to.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
"""
client = self.get_dataplex_catalog_client()
return client.delete_aspect_type(
request={
"name": client.aspect_type_path(project_id, location, aspect_type_id),
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def list_aspect_types(
self,
location: str,
filter_by: str | None = None,
order_by: str | None = None,
page_size: int | None = None,
page_token: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListAspectTypesPager:
"""
List AspectTypes resources from specific location.
:param location: Required. The ID of the Google Cloud location that the task belongs to.
:param filter_by: Optional. Filter to apply on the list results.
:param order_by: Optional. Fields to order the results by.
:param page_size: Optional. Maximum number of EntryGroups to return on one page.
:param page_token: Optional. Token to retrieve the next page of results.
:param project_id: Optional. The ID of the Google Cloud project that the task belongs to.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
"""
client = self.get_dataplex_catalog_client()
return client.list_aspect_types(
request={
"parent": client.common_location_path(project_id, location),
"filter": filter_by,
"order_by": order_by,
"page_size": page_size,
"page_token": page_token,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def update_aspect_type(
self,
location: str,
aspect_type_id: str,
aspect_type_configuration: dict | AspectType,
project_id: str = PROVIDE_PROJECT_ID,
update_mask: list[str] | FieldMask | None = None,
validate_only: bool | None = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Update an AspectType resource.
:param aspect_type_id: Required. ID of the AspectType to update.
:param aspect_type_configuration: Required. The updated configuration body of the AspectType.
:param location: Required. The ID of the Google Cloud location that the task belongs to.
:param update_mask: Optional. Names of fields whose values to overwrite on an entry group.
If this parameter is absent or empty, all modifiable fields are overwritten. If such
fields are non-required and omitted in the request body, their values are emptied.
:param project_id: Optional. The ID of the Google Cloud project that the task belongs to.
:param validate_only: Optional. The service validates the request without performing any mutations.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
"""
client = self.get_dataplex_catalog_client()
_aspect_type = (
deepcopy(aspect_type_configuration)
if isinstance(aspect_type_configuration, dict)
else AspectType.to_dict(aspect_type_configuration)
)
_aspect_type["name"] = client.aspect_type_path(project_id, location, aspect_type_id)
return client.update_aspect_type(
request={
"aspect_type": _aspect_type,
"update_mask": FieldMask(paths=update_mask) if type(update_mask) is list else update_mask,
"validate_only": validate_only,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def delete_entry_type(
self,
location: str,
entry_type_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Delete an EntryType resource.
:param location: Required. The ID of the Google Cloud location that the task belongs to.
:param entry_type_id: Required. EntryType identifier.
:param project_id: Optional. The ID of the Google Cloud project that the task belongs to.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
"""
client = self.get_dataplex_catalog_client()
return client.delete_entry_type(
request={
"name": client.entry_type_path(project_id, location, entry_type_id),
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def list_entry_types(
self,
location: str,
filter_by: str | None = None,
order_by: str | None = None,
page_size: int | None = None,
page_token: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListEntryTypesPager:
"""
List EntryTypes resources from specific location.
:param location: Required. The ID of the Google Cloud location that the task belongs to.
:param filter_by: Optional. Filter to apply on the list results.
:param order_by: Optional. Fields to order the results by.
:param page_size: Optional. Maximum number of EntryGroups to return on one page.
:param page_token: Optional. Token to retrieve the next page of results.
:param project_id: Optional. The ID of the Google Cloud project that the task belongs to.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
"""
client = self.get_dataplex_catalog_client()
return client.list_entry_types(
request={
"parent": client.common_location_path(project_id, location),
"filter": filter_by,
"order_by": order_by,
"page_size": page_size,
"page_token": page_token,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def update_entry_type(
self,
location: str,
entry_type_id: str,
entry_type_configuration: dict | EntryType,
project_id: str = PROVIDE_PROJECT_ID,
update_mask: list[str] | FieldMask | None = None,
validate_only: bool | None = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Update an EntryType resource.
:param entry_type_id: Required. ID of the EntryType to update.
:param entry_type_configuration: Required. The updated configuration body of the EntryType.
:param location: Required. The ID of the Google Cloud location that the task belongs to.
:param update_mask: Optional. Names of fields whose values to overwrite on an entry group.
If this parameter is absent or empty, all modifiable fields are overwritten. If such
fields are non-required and omitted in the request body, their values are emptied.
:param project_id: Optional. The ID of the Google Cloud project that the task belongs to.
:param validate_only: Optional. The service validates the request without performing any mutations.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
"""
client = self.get_dataplex_catalog_client()
_entry_type = (
deepcopy(entry_type_configuration)
if isinstance(entry_type_configuration, dict)
else EntryType.to_dict(entry_type_configuration)
)
_entry_type["name"] = client.entry_type_path(project_id, location, entry_type_id)
return client.update_entry_type(
request={
"entry_type": _entry_type,
"update_mask": FieldMask(paths=update_mask) if type(update_mask) is list else update_mask,
"validate_only": validate_only,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def create_entry_group(
self,
location: str,
entry_group_id: str,
entry_group_configuration: EntryGroup | dict,
project_id: str = PROVIDE_PROJECT_ID,
validate_only: bool = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Create an Entry resource.
:param location: Required. The ID of the Google Cloud location that the task belongs to.
:param entry_group_id: Required. EntryGroup identifier.
:param entry_group_configuration: Required. EntryGroup configuration body.
:param project_id: Optional. The ID of the Google Cloud project that the task belongs to.
:param validate_only: Optional. If set, performs request validation, but does not actually execute
the create request.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
"""
client = self.get_dataplex_catalog_client()
return client.create_entry_group(
request={
"parent": client.common_location_path(project_id, location),
"entry_group_id": entry_group_id,
"entry_group": entry_group_configuration,
"validate_only": validate_only,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_entry_group(
self,
location: str,
entry_group_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> EntryGroup:
"""
Get an EntryGroup resource.
:param location: Required. The ID of the Google Cloud location that the task belongs to.
:param entry_group_id: Required. EntryGroup identifier.
:param project_id: Optional. The ID of the Google Cloud project that the task belongs to.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
"""
client = self.get_dataplex_catalog_client()
return client.get_entry_group(
request={
"name": client.entry_group_path(project_id, location, entry_group_id),
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def delete_entry_group(
self,
location: str,
entry_group_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Delete an EntryGroup resource.
:param location: Required. The ID of the Google Cloud location that the task belongs to.
:param entry_group_id: Required. EntryGroup identifier.
:param project_id: Optional. The ID of the Google Cloud project that the task belongs to.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
"""
client = self.get_dataplex_catalog_client()
return client.delete_entry_group(
request={
"name": client.entry_group_path(project_id, location, entry_group_id),
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def list_entry_groups(
    self,
    location: str,
    filter_by: str | None = None,
    order_by: str | None = None,
    page_size: int | None = None,
    page_token: str | None = None,
    project_id: str = PROVIDE_PROJECT_ID,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> ListEntryGroupsPager:
    """
    List EntryGroup resources that live in the given location.

    :param location: Required. ID of the Google Cloud location to list from.
    :param filter_by: Optional. Filter applied to the list results.
    :param order_by: Optional. Fields used to order the results.
    :param page_size: Optional. Maximum number of EntryGroups per page.
    :param page_token: Optional. Token for retrieving the next page.
    :param project_id: Optional. ID of the Google Cloud project owning the resources.
    :param retry: Optional. Retry object for the request; ``None`` disables retries.
    :param timeout: Optional. Seconds to wait for completion (per attempt when ``retry`` is set).
    :param metadata: Optional. Additional metadata forwarded to the method.
    """
    catalog_client = self.get_dataplex_catalog_client()
    list_request = {
        "parent": catalog_client.common_location_path(project_id, location),
        "filter": filter_by,
        "order_by": order_by,
        "page_size": page_size,
        "page_token": page_token,
    }
    return catalog_client.list_entry_groups(
        request=list_request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
@GoogleBaseHook.fallback_to_default_project_id
def update_entry_group(
    self,
    location: str,
    entry_group_id: str,
    entry_group_configuration: dict | EntryGroup,
    project_id: str = PROVIDE_PROJECT_ID,
    update_mask: list[str] | FieldMask | None = None,
    validate_only: bool | None = False,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
    """
    Update an EntryGroup resource.

    :param entry_group_id: Required. ID of the EntryGroup to update.
    :param entry_group_configuration: Required. Updated configuration body of the EntryGroup.
    :param location: Required. ID of the Google Cloud location owning the resource.
    :param update_mask: Optional. Names of fields to overwrite; absent/empty means all
        modifiable fields are overwritten (omitted non-required fields are emptied).
    :param project_id: Optional. ID of the Google Cloud project owning the resource.
    :param validate_only: Optional. Validate the request without performing mutations.
    :param retry: Optional. Retry object for the request; ``None`` disables retries.
    :param timeout: Optional. Seconds to wait for completion (per attempt when ``retry`` is set).
    :param metadata: Optional. Additional metadata forwarded to the method.
    """
    client = self.get_dataplex_catalog_client()
    # Work on a copy (or a plain-dict view) so the caller's object is not
    # mutated when the resource name is injected below.
    _entry_group = (
        deepcopy(entry_group_configuration)
        if isinstance(entry_group_configuration, dict)
        else EntryGroup.to_dict(entry_group_configuration)
    )
    _entry_group["name"] = client.entry_group_path(project_id, location, entry_group_id)
    # isinstance() instead of an exact type() comparison so list subclasses
    # are also converted to a FieldMask.
    if isinstance(update_mask, list):
        update_mask = FieldMask(paths=update_mask)
    return client.update_entry_group(
        request={
            "entry_group": _entry_group,
            "update_mask": update_mask,
            "validate_only": validate_only,
        },
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
@GoogleBaseHook.fallback_to_default_project_id
def create_task(
    self,
    project_id: str,
    region: str,
    lake_id: str,
    body: dict[str, Any] | Task,
    dataplex_task_id: str,
    validate_only: bool | None = None,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> Any:
    """
    Create a task resource within a lake.

    :param project_id: Required. ID of the Google Cloud project owning the task.
    :param region: Required. ID of the Google Cloud region owning the task.
    :param lake_id: Required. ID of the lake the task belongs to.
    :param body: Required. Request body containing an instance of Task.
    :param dataplex_task_id: Required. Task identifier.
    :param validate_only: Optional. Only validate the request without mutations.
    :param retry: Retry object for the request; ``None`` disables retries.
    :param timeout: Seconds to wait for completion (per attempt when ``retry`` is set).
    :param metadata: Additional metadata forwarded to the method.
    """
    # NOTE(review): validate_only is accepted but not forwarded in the request
    # dict below — confirm whether the API call should include it.
    lake_name = f"projects/{project_id}/locations/{region}/lakes/{lake_id}"
    return self.get_dataplex_client().create_task(
        request={
            "parent": lake_name,
            "task_id": dataplex_task_id,
            "task": body,
        },
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
@GoogleBaseHook.fallback_to_default_project_id
def delete_task(
    self,
    project_id: str,
    region: str,
    lake_id: str,
    dataplex_task_id: str,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> Any:
    """
    Delete the given Dataplex task resource.

    :param project_id: Required. ID of the Google Cloud project owning the task.
    :param region: Required. ID of the Google Cloud region owning the task.
    :param lake_id: Required. ID of the lake the task belongs to.
    :param dataplex_task_id: Required. ID of the task to delete.
    :param retry: Retry object for the request; ``None`` disables retries.
    :param timeout: Seconds to wait for completion (per attempt when ``retry`` is set).
    :param metadata: Additional metadata forwarded to the method.
    """
    task_name = (
        f"projects/{project_id}/locations/{region}/lakes/{lake_id}/tasks/{dataplex_task_id}"
    )
    return self.get_dataplex_client().delete_task(
        request={"name": task_name},
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
@GoogleBaseHook.fallback_to_default_project_id
def list_tasks(
    self,
    project_id: str,
    region: str,
    lake_id: str,
    page_size: int | None = None,
    page_token: str | None = None,
    filter: str | None = None,
    order_by: str | None = None,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> Any:
    """
    List tasks under the given lake.

    :param project_id: Required. ID of the Google Cloud project owning the tasks.
    :param region: Required. ID of the Google Cloud region owning the tasks.
    :param lake_id: Required. ID of the lake the tasks belong to.
    :param page_size: Optional. Maximum number of tasks to return (default 10,
        values above 1000 are coerced to 1000).
    :param page_token: Optional. Page token from a previous call; all other
        parameters must match the call that produced it.
    :param filter: Optional. Filter request. (Name shadows the builtin but is
        kept for backward compatibility with existing callers.)
    :param order_by: Optional. Order-by fields for the result.
    :param retry: Retry object for the request; ``None`` disables retries.
    :param timeout: Seconds to wait for completion (per attempt when ``retry`` is set).
    :param metadata: Additional metadata forwarded to the method.
    """
    list_request = {
        "parent": f"projects/{project_id}/locations/{region}/lakes/{lake_id}",
        "page_size": page_size,
        "page_token": page_token,
        "filter": filter,
        "order_by": order_by,
    }
    return self.get_dataplex_client().list_tasks(
        request=list_request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
@GoogleBaseHook.fallback_to_default_project_id
def get_task(
    self,
    project_id: str,
    region: str,
    lake_id: str,
    dataplex_task_id: str,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> Any:
    """
    Retrieve a single Dataplex task resource.

    :param project_id: Required. ID of the Google Cloud project owning the task.
    :param region: Required. ID of the Google Cloud region owning the task.
    :param lake_id: Required. ID of the lake the task belongs to.
    :param dataplex_task_id: Required. ID of the task to retrieve.
    :param retry: Retry object for the request; ``None`` disables retries.
    :param timeout: Seconds to wait for completion (per attempt when ``retry`` is set).
    :param metadata: Additional metadata forwarded to the method.
    """
    task_name = (
        f"projects/{project_id}/locations/{region}/lakes/{lake_id}/tasks/{dataplex_task_id}"
    )
    return self.get_dataplex_client().get_task(
        request={"name": task_name},
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
@GoogleBaseHook.fallback_to_default_project_id
def delete_lake(
    self,
    project_id: str,
    region: str,
    lake_id: str,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> Any:
    """
    Delete the lake resource.

    :param project_id: Required. ID of the Google Cloud project owning the lake.
    :param region: Required. ID of the Google Cloud region owning the lake.
    :param lake_id: Required. ID of the lake to delete.
    :param retry: Retry object for the request; ``None`` disables retries.
    :param timeout: Seconds to wait for completion (per attempt when ``retry`` is set).
    :param metadata: Additional metadata forwarded to the method.
    """
    lake_name = f"projects/{project_id}/locations/{region}/lakes/{lake_id}"
    return self.get_dataplex_client().delete_lake(
        request={"name": lake_name},
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
@GoogleBaseHook.fallback_to_default_project_id
def create_lake(
    self,
    project_id: str,
    region: str,
    lake_id: str,
    body: dict[str, Any] | Lake,
    validate_only: bool | None = None,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> Any:
    """
    Create a lake resource.

    :param project_id: Required. ID of the Google Cloud project owning the lake.
    :param region: Required. ID of the Google Cloud region owning the lake.
    :param lake_id: Required. Lake identifier.
    :param body: Required. Request body containing an instance of Lake.
    :param validate_only: Optional. Only validate the request without mutations.
    :param retry: Retry object for the request; ``None`` disables retries.
    :param timeout: Seconds to wait for completion (per attempt when ``retry`` is set).
    :param metadata: Additional metadata forwarded to the method.
    """
    # NOTE(review): validate_only is accepted but not forwarded in the request
    # dict below — confirm whether the API call should include it.
    location_name = f"projects/{project_id}/locations/{region}"
    return self.get_dataplex_client().create_lake(
        request={
            "parent": location_name,
            "lake_id": lake_id,
            "lake": body,
        },
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
@GoogleBaseHook.fallback_to_default_project_id
def get_lake(
    self,
    project_id: str,
    region: str,
    lake_id: str,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> Any:
    """
    Get lake resource.

    :param project_id: Required. ID of the Google Cloud project owning the lake.
    :param region: Required. ID of the Google Cloud region owning the lake.
    :param lake_id: Required. ID of the lake to retrieve.
    :param retry: Retry object for the request; ``None`` disables retries.
    :param timeout: Seconds to wait for completion (per attempt when ``retry`` is set).
    :param metadata: Additional metadata forwarded to the method.
    """
    # Resource name without a trailing slash, consistent with delete_lake and
    # the other lake-scoped methods; the previous trailing "/" malformed the
    # resource name.
    name = f"projects/{project_id}/locations/{region}/lakes/{lake_id}"
    client = self.get_dataplex_client()
    result = client.get_lake(
        request={
            "name": name,
        },
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    return result
@GoogleBaseHook.fallback_to_default_project_id
def create_zone(
    self,
    project_id: str,
    region: str,
    lake_id: str,
    zone_id: str,
    body: dict[str, Any] | Zone,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> Any:
    """
    Create a zone resource within a lake.

    :param project_id: Required. ID of the Google Cloud project owning the lake.
    :param region: Required. ID of the Google Cloud region owning the lake.
    :param lake_id: Required. ID of the lake the zone belongs to.
    :param zone_id: Required. Zone identifier.
    :param body: Required. Request body containing an instance of Zone.
    :param retry: Retry object for the request; ``None`` disables retries.
    :param timeout: Seconds to wait for completion (per attempt when ``retry`` is set).
    :param metadata: Additional metadata forwarded to the method.
    """
    lake_name = f"projects/{project_id}/locations/{region}/lakes/{lake_id}"
    return self.get_dataplex_client().create_zone(
        request={
            "parent": lake_name,
            "zone": body,
            "zone_id": zone_id,
        },
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
@GoogleBaseHook.fallback_to_default_project_id
def delete_zone(
    self,
    project_id: str,
    region: str,
    lake_id: str,
    zone_id: str,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> Any:
    """
    Delete a zone resource; all assets within it must already be deleted.

    :param project_id: Required. ID of the Google Cloud project owning the lake.
    :param region: Required. ID of the Google Cloud region owning the lake.
    :param lake_id: Required. ID of the lake the zone belongs to.
    :param zone_id: Required. Zone identifier.
    :param retry: Retry object for the request; ``None`` disables retries.
    :param timeout: Seconds to wait for completion (per attempt when ``retry`` is set).
    :param metadata: Additional metadata forwarded to the method.
    """
    zone_name = f"projects/{project_id}/locations/{region}/lakes/{lake_id}/zones/{zone_id}"
    return self.get_dataplex_client().delete_zone(
        request={"name": zone_name},
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
@GoogleBaseHook.fallback_to_default_project_id
def create_asset(
    self,
    project_id: str,
    region: str,
    lake_id: str,
    zone_id: str,
    asset_id: str,
    body: dict[str, Any] | Asset,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> Any:
    """
    Create an asset resource within a zone.

    :param project_id: Required. ID of the Google Cloud project owning the lake.
    :param region: Required. ID of the Google Cloud region owning the lake.
    :param lake_id: Required. ID of the lake the asset belongs to.
    :param zone_id: Required. Zone identifier.
    :param asset_id: Required. Asset identifier.
    :param body: Required. Request body containing an instance of Asset.
    :param retry: Retry object for the request; ``None`` disables retries.
    :param timeout: Seconds to wait for completion (per attempt when ``retry`` is set).
    :param metadata: Additional metadata forwarded to the method.
    """
    zone_name = f"projects/{project_id}/locations/{region}/lakes/{lake_id}/zones/{zone_id}"
    return self.get_dataplex_client().create_asset(
        request={
            "parent": zone_name,
            "asset": body,
            "asset_id": asset_id,
        },
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
@GoogleBaseHook.fallback_to_default_project_id
def delete_asset(
    self,
    project_id: str,
    region: str,
    lake_id: str,
    asset_id: str,
    zone_id: str,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> Any:
    """
    Delete an asset resource.

    :param project_id: Required. ID of the Google Cloud project owning the lake.
    :param region: Required. ID of the Google Cloud region owning the lake.
    :param lake_id: Required. ID of the lake the asset belongs to.
    :param zone_id: Required. Zone identifier.
    :param asset_id: Required. Asset identifier.
    :param retry: Retry object for the request; ``None`` disables retries.
    :param timeout: Seconds to wait for completion (per attempt when ``retry`` is set).
    :param metadata: Additional metadata forwarded to the method.
    """
    asset_name = (
        f"projects/{project_id}/locations/{region}/lakes/{lake_id}/zones/{zone_id}/assets/{asset_id}"
    )
    return self.get_dataplex_client().delete_asset(
        request={"name": asset_name},
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
@GoogleBaseHook.fallback_to_default_project_id
def create_data_scan(
    self,
    project_id: str,
    region: str,
    body: dict[str, Any] | DataScan,
    data_scan_id: str | None = None,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> Any:
    """
    Create a DataScan resource.

    :param project_id: Required. ID of the Google Cloud project owning the scan.
    :param region: Required. ID of the Google Cloud region owning the scan.
    :param body: Required. Request body containing an instance of DataScan.
    :param data_scan_id: Data Quality scan identifier. (Documented as required
        by the API, although it defaults to ``None`` here — TODO confirm.)
    :param retry: Retry object for the request; ``None`` disables retries.
    :param timeout: Seconds to wait for completion (per attempt when ``retry`` is set).
    :param metadata: Additional metadata forwarded to the method.
    """
    scan_client = self.get_dataplex_data_scan_client()
    create_request = {
        "parent": f"projects/{project_id}/locations/{region}",
        "data_scan": body,
        "data_scan_id": data_scan_id,
    }
    return scan_client.create_data_scan(
        request=create_request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
@GoogleBaseHook.fallback_to_default_project_id
def run_data_scan(
    self,
    project_id: str,
    region: str,
    data_scan_id: str,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> Any:
    """
    Trigger an on-demand execution of a DataScan.

    :param project_id: Required. ID of the Google Cloud project owning the scan.
    :param region: Required. ID of the Google Cloud region owning the scan.
    :param data_scan_id: Required. Data Quality scan identifier.
    :param retry: Retry object for the request; ``None`` disables retries.
    :param timeout: Seconds to wait for completion (per attempt when ``retry`` is set).
    :param metadata: Additional metadata forwarded to the method.
    """
    scan_name = PATH_DATA_SCAN.format(
        project_id=project_id, region=region, data_scan_id=data_scan_id
    )
    return self.get_dataplex_data_scan_client().run_data_scan(
        request={"name": scan_name},
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
@GoogleBaseHook.fallback_to_default_project_id
def get_data_scan_job(
    self,
    project_id: str,
    region: str,
    data_scan_id: str | None = None,
    job_id: str | None = None,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> Any:
    """
    Fetch a DataScan job, requesting the FULL view of the resource.

    :param project_id: Required. ID of the Google Cloud project owning the scan.
    :param region: Required. ID of the Google Cloud region owning the scan.
    :param data_scan_id: Required. Data Quality scan identifier.
    :param job_id: Required. Job identifier within the scan:
        projects/{project_id}/locations/{region}/dataScans/{data_scan_id}/jobs/{data_scan_job_id}
    :param retry: Retry object for the request; ``None`` disables retries.
    :param timeout: Seconds to wait for completion (per attempt when ``retry`` is set).
    :param metadata: Additional metadata forwarded to the method.
    """
    job_name = f"projects/{project_id}/locations/{region}/dataScans/{data_scan_id}/jobs/{job_id}"
    return self.get_dataplex_data_scan_client().get_data_scan_job(
        request={"name": job_name, "view": "FULL"},
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def wait_for_data_scan_job(
    self,
    data_scan_id: str,
    job_id: str | None = None,
    project_id: str = PROVIDE_PROJECT_ID,
    region: str | None = None,
    wait_time: int = 10,
    result_timeout: float | None = None,
) -> Any:
    """
    Block until the given Dataplex data-scan job reaches a terminal state.

    :param job_id: Required. Job identifier to wait for.
    :param data_scan_id: Required. Data Quality scan identifier.
    :param region: Required. ID of the Google Cloud region owning the scan.
    :param project_id: Optional. Google Cloud project ID.
    :param wait_time: Seconds between polls.
    :param result_timeout: Optional overall budget, in seconds, to wait for a
        result; raises when exceeded.
    """
    terminal_states = (
        DataScanJob.State.CANCELLED,
        DataScanJob.State.FAILED,
        DataScanJob.State.SUCCEEDED,
    )
    poll_start = time.monotonic()
    state = None
    while state not in terminal_states:
        # Enforce the overall deadline before sleeping for the next poll.
        if result_timeout and poll_start + result_timeout < time.monotonic():
            raise AirflowDataQualityScanResultTimeoutException(
                f"Timeout: Data Quality scan {job_id} is not ready after {result_timeout}s"
            )
        time.sleep(wait_time)
        try:
            job = self.get_data_scan_job(
                job_id=job_id,
                data_scan_id=data_scan_id,
                project_id=project_id,
                region=region,
            )
        except Exception as err:
            # Transient API failures are logged and retried on the next poll.
            self.log.info("Retrying. Dataplex API returned error when waiting for job: %s", err)
        else:
            state = job.state
    return job
@GoogleBaseHook.fallback_to_default_project_id
def get_data_scan(
    self,
    project_id: str,
    region: str,
    data_scan_id: str,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> Any:
    """
    Fetch a DataScan resource, requesting the FULL view.

    :param project_id: Required. ID of the Google Cloud project owning the scan.
    :param region: Required. ID of the Google Cloud region owning the scan.
    :param data_scan_id: Required. Data Quality scan identifier.
    :param retry: Retry object for the request; ``None`` disables retries.
    :param timeout: Seconds to wait for completion (per attempt when ``retry`` is set).
    :param metadata: Additional metadata forwarded to the method.
    """
    scan_name = PATH_DATA_SCAN.format(
        project_id=project_id, region=region, data_scan_id=data_scan_id
    )
    return self.get_dataplex_data_scan_client().get_data_scan(
        request={"name": scan_name, "view": "FULL"},
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
@GoogleBaseHook.fallback_to_default_project_id
def update_data_scan(
    self,
    project_id: str,
    region: str,
    data_scan_id: str,
    body: dict[str, Any] | DataScan,
    update_mask: dict | FieldMask | None = None,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> Any:
    """
    Update a DataScan resource.

    :param project_id: Required. ID of the Google Cloud project owning the scan.
    :param region: Required. ID of the Google Cloud region owning the scan.
    :param data_scan_id: Required. Data Quality scan identifier.
    :param body: Required. Request body containing an instance of DataScan.
    :param update_mask: Mask of fields to update; defaults to the full set of
        modifiable DataScan fields when omitted.
    :param retry: Retry object for the request; ``None`` disables retries.
    :param timeout: Seconds to wait for completion (per attempt when ``retry`` is set).
    :param metadata: Additional metadata forwarded to the method.
    :raises AirflowException: if ``body`` is not a DataScan or a dict.
    """
    client = self.get_dataplex_data_scan_client()
    full_scan_name = f"projects/{project_id}/locations/{region}/dataScans/{data_scan_id}"
    # Inject the fully qualified scan name into the request body. The check is
    # done by type, not truthiness: previously a falsy body (None) slipped past
    # the guard and was forwarded to the API unnamed; now it fails fast here.
    if isinstance(body, DataScan):
        body.name = full_scan_name
    elif isinstance(body, dict):
        body["name"] = full_scan_name
    else:
        raise AirflowException("Unable to set scan_name.")
    if not update_mask:
        update_mask = FieldMask(
            paths=["data_quality_spec", "labels", "description", "display_name", "execution_spec"]
        )
    result = client.update_data_scan(
        request={
            "data_scan": body,
            "update_mask": update_mask,
        },
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    return result
@GoogleBaseHook.fallback_to_default_project_id
def delete_data_scan(
    self,
    project_id: str,
    region: str,
    data_scan_id: str,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> Any:
    """
    Delete a DataScan resource.

    :param project_id: Required. ID of the Google Cloud project owning the scan.
    :param region: Required. ID of the Google Cloud region owning the scan.
    :param data_scan_id: Required. Data Quality scan identifier.
    :param retry: Retry object for the request; ``None`` disables retries.
    :param timeout: Seconds to wait for completion (per attempt when ``retry`` is set).
    :param metadata: Additional metadata forwarded to the method.
    """
    scan_name = PATH_DATA_SCAN.format(
        project_id=project_id, region=region, data_scan_id=data_scan_id
    )
    return self.get_dataplex_data_scan_client().delete_data_scan(
        request={"name": scan_name},
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
@GoogleBaseHook.fallback_to_default_project_id
def list_data_scan_jobs(
    self,
    project_id: str,
    region: str,
    data_scan_id: str,
    retry: Retry | _MethodDefault = DEFAULT,
    timeout: float | None = None,
    metadata: Sequence[tuple[str, str]] = (),
) -> Any:
    """
    List DataScanJobs under the given DataScan.

    :param project_id: Required. ID of the Google Cloud project owning the scan.
    :param region: Required. ID of the Google Cloud region owning the scan.
    :param data_scan_id: Required. Data Quality scan identifier.
    :param retry: Retry object for the request; ``None`` disables retries.
    :param timeout: Seconds to wait for completion (per attempt when ``retry`` is set).
    :param metadata: Additional metadata forwarded to the method.
    """
    # The scan itself is the parent resource of its jobs.
    parent_scan = PATH_DATA_SCAN.format(
        project_id=project_id, region=region, data_scan_id=data_scan_id
    )
    return self.get_dataplex_data_scan_client().list_data_scan_jobs(
        request={"parent": parent_scan},
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
| DataplexHook |
python | tensorflow__tensorflow | tensorflow/python/framework/ops_test.py | {
"start": 46479,
"end": 52255
} | class ____(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasic(self):
    # An op created straight from a TF_Operation handle must be fully wired
    # into the surrounding graph: name, inputs, consumers and lookup tables.
    graph = ops.Graph()
    with graph.as_default():
        int_tensor = test_ops.int_output()
        raw_op = ops._create_c_op(
            graph, ops._NodeDef("IntInputIntOutput", "myop"), [int_tensor], [])
        created_op = graph._create_op_from_tf_operation(raw_op)

    self.assertEqual(created_op.name, "myop")
    self.assertEqual(created_op.type, "IntInputIntOutput")
    self.assertLen(created_op.outputs, 1)
    self.assertEqual(created_op.outputs[0].shape, tensor_shape.unknown_shape())
    self.assertEqual(list(created_op.inputs), [int_tensor])
    self.assertEqual(created_op.control_inputs, [])
    self.assertEqual(created_op.graph, graph)
    self.assertEqual(int_tensor.consumers(), [created_op])
    self.assertIsNotNone(created_op.traceback)
    self.assertIn("testBasic", created_op.traceback[-1])
    self.assertEqual(graph.get_operation_by_name("myop"), created_op)
    self.assertEqual(graph.get_tensor_by_name("myop:0"), created_op.outputs[0])
def testShape(self):
    # Shape inference must run for ops built from a C API handle.
    graph = ops.Graph()
    with graph.as_default():
        const_tensor = constant_op.constant([[1, 2, 3], [4, 5, 6]])
        raw_op = ops._create_c_op(
            graph, ops._NodeDef("Identity", "myop"), [const_tensor], [])
        identity_op = graph._create_op_from_tf_operation(raw_op)

    self.assertEqual(identity_op.name, "myop")
    self.assertEqual(identity_op.type, "Identity")
    self.assertLen(identity_op.outputs, 1)
    self.assertEqual(identity_op.outputs[0].shape, tensor_shape.TensorShape([2, 3]))
def testUniqueName(self):
    # Names taken by C-API-created ops must be honored by the Python name
    # uniquifier: later ops asking for the same names get suffixed variants.
    graph = ops.Graph()
    with graph.as_default():
        handle_a = ops._create_c_op(graph, ops._NodeDef("IntOutput", "myop"), [], [])
        handle_b = ops._create_c_op(graph, ops._NodeDef("IntOutput", "myop_1"), [], [])
        op_a = graph._create_op_from_tf_operation(handle_a)
        op_b = graph._create_op_from_tf_operation(handle_b)

        # Request names already taken above; both must be uniquified.
        op_c = test_ops.int_output(name="myop").op
        op_d = test_ops.int_output(name="myop_1").op

    self.assertEqual(op_a.name, "myop")
    self.assertEqual(op_b.name, "myop_1")
    self.assertEqual(op_c.name, "myop_2")
    self.assertEqual(op_d.name, "myop_1_1")
@test_util.run_v1_only("b/120545219")
def testCond(self):
    """C-API-created ops inside a cond branch must join its control flow context."""
    g = ops.Graph()
    with g.as_default():
        x = test_ops.int_output()

        def true_fn():
            # Create the op through the C API inside the true branch, then ask
            # the graph to wrap any TF operations it has not seen yet; exactly
            # one new op should be picked up.
            ops._create_c_op(ops.get_default_graph(),
                             ops._NodeDef("IntInput", "cond/myop"), [x], [])
            new_ops = g._add_new_tf_operations()
            self.assertLen(new_ops, 1)
            return x

        cond.cond(x < 10, true_fn, lambda: x)

    op = g.get_operation_by_name("cond/myop")
    self.assertIsNotNone(op)
    self.assertEqual(op.name, "cond/myop")
    self.assertEqual(op.type, "IntInput")
    self.assertEqual(op.outputs, [])
    # The external input must have been rerouted through the cond's Switch op.
    op_input = op.inputs[0].op
    self.assertEqual(op_input.type, "Switch")
    self.assertEqual(op_input.inputs[0], x)
    self.assertEqual(op.graph, g)
    # pylint: disable=protected-access
    self.assertIsNotNone(op._get_control_flow_context())
    self.assertEqual(op._get_control_flow_context().name,
                     "cond/cond_text")
    # pylint: enable=protected-access
@test_util.run_v1_only("b/120545219")
def testWhileLoop(self):
    """C-API-created ops inside a while_loop body must join the loop's context."""
    g = ops.Graph()
    with g.as_default():
        x = test_ops.int_output()

        def body(i):
            # Build the op through the C API, then sweep it into the Python
            # graph; exactly one new operation should be picked up.
            ops._create_c_op(ops.get_default_graph(),
                             ops._NodeDef("IntInput", "myloop/myop"), [x], [])
            new_ops = g._add_new_tf_operations()
            self.assertLen(new_ops, 1)
            return i

        while_loop.while_loop(lambda i: i < 10, body, [0], name="myloop")

    op = g.get_operation_by_name("myloop/myop")
    self.assertIsNotNone(op)
    self.assertEqual(op.name, "myloop/myop")
    self.assertEqual(op.type, "IntInput")
    self.assertEqual(op.outputs, [])
    # The external tensor must have been routed into the loop via an Enter op.
    op_input = op.inputs[0].op
    self.assertEqual(op_input.type, "Enter")
    self.assertEqual(list(op_input.inputs), [x])
    self.assertEqual(op.graph, g)
    # pylint: disable=protected-access
    self.assertIsNotNone(op._get_control_flow_context())
    self.assertEqual(op._get_control_flow_context().name,
                     "myloop/while_context")
    # pylint: enable=protected-access
@test_util.run_v1_only("b/120545219")
def testWhileLoopWithInternalControlDep(self):
    """A control dependency on an op defined inside the loop body is kept as-is."""
    g = ops.Graph()
    with g.as_default():
        x = test_ops.int_output()

        def body(i):
            # "c" lives inside the loop body, so a control dep on it does not
            # cross the loop boundary.
            c = constant_op.constant(1.0, name="c")
            ops._create_c_op(ops.get_default_graph(),
                             ops._NodeDef("IntInput", "myloop/myop"), [x], [])
            with ops.control_dependencies([c]):
                new_ops = g._add_new_tf_operations()
                self.assertLen(new_ops, 1)
            return i

        while_loop.while_loop(lambda i: i < 10, body, [0], name="myloop")

    op = g.get_operation_by_name("myloop/myop")
    self.assertIsNotNone(op)
    c = g.get_operation_by_name("myloop/c")
    self.assertIsNotNone(c)
    # Internal control dep is preserved
    self.assertEqual(op.control_inputs, [c])
@test_util.run_v1_only("b/120545219")
def testWhileLoopWithExternalControlDep(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
c = constant_op.constant(1.0)
def body(i):
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "myloop/myop"), [x], [])
with ops.control_dependencies([c]):
new_ops = g._add_new_tf_operations()
self.assertLen(new_ops, 1)
return i
while_loop.while_loop(lambda i: i < 10, body, [0], name="myloop")
op = g.get_operation_by_name("myloop/myop")
self.assertIsNotNone(op)
# External control dep is removed and replaced with internal control dep
self.assertNotEqual(op.control_inputs[0], c.op)
self.assertIsNotNone(op.control_inputs[0]._get_control_flow_context())
| CreateOpFromTFOperationTest |
python | sqlalchemy__sqlalchemy | test/dialect/mysql/test_compiler.py | {
"start": 4079,
"end": 22927
} | class ____(ReservedWordFixture, fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = mysql.dialect()
@testing.combinations(
("mariadb", True),
("mysql", False),
(mysql.dialect(), False),
(mysql.dialect(is_mariadb=True), True),
(
create_engine(
"mysql+pymysql://", module=mock.Mock(paramstyle="format")
).dialect,
False,
),
(
create_engine(
"mariadb+pymysql://", module=mock.Mock(paramstyle="format")
).dialect,
True,
),
argnames="dialect, expect_mariadb",
)
def test_reserved_words_mysql_vs_mariadb(
self, dialect, expect_mariadb, mysql_mariadb_reserved_words
):
"""test #7167 - compiler level
We want to make sure that the "is mariadb" flag as well as the
correct identifier preparer are set up for dialects no matter how they
determine their "is_mariadb" flag.
"""
table, expected_mysql, expected_mdb = mysql_mariadb_reserved_words
self.assert_compile(
select(table),
expected_mdb if expect_mariadb else expected_mysql,
dialect=dialect,
)
def test_plain_stringify_returning(self):
t = Table(
"t",
MetaData(),
Column("myid", Integer, primary_key=True),
Column("name", String, server_default="some str"),
Column("description", String, default=func.lower("hi")),
)
stmt = t.insert().values().return_defaults()
eq_ignore_whitespace(
str(stmt.compile(dialect=mysql.dialect(is_mariadb=True))),
"INSERT INTO t (description) VALUES (lower(%s)) "
"RETURNING t.myid, t.name, t.description",
)
eq_ignore_whitespace(
str(stmt.compile(dialect=mysql.dialect())),
"INSERT INTO t (description) VALUES (lower(%s))",
)
def test_create_index_simple(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", String(255)))
idx = Index("test_idx1", tbl.c.data)
self.assert_compile(
schema.CreateIndex(idx), "CREATE INDEX test_idx1 ON testtbl (data)"
)
def test_create_index_with_prefix(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", String(255)))
idx = Index(
"test_idx1", tbl.c.data, mysql_length=10, mysql_prefix="FULLTEXT"
)
self.assert_compile(
schema.CreateIndex(idx),
"CREATE FULLTEXT INDEX test_idx1 ON testtbl (data(10))",
)
def test_create_index_with_text(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", String(255)))
idx = Index("test_idx1", text("created_at desc"), _table=tbl)
self.assert_compile(
schema.CreateIndex(idx),
"CREATE INDEX test_idx1 ON testtbl (created_at desc)",
)
def test_create_index_with_arbitrary_column_element(self):
from sqlalchemy.ext.compiler import compiles
class _textual_index_element(sql.ColumnElement):
"""alembic's wrapper"""
__visit_name__ = "_textual_idx_element"
def __init__(self, table, text):
self.table = table
self.text = text
@compiles(_textual_index_element)
def _render_textual_index_column(element, compiler, **kw):
return compiler.process(element.text, **kw)
m = MetaData()
tbl = Table("testtbl", m, Column("data", String(255)))
idx = Index(
"test_idx1",
_textual_index_element(tbl, text("created_at desc")),
_table=tbl,
)
self.assert_compile(
schema.CreateIndex(idx),
"CREATE INDEX test_idx1 ON testtbl (created_at desc)",
)
def test_create_index_with_parser(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", String(255)))
idx = Index(
"test_idx1",
tbl.c.data,
mysql_length=10,
mysql_prefix="FULLTEXT",
mysql_with_parser="ngram",
)
self.assert_compile(
schema.CreateIndex(idx),
"CREATE FULLTEXT INDEX test_idx1 "
"ON testtbl (data(10)) WITH PARSER ngram",
)
def test_create_index_with_length(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", String(255)))
idx1 = Index("test_idx1", tbl.c.data, mysql_length=10)
idx2 = Index("test_idx2", tbl.c.data, mysql_length=5)
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX test_idx1 ON testtbl (data(10))",
)
self.assert_compile(
schema.CreateIndex(idx2),
"CREATE INDEX test_idx2 ON testtbl (data(5))",
)
def test_drop_constraint_mysql(self):
m = MetaData()
table_name = "testtbl"
constraint_name = "constraint"
constraint = CheckConstraint("data IS NOT NULL", name=constraint_name)
Table(table_name, m, Column("data", String(255)), constraint)
dialect = mysql.dialect()
self.assert_compile(
schema.DropConstraint(constraint),
"ALTER TABLE %s DROP CHECK `%s`" % (table_name, constraint_name),
dialect=dialect,
)
def test_drop_constraint_mariadb(self):
m = MetaData()
table_name = "testtbl"
constraint_name = "constraint"
constraint = CheckConstraint("data IS NOT NULL", name=constraint_name)
Table(table_name, m, Column("data", String(255)), constraint)
self.assert_compile(
schema.DropConstraint(constraint),
"ALTER TABLE %s DROP CONSTRAINT `%s`"
% (table_name, constraint_name),
dialect="mariadb",
)
def test_create_index_with_length_quoted(self):
m = MetaData()
tbl = Table(
"testtbl", m, Column("some quoted data", String(255), key="s")
)
idx1 = Index("test_idx1", tbl.c.s, mysql_length=10)
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX test_idx1 ON testtbl (`some quoted data`(10))",
)
def test_create_composite_index_with_length_quoted(self):
m = MetaData()
tbl = Table(
"testtbl",
m,
Column("some Quoted a", String(255), key="a"),
Column("some Quoted b", String(255), key="b"),
)
idx1 = Index(
"test_idx1",
tbl.c.a,
tbl.c.b,
mysql_length={"some Quoted a": 10, "some Quoted b": 20},
)
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX test_idx1 ON testtbl "
"(`some Quoted a`(10), `some Quoted b`(20))",
)
def test_create_composite_index_with_length_quoted_3085_workaround(self):
m = MetaData()
tbl = Table(
"testtbl",
m,
Column("some quoted a", String(255), key="a"),
Column("some quoted b", String(255), key="b"),
)
idx1 = Index(
"test_idx1",
tbl.c.a,
tbl.c.b,
mysql_length={"`some quoted a`": 10, "`some quoted b`": 20},
)
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX test_idx1 ON testtbl "
"(`some quoted a`(10), `some quoted b`(20))",
)
def test_create_composite_index_with_length(self):
m = MetaData()
tbl = Table(
"testtbl", m, Column("a", String(255)), Column("b", String(255))
)
idx1 = Index(
"test_idx1", tbl.c.a, tbl.c.b, mysql_length={"a": 10, "b": 20}
)
idx2 = Index("test_idx2", tbl.c.a, tbl.c.b, mysql_length={"a": 15})
idx3 = Index("test_idx3", tbl.c.a, tbl.c.b, mysql_length=30)
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX test_idx1 ON testtbl (a(10), b(20))",
)
self.assert_compile(
schema.CreateIndex(idx2),
"CREATE INDEX test_idx2 ON testtbl (a(15), b)",
)
self.assert_compile(
schema.CreateIndex(idx3),
"CREATE INDEX test_idx3 ON testtbl (a(30), b(30))",
)
def test_create_index_with_using(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", String(255)))
idx1 = Index("test_idx1", tbl.c.data, mysql_using="btree")
idx2 = Index("test_idx2", tbl.c.data, mysql_using="hash")
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX test_idx1 ON testtbl (data) USING btree",
)
self.assert_compile(
schema.CreateIndex(idx2),
"CREATE INDEX test_idx2 ON testtbl (data) USING hash",
)
def test_create_pk_plain(self):
m = MetaData()
tbl = Table(
"testtbl",
m,
Column("data", String(255)),
PrimaryKeyConstraint("data"),
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE testtbl (data VARCHAR(255) NOT NULL, "
"PRIMARY KEY (data))",
)
def test_create_pk_with_using(self):
m = MetaData()
tbl = Table(
"testtbl",
m,
Column("data", String(255)),
PrimaryKeyConstraint("data", mysql_using="btree"),
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE testtbl (data VARCHAR(255) NOT NULL, "
"PRIMARY KEY (data) USING btree)",
)
@testing.combinations(
(True, True, (10, 2, 2)),
(True, True, (10, 2, 1)),
(False, True, (10, 2, 0)),
(True, False, (8, 0, 14)),
(True, False, (8, 0, 13)),
(False, False, (8, 0, 12)),
argnames="has_brackets,is_mariadb,version",
)
def test_create_server_default_with_function_using(
self, has_brackets, is_mariadb, version
):
dialect = mysql.dialect(is_mariadb=is_mariadb)
dialect.server_version_info = version
m = MetaData()
tbl = Table(
"testtbl",
m,
Column("time", DateTime, server_default=func.current_timestamp()),
Column("name", String(255), server_default="some str"),
Column(
"description", String(255), server_default=func.lower("hi")
),
Column("data", JSON, server_default=func.json_object()),
Column(
"updated1",
DateTime,
server_default=text("now() on update now()"),
),
Column(
"updated2",
DateTime,
server_default=text("now() On UpDate now()"),
),
Column(
"updated3",
DateTime,
server_default=text("now() ON UPDATE now()"),
),
Column(
"updated4",
DateTime,
server_default=text("now(3)"),
),
Column(
"updated5",
DateTime,
server_default=text("nOW(3)"),
),
Column(
"updated6",
DateTime,
server_default=text("notnow(1)"),
),
Column(
"updated7",
DateTime,
server_default=text("CURRENT_TIMESTAMP(3)"),
),
)
eq_(dialect._support_default_function, has_brackets)
if has_brackets:
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE testtbl ("
"time DATETIME DEFAULT CURRENT_TIMESTAMP, "
"name VARCHAR(255) DEFAULT 'some str', "
"description VARCHAR(255) DEFAULT (lower('hi')), "
"data JSON DEFAULT (json_object()), "
"updated1 DATETIME DEFAULT now() on update now(), "
"updated2 DATETIME DEFAULT now() On UpDate now(), "
"updated3 DATETIME DEFAULT now() ON UPDATE now(), "
"updated4 DATETIME DEFAULT now(3), "
"updated5 DATETIME DEFAULT nOW(3), "
"updated6 DATETIME DEFAULT (notnow(1)), "
"updated7 DATETIME DEFAULT CURRENT_TIMESTAMP(3))",
dialect=dialect,
)
else:
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE testtbl ("
"time DATETIME DEFAULT CURRENT_TIMESTAMP, "
"name VARCHAR(255) DEFAULT 'some str', "
"description VARCHAR(255) DEFAULT lower('hi'), "
"data JSON DEFAULT json_object(), "
"updated1 DATETIME DEFAULT now() on update now(), "
"updated2 DATETIME DEFAULT now() On UpDate now(), "
"updated3 DATETIME DEFAULT now() ON UPDATE now(), "
"updated4 DATETIME DEFAULT now(3), "
"updated5 DATETIME DEFAULT nOW(3), "
"updated6 DATETIME DEFAULT notnow(1), "
"updated7 DATETIME DEFAULT CURRENT_TIMESTAMP(3))",
dialect=dialect,
)
def test_create_index_expr(self):
m = MetaData()
t1 = Table("foo", m, Column("x", Integer))
self.assert_compile(
schema.CreateIndex(Index("bar", t1.c.x > 5)),
"CREATE INDEX bar ON foo ((x > 5))",
)
def test_create_index_expr_two(self):
m = MetaData()
tbl = Table("testtbl", m, Column("x", Integer), Column("y", Integer))
idx1 = Index("test_idx1", tbl.c.x + tbl.c.y)
idx2 = Index(
"test_idx2", tbl.c.x, tbl.c.x + tbl.c.y, tbl.c.y - tbl.c.x
)
idx3 = Index("test_idx3", tbl.c.x.desc())
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX test_idx1 ON testtbl ((x + y))",
)
self.assert_compile(
schema.CreateIndex(idx2),
"CREATE INDEX test_idx2 ON testtbl (x, (x + y), (y - x))",
)
self.assert_compile(
schema.CreateIndex(idx3),
"CREATE INDEX test_idx3 ON testtbl (x DESC)",
)
def test_create_index_expr_func(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", Integer))
idx1 = Index("test_idx1", func.radians(tbl.c.data))
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX test_idx1 ON testtbl ((radians(data)))",
)
def test_create_index_expr_func_unary(self):
m = MetaData()
tbl = Table("testtbl", m, Column("data", Integer))
idx1 = Index("test_idx1", -tbl.c.data)
self.assert_compile(
schema.CreateIndex(idx1),
"CREATE INDEX test_idx1 ON testtbl ((-data))",
)
def test_deferrable_initially_kw_not_ignored(self):
m = MetaData()
Table("t1", m, Column("id", Integer, primary_key=True))
t2 = Table(
"t2",
m,
Column(
"id",
Integer,
ForeignKey("t1.id", deferrable=True, initially="DEFERRED"),
primary_key=True,
),
)
self.assert_compile(
schema.CreateTable(t2),
"CREATE TABLE t2 (id INTEGER NOT NULL, "
"PRIMARY KEY (id), FOREIGN KEY(id) REFERENCES t1 (id) "
"DEFERRABLE INITIALLY DEFERRED)",
)
def test_match_kw_raises(self):
m = MetaData()
Table("t1", m, Column("id", Integer, primary_key=True))
t2 = Table(
"t2",
m,
Column(
"id",
Integer,
ForeignKey("t1.id", match="XYZ"),
primary_key=True,
),
)
assert_raises_message(
exc.CompileError,
"MySQL ignores the 'MATCH' keyword while at the same time causes "
"ON UPDATE/ON DELETE clauses to be ignored.",
schema.CreateTable(t2).compile,
dialect=mysql.dialect(),
)
def test_concat_compile_kw(self):
expr = literal("x", type_=String) + literal("y", type_=String)
self.assert_compile(expr, "concat('x', 'y')", literal_binds=True)
def test_mariadb_for_update(self):
table1 = table(
"mytable", column("myid"), column("name"), column("description")
)
self.assert_compile(
table1.select()
.where(table1.c.myid == 7)
.with_for_update(of=table1),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %s "
"FOR UPDATE",
dialect="mariadb",
)
def test_delete_extra_froms(self):
t1 = table("t1", column("c1"))
t2 = table("t2", column("c1"))
q = sql.delete(t1).where(t1.c.c1 == t2.c.c1)
self.assert_compile(
q, "DELETE FROM t1 USING t1, t2 WHERE t1.c1 = t2.c1"
)
def test_delete_extra_froms_alias(self):
a1 = table("t1", column("c1")).alias("a1")
t2 = table("t2", column("c1"))
q = sql.delete(a1).where(a1.c.c1 == t2.c.c1)
self.assert_compile(
q, "DELETE FROM a1 USING t1 AS a1, t2 WHERE a1.c1 = t2.c1"
)
self.assert_compile(sql.delete(a1), "DELETE FROM t1 AS a1")
@testing.combinations(
("no_persisted", "", "ignore"),
("persisted_none", "", None),
("persisted_true", " STORED", True),
("persisted_false", " VIRTUAL", False),
id_="iaa",
)
def test_column_computed(self, text, persisted):
m = MetaData()
kwargs = {"persisted": persisted} if persisted != "ignore" else {}
t = Table(
"t",
m,
Column("x", Integer),
Column("y", Integer, Computed("x + 2", **kwargs)),
)
self.assert_compile(
schema.CreateTable(t),
"CREATE TABLE t (x INTEGER, y INTEGER GENERATED "
"ALWAYS AS (x + 2)%s)" % text,
)
def test_groupby_rollup(self):
t = table("tt", column("foo"), column("bar"))
q = sql.select(t.c.foo).group_by(sql.func.rollup(t.c.foo, t.c.bar))
self.assert_compile(
q, "SELECT tt.foo FROM tt GROUP BY tt.foo, tt.bar WITH ROLLUP"
)
| CompileTest |
python | plotly__plotly.py | plotly/graph_objs/streamtube/_lightposition.py | {
"start": 233,
"end": 3509
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "streamtube"
_path_str = "streamtube.lightposition"
_valid_props = {"x", "y", "z"}
@property
def x(self):
"""
Numeric vector, representing the X coordinate for each vertex.
The 'x' property is a number and may be specified as:
- An int or float in the interval [-100000, 100000]
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def y(self):
"""
Numeric vector, representing the Y coordinate for each vertex.
The 'y' property is a number and may be specified as:
- An int or float in the interval [-100000, 100000]
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def z(self):
"""
Numeric vector, representing the Z coordinate for each vertex.
The 'z' property is a number and may be specified as:
- An int or float in the interval [-100000, 100000]
Returns
-------
int|float
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
@property
def _prop_descriptions(self):
return """\
x
Numeric vector, representing the X coordinate for each
vertex.
y
Numeric vector, representing the Y coordinate for each
vertex.
z
Numeric vector, representing the Z coordinate for each
vertex.
"""
def __init__(self, arg=None, x=None, y=None, z=None, **kwargs):
"""
Construct a new Lightposition object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.streamtube.Lightposition`
x
Numeric vector, representing the X coordinate for each
vertex.
y
Numeric vector, representing the Y coordinate for each
vertex.
z
Numeric vector, representing the Z coordinate for each
vertex.
Returns
-------
Lightposition
"""
super().__init__("lightposition")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.streamtube.Lightposition
constructor must be a dict or
an instance of :class:`plotly.graph_objs.streamtube.Lightposition`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("x", arg, x)
self._set_property("y", arg, y)
self._set_property("z", arg, z)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Lightposition |
python | doocs__leetcode | solution/3300-3399/3325.Count Substrings With K-Frequency Characters I/Solution.py | {
"start": 0,
"end": 286
} | class ____:
def numberOfSubstrings(self, s: str, k: int) -> int:
cnt = Counter()
ans = l = 0
for c in s:
cnt[c] += 1
while cnt[c] >= k:
cnt[s[l]] -= 1
l += 1
ans += l
return ans
| Solution |
python | apache__airflow | airflow-core/src/airflow/cli/simple_table.py | {
"start": 1414,
"end": 4709
} | class ____(Console):
"""Airflow rich console."""
def __init__(self, show_header: bool = True, *args, **kwargs):
super().__init__(*args, **kwargs)
# Set the width to constant to pipe whole output from console
self._width = 200 if not is_tty() else self._width
# If show header in tables
self.show_header = show_header
def print_as_json(self, data: dict):
"""Render dict as json text representation."""
json_content = json.dumps(data)
self.print(Syntax(json_content, "json", theme="ansi_dark"), soft_wrap=True)
def print_as_yaml(self, data: dict):
"""Render dict as yaml text representation."""
yaml_content = yaml.dump(data)
self.print(Syntax(yaml_content, "yaml", theme="ansi_dark"), soft_wrap=True)
def print_as_table(self, data: list[dict]):
"""Render list of dictionaries as table."""
if not data:
self.print("No data found")
return
table = SimpleTable(show_header=self.show_header)
for col in data[0]:
table.add_column(col)
for row in data:
table.add_row(*(str(d) for d in row.values()))
self.print(table)
def print_as_plain_table(self, data: list[dict]):
"""Render list of dictionaries as a simple table than can be easily piped."""
if not data:
self.print("No data found")
return
rows = [d.values() for d in data]
output = tabulate(rows, tablefmt="plain", headers=list(data[0]))
print(output)
def _normalize_data(self, value: Any, output: str) -> list | str | dict | None:
if isinstance(value, (tuple, list)):
if output == "table":
return ",".join(str(self._normalize_data(x, output)) for x in value)
return [self._normalize_data(x, output) for x in value]
if isinstance(value, dict) and output != "table":
return {k: self._normalize_data(v, output) for k, v in value.items()}
if inspect.isclass(value) and not isinstance(value, PluginsDirectorySource):
return value.__name__
if value is None:
return None
return str(value)
def print_as(
self,
data: Sequence[dict | Any],
output: str,
mapper: Callable[[Any], dict] | None = None,
) -> None:
"""Print provided using format specified by output argument."""
output_to_renderer: dict[str, Callable[[Any], None]] = {
"json": self.print_as_json,
"yaml": self.print_as_yaml,
"table": self.print_as_table,
"plain": self.print_as_plain_table,
}
renderer = output_to_renderer.get(output)
if not renderer:
raise ValueError(f"Unknown formatter: {output}. Allowed options: {list(output_to_renderer)}")
if mapper:
dict_data: Sequence[dict] = [mapper(d) for d in data]
elif is_data_sequence(data):
dict_data = data
else:
raise ValueError("To tabulate non-dictionary data you need to provide `mapper` function")
dict_data = [{k: self._normalize_data(v, output) for k, v in d.items()} for d in dict_data]
renderer(dict_data)
| AirflowConsole |
python | fastai__fastai | fastai/vision/augment.py | {
"start": 19856,
"end": 22291
} | class ____(RandTransform):
"Combine and apply affine and coord transforms"
order,split_idx = 30,None
def __init__(self,
aff_fs:Callable|MutableSequence=None, # Affine transformations function for a batch
coord_fs:Callable|MutableSequence=None, # Coordinate transformations function for a batch
size:int|tuple=None, # Output size, duplicated if one value is specified
mode='bilinear', # PyTorch `F.grid_sample` interpolation
pad_mode=PadMode.Reflection, # A `PadMode`
mode_mask='nearest', # Resample mode for mask
align_corners=None, # PyTorch `F.grid_sample` align_corners
**kwargs
):
store_attr(but=['aff_fs','coord_fs'])
super().__init__(**kwargs)
self.aff_fs,self.coord_fs = L(aff_fs),L(coord_fs)
self.cp_size = None if size is None else (size,size) if isinstance(size, int) else tuple(size)
def before_call(self,
b,
split_idx, # Index of the train/valid dataset
):
while isinstance(b, tuple): b = b[0]
self.split_idx = split_idx
self.do,self.mat = True,self._get_affine_mat(b)
for t in self.coord_fs: t.before_call(b)
def compose(self, tfm):
"Compose `self` with another `AffineCoordTfm` to only do the interpolation step once"
# TODO: keep `name` up to date with the combination
# TODO: have option to only show a subset of the attrs, e.g. for `Flip`
self.aff_fs += tfm.aff_fs
self.coord_fs += tfm.coord_fs
def _get_affine_mat(self, x):
aff_m = _init_mat(x)
if self.split_idx: return _prepare_mat(x, aff_m)
ms = [f(x) for f in self.aff_fs]
ms = [m for m in ms if m is not None]
for m in ms: aff_m = aff_m @ m
return _prepare_mat(x, aff_m)
def _encode(self, x, mode, reverse=False):
coord_func = None if len(self.coord_fs)==0 or self.split_idx else partial(compose_tfms, tfms=self.coord_fs, reverse=reverse)
return x.affine_coord(self.mat, coord_func, sz=self.size, mode=mode, pad_mode=self.pad_mode, align_corners=self.align_corners)
def encodes(self, x:TensorImage): return self._encode(x, self.mode)
def encodes(self, x:TensorMask): return self._encode(x, self.mode_mask)
def encodes(self, x:TensorPoint|TensorBBox): return self._encode(x, self.mode, reverse=True)
# %% ../../nbs/09_vision.augment.ipynb 104
| AffineCoordTfm |
python | wandb__wandb | wandb/vendor/pygments/lexers/asm.py | {
"start": 6281,
"end": 6626
} | class ____(DelegatingLexer):
"""
For the output of 'objdump -Sr on compiled C files'
"""
name = 'c-objdump'
aliases = ['c-objdump']
filenames = ['*.c-objdump']
mimetypes = ['text/x-c-objdump']
def __init__(self, **options):
super(CObjdumpLexer, self).__init__(CLexer, ObjdumpLexer, **options)
| CObjdumpLexer |
python | getsentry__sentry | src/sentry/workflow_engine/models/data_condition_group.py | {
"start": 617,
"end": 2045
} | class ____(DefaultFieldsModel):
"""
A data group is a way to specify a group of conditions that must be met for a workflow action to execute
"""
objects: ClassVar[BaseManager[Self]] = BaseManager(cache_fields=["id"])
__relocation_scope__ = RelocationScope.Organization
__repr__ = sane_repr("logic_type")
class Type(StrEnum):
# ANY will evaluate all conditions, and return true if any of those are met
ANY = "any"
# ANY_SHORT_CIRCUIT will stop evaluating conditions as soon as one is met
ANY_SHORT_CIRCUIT = "any-short"
# ALL will evaluate all conditions, and return true if all of those are met
ALL = "all"
# NONE will return true if none of the conditions are met, will return false immediately if any are met
NONE = "none"
logic_type = models.CharField(
max_length=200, choices=[(t.value, t.value) for t in Type], default=Type.ANY
)
organization = models.ForeignKey("sentry.Organization", on_delete=models.CASCADE)
def get_snapshot(self) -> DataConditionGroupSnapshot:
conditions = []
if is_model_attr_cached(self, "conditions"):
conditions = [cond.get_snapshot() for cond in self.conditions.all()]
return {
"id": self.id,
"logic_type": DataConditionGroup.Type(self.logic_type),
"conditions": conditions,
}
| DataConditionGroup |
python | pytorch__pytorch | test/inductor/test_xpu_basic.py | {
"start": 661,
"end": 1561
} | class ____(TestCase):
common = check_model_gpu
device = "xpu"
def test_add(self):
def fn(a, b):
return a + b
self.common(fn, (torch.rand(2, 3, 16, 16), torch.rand(2, 3, 16, 16)))
def test_sub(self):
def fn(a, b):
return a - b
self.common(fn, (torch.rand(2, 3, 16, 16), torch.rand(2, 3, 16, 16)))
def test_mul(self):
def fn(a, b):
return a * b
self.common(fn, (torch.rand(2, 3, 16, 16), torch.rand(2, 3, 16, 16)))
def test_div(self):
def fn(a, b):
return a / b
self.common(fn, (torch.rand(2, 3, 16, 16), torch.rand(2, 3, 16, 16)))
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
from torch.testing._internal.inductor_utils import HAS_XPU_AND_TRITON
if HAS_XPU_AND_TRITON:
run_tests(needs="filelock")
| XpuBasicTests |
python | pytorch__pytorch | test/distributed/checkpoint/test_fsdp_tp_checkpoint_conversion.py | {
"start": 989,
"end": 4016
} | class ____(DTensorTestBase):
@with_comms
@skip_if_lt_x_gpu(2)
@with_temp_dir
def test_fsdp_to_tp(self):
CHECKPOINT_DIR = self.temp_dir
model = MLPModule(self.device_type).to(self.rank)
# create a FSDP wrapped model
fsdp_model = FSDP(model, use_orig_params=True)
FSDP.set_state_dict_type(
fsdp_model,
StateDictType.SHARDED_STATE_DICT,
)
fsdp_state_dict = fsdp_model.state_dict()
# save fsdp_state_dict to storage
dist_cp.save(
state_dict=fsdp_state_dict,
storage_writer=dist_cp.FileSystemWriter(CHECKPOINT_DIR),
)
# create a TP wrapped model
mesh_shape = (self.world_size,)
device_mesh = init_device_mesh(self.device_type, mesh_shape)
model = MLPModule(self.device_type).to(self.rank)
# Parallelize the module based on the given Parallel Style.
parallelize_plan = {
"net1": ColwiseParallel(),
"net2": RowwiseParallel(),
}
tp_model = parallelize_module(model, device_mesh, parallelize_plan)
optimizer = torch.optim.SGD(tp_model.parameters(), lr=0.25)
# Update the parameters so tp_model.state_dict() will be different from fsdp_model.state_dict().
torch.manual_seed(0)
inp = torch.rand(20, 10).to(self.rank)
output = tp_model(inp)
output.sum().backward()
optimizer.step()
tp_state_dict = tp_model.state_dict()
# Check parameters are indeed different prior to loading.
for fsdp_item, tp_item in zip(fsdp_state_dict.items(), tp_state_dict.items()):
fsdp_k, fsdp_v = fsdp_item
tp_k, tp_v = tp_item
self.assertEqual(fsdp_k, tp_k)
if isinstance(fsdp_v, ShardedTensor) and isinstance(tp_v, DTensor):
fsdp_redistributed = _all_gather_sharded_tensor(fsdp_v)
tp_redistributed = tp_v.redistribute(
device_mesh, placements=[Replicate()]
).to_local()
self.assertNotEqual(fsdp_redistributed, tp_redistributed)
dist_cp.load(
state_dict=tp_state_dict,
storage_reader=dist_cp.FileSystemReader(CHECKPOINT_DIR),
)
tp_model.load_state_dict(tp_state_dict)
# Check parameters are equal after loading.
for fsdp_item, tp_item in zip(fsdp_state_dict.items(), tp_state_dict.items()):
fsdp_k, fsdp_v = fsdp_item
tp_k, tp_v = tp_item
self.assertEqual(fsdp_k, tp_k)
if isinstance(fsdp_v, ShardedTensor) and isinstance(tp_v, DTensor):
fsdp_redistributed = _all_gather_sharded_tensor(fsdp_v)
tp_redistributed = tp_v.redistribute(
device_mesh, placements=[Replicate()]
).to_local()
self.assertEqual(fsdp_redistributed, tp_redistributed)
if __name__ == "__main__":
run_tests()
| TestFsdpTpCheckpointConversion |
python | gevent__gevent | src/greentest/3.9/test_socket.py | {
"start": 204918,
"end": 207132
} | class ____(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
| TCPTimeoutTest |
python | wandb__wandb | wandb/vendor/pygments/lexers/praat.py | {
"start": 415,
"end": 12556
} | class ____(RegexLexer):
"""
For `Praat <http://www.praat.org>`_ scripts.
.. versionadded:: 2.1
"""
name = 'Praat'
aliases = ['praat']
filenames = ['*.praat', '*.proc', '*.psc']
keywords = (
'if', 'then', 'else', 'elsif', 'elif', 'endif', 'fi', 'for', 'from', 'to',
'endfor', 'endproc', 'while', 'endwhile', 'repeat', 'until', 'select', 'plus',
'minus', 'demo', 'assert', 'stopwatch', 'nocheck', 'nowarn', 'noprogress',
'editor', 'endeditor', 'clearinfo',
)
functions_string = (
'backslashTrigraphsToUnicode', 'chooseDirectory', 'chooseReadFile',
'chooseWriteFile', 'date', 'demoKey', 'do', 'environment', 'extractLine',
'extractWord', 'fixed', 'info', 'left', 'mid', 'percent', 'readFile', 'replace',
'replace_regex', 'right', 'selected', 'string', 'unicodeToBackslashTrigraphs',
)
functions_numeric = (
'abs', 'appendFile', 'appendFileLine', 'appendInfo', 'appendInfoLine', 'arccos',
'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'barkToHertz',
'beginPause', 'beginSendPraat', 'besselI', 'besselK', 'beta', 'beta2',
'binomialP', 'binomialQ', 'boolean', 'ceiling', 'chiSquareP', 'chiSquareQ',
'choice', 'comment', 'cos', 'cosh', 'createDirectory', 'deleteFile',
'demoClicked', 'demoClickedIn', 'demoCommandKeyPressed',
'demoExtraControlKeyPressed', 'demoInput', 'demoKeyPressed',
'demoOptionKeyPressed', 'demoShiftKeyPressed', 'demoShow', 'demoWaitForInput',
'demoWindowTitle', 'demoX', 'demoY', 'differenceLimensToPhon', 'do', 'editor',
'endPause', 'endSendPraat', 'endsWith', 'erb', 'erbToHertz', 'erf', 'erfc',
'exitScript', 'exp', 'extractNumber', 'fileReadable', 'fisherP', 'fisherQ',
'floor', 'gaussP', 'gaussQ', 'hertzToBark', 'hertzToErb', 'hertzToMel',
'hertzToSemitones', 'imax', 'imin', 'incompleteBeta', 'incompleteGammaP', 'index',
'index_regex', 'invBinomialP', 'invBinomialQ', 'invChiSquareQ', 'invFisherQ',
'invGaussQ', 'invSigmoid', 'invStudentQ', 'length', 'ln', 'lnBeta', 'lnGamma',
'log10', 'log2', 'max', 'melToHertz', 'min', 'minusObject', 'natural', 'number',
'numberOfColumns', 'numberOfRows', 'numberOfSelected', 'objectsAreIdentical',
'option', 'optionMenu', 'pauseScript', 'phonToDifferenceLimens', 'plusObject',
'positive', 'randomBinomial', 'randomGauss', 'randomInteger', 'randomPoisson',
'randomUniform', 'real', 'readFile', 'removeObject', 'rindex', 'rindex_regex',
'round', 'runScript', 'runSystem', 'runSystem_nocheck', 'selectObject',
'selected', 'semitonesToHertz', 'sentencetext', 'sigmoid', 'sin', 'sinc',
'sincpi', 'sinh', 'soundPressureToPhon', 'sqrt', 'startsWith', 'studentP',
'studentQ', 'tan', 'tanh', 'variableExists', 'word', 'writeFile', 'writeFileLine',
'writeInfo', 'writeInfoLine',
)
functions_array = (
'linear', 'randomGauss', 'randomInteger', 'randomUniform', 'zero',
)
objects = (
'Activation', 'AffineTransform', 'AmplitudeTier', 'Art', 'Artword',
'Autosegment', 'BarkFilter', 'BarkSpectrogram', 'CCA', 'Categories',
'Cepstrogram', 'Cepstrum', 'Cepstrumc', 'ChebyshevSeries', 'ClassificationTable',
'Cochleagram', 'Collection', 'ComplexSpectrogram', 'Configuration', 'Confusion',
'ContingencyTable', 'Corpus', 'Correlation', 'Covariance',
'CrossCorrelationTable', 'CrossCorrelationTables', 'DTW', 'DataModeler',
'Diagonalizer', 'Discriminant', 'Dissimilarity', 'Distance', 'Distributions',
'DurationTier', 'EEG', 'ERP', 'ERPTier', 'EditCostsTable', 'EditDistanceTable',
'Eigen', 'Excitation', 'Excitations', 'ExperimentMFC', 'FFNet', 'FeatureWeights',
'FileInMemory', 'FilesInMemory', 'Formant', 'FormantFilter', 'FormantGrid',
'FormantModeler', 'FormantPoint', 'FormantTier', 'GaussianMixture', 'HMM',
'HMM_Observation', 'HMM_ObservationSequence', 'HMM_State', 'HMM_StateSequence',
'Harmonicity', 'ISpline', 'Index', 'Intensity', 'IntensityTier', 'IntervalTier',
'KNN', 'KlattGrid', 'KlattTable', 'LFCC', 'LPC', 'Label', 'LegendreSeries',
'LinearRegression', 'LogisticRegression', 'LongSound', 'Ltas', 'MFCC', 'MSpline',
'ManPages', 'Manipulation', 'Matrix', 'MelFilter', 'MelSpectrogram',
'MixingMatrix', 'Movie', 'Network', 'OTGrammar', 'OTHistory', 'OTMulti', 'PCA',
'PairDistribution', 'ParamCurve', 'Pattern', 'Permutation', 'Photo', 'Pitch',
'PitchModeler', 'PitchTier', 'PointProcess', 'Polygon', 'Polynomial',
'PowerCepstrogram', 'PowerCepstrum', 'Procrustes', 'RealPoint', 'RealTier',
'ResultsMFC', 'Roots', 'SPINET', 'SSCP', 'SVD', 'Salience', 'ScalarProduct',
'Similarity', 'SimpleString', 'SortedSetOfString', 'Sound', 'Speaker',
'Spectrogram', 'Spectrum', 'SpectrumTier', 'SpeechSynthesizer', 'SpellingChecker',
'Strings', 'StringsIndex', 'Table', 'TableOfReal', 'TextGrid', 'TextInterval',
'TextPoint', 'TextTier', 'Tier', 'Transition', 'VocalTract', 'VocalTractTier',
'Weight', 'WordList',
)
variables_numeric = (
'macintosh', 'windows', 'unix', 'praatVersion', 'pi', 'e', 'undefined',
)
variables_string = (
'praatVersion', 'tab', 'shellDirectory', 'homeDirectory',
'preferencesDirectory', 'newline', 'temporaryDirectory',
'defaultDirectory',
)
tokens = {
'root': [
(r'(\s+)(#.*?$)', bygroups(Text, Comment.Single)),
(r'^#.*?$', Comment.Single),
(r';[^\n]*', Comment.Single),
(r'\s+', Text),
(r'\bprocedure\b', Keyword, 'procedure_definition'),
(r'\bcall\b', Keyword, 'procedure_call'),
(r'@', Name.Function, 'procedure_call'),
include('function_call'),
(words(keywords, suffix=r'\b'), Keyword),
(r'(\bform\b)(\s+)([^\n]+)',
bygroups(Keyword, Text, String), 'old_form'),
(r'(print(?:line|tab)?|echo|exit|asserterror|pause|send(?:praat|socket)|'
r'include|execute|system(?:_nocheck)?)(\s+)',
bygroups(Keyword, Text), 'string_unquoted'),
(r'(goto|label)(\s+)(\w+)', bygroups(Keyword, Text, Name.Label)),
include('variable_name'),
include('number'),
(r'"', String, 'string'),
(words((objects), suffix=r'(?=\s+\S+\n)'), Name.Class, 'string_unquoted'),
(r'\b[A-Z]', Keyword, 'command'),
(r'(\.{3}|[)(,])', Punctuation),
],
'command': [
(r'( ?[\w()-]+ ?)', Keyword),
(r"'(?=.*')", String.Interpol, 'string_interpolated'),
(r'\.{3}', Keyword, ('#pop', 'old_arguments')),
(r':', Keyword, ('#pop', 'comma_list')),
(r'\s', Text, '#pop'),
],
'procedure_call': [
(r'\s+', Text),
(r'([\w.]+)(:|\s*\()',
bygroups(Name.Function, Text), '#pop'),
(r'([\w.]+)', Name.Function, ('#pop', 'old_arguments')),
],
'procedure_definition': [
(r'\s', Text),
(r'([\w.]+)(\s*?[(:])',
bygroups(Name.Function, Text), '#pop'),
(r'([\w.]+)([^\n]*)',
bygroups(Name.Function, Text), '#pop'),
],
'function_call': [
(words(functions_string, suffix=r'\$(?=\s*[:(])'), Name.Function, 'function'),
(words(functions_array, suffix=r'#(?=\s*[:(])'), Name.Function, 'function'),
(words(functions_numeric, suffix=r'(?=\s*[:(])'), Name.Function, 'function'),
],
'function': [
(r'\s+', Text),
(r':', Punctuation, ('#pop', 'comma_list')),
(r'\s*\(', Punctuation, ('#pop', 'comma_list')),
],
'comma_list': [
(r'(\s*\n\s*)(\.{3})', bygroups(Text, Punctuation)),
(r'(\s*[])\n])', Text, '#pop'),
(r'\s+', Text),
(r'"', String, 'string'),
(r'\b(if|then|else|fi|endif)\b', Keyword),
include('function_call'),
include('variable_name'),
include('operator'),
include('number'),
(r'[()]', Text),
(r',', Punctuation),
],
'old_arguments': [
(r'\n', Text, '#pop'),
include('variable_name'),
include('operator'),
include('number'),
(r'"', String, 'string'),
(r'[^\n]', Text),
],
'number': [
(r'\n', Text, '#pop'),
(r'\b\d+(\.\d*)?([eE][-+]?\d+)?%?', Number),
],
'object_attributes': [
(r'\.?(n(col|row)|[xy]min|[xy]max|[nd][xy])\b', Name.Builtin, '#pop'),
(r'(\.?(?:col|row)\$)(\[)',
bygroups(Name.Builtin, Text), 'variable_name'),
(r'(\$?)(\[)',
bygroups(Name.Builtin, Text), ('#pop', 'comma_list')),
],
'variable_name': [
include('operator'),
include('number'),
(words(variables_string, suffix=r'\$'), Name.Variable.Global),
(words(variables_numeric, suffix=r'\b'), Name.Variable.Global),
(r'\bObject_\w+', Name.Builtin, 'object_attributes'),
(words(objects, prefix=r'\b', suffix=r'_\w+'),
Name.Builtin, 'object_attributes'),
(r"\b(Object_)(')",
bygroups(Name.Builtin, String.Interpol),
('object_attributes', 'string_interpolated')),
(words(objects, prefix=r'\b', suffix=r"(_)(')"),
bygroups(Name.Builtin, Name.Builtin, String.Interpol),
('object_attributes', 'string_interpolated')),
(r'\.?_?[a-z][\w.]*(\$|#)?', Text),
(r'[\[\]]', Punctuation, 'comma_list'),
(r"'(?=.*')", String.Interpol, 'string_interpolated'),
],
'operator': [
(r'([+\/*<>=!-]=?|[&*|][&*|]?|\^|<>)', Operator),
(r'(?<![\w.])(and|or|not|div|mod)(?![\w.])', Operator.Word),
],
'string_interpolated': [
(r'\.?[_a-z][\w.]*[$#]?(?:\[[a-zA-Z0-9,]+\])?(:[0-9]+)?',
String.Interpol),
(r"'", String.Interpol, '#pop'),
],
'string_unquoted': [
(r'(\n\s*)(\.{3})', bygroups(Text, Punctuation)),
(r'\n', Text, '#pop'),
(r'\s', Text),
(r"'(?=.*')", String.Interpol, 'string_interpolated'),
(r"'", String),
(r"[^'\n]+", String),
],
'string': [
(r'(\n\s*)(\.{3})', bygroups(Text, Punctuation)),
(r'"', String, '#pop'),
(r"'(?=.*')", String.Interpol, 'string_interpolated'),
(r"'", String),
(r'[^\'"\n]+', String),
],
'old_form': [
(r'\s+', Text),
(r'(optionmenu|choice)([ \t]+\S+:[ \t]+)',
bygroups(Keyword, Text), 'number'),
(r'(option|button)([ \t]+)',
bygroups(Keyword, Text), 'string_unquoted'),
(r'(sentence|text)([ \t]+\S+)',
bygroups(Keyword, Text), 'string_unquoted'),
(r'(word)([ \t]+\S+[ \t]*)(\S+)?([ \t]+.*)?',
bygroups(Keyword, Text, String, Text)),
(r'(boolean)(\s+\S+\s*)(0|1|"?(?:yes|no)"?)',
bygroups(Keyword, Text, Name.Variable)),
# Ideally processing of the number would happend in the 'number'
# but that doesn't seem to work
(r'(real|natural|positive|integer)([ \t]+\S+[ \t]*)([+-]?)(\d+(?:\.\d*)?'
r'(?:[eE][-+]?\d+)?%?)',
bygroups(Keyword, Text, Operator, Number)),
(r'(comment)(\s+)',
bygroups(Keyword, Text), 'string_unquoted'),
(r'\bendform\b', Keyword, '#pop'),
]
}
| PraatLexer |
python | pytorch__pytorch | torch/testing/_internal/autograd_function_db.py | {
"start": 9113,
"end": 10661
} | class ____(torch.autograd.Function):
@staticmethod
def forward(x, ind, ind_inv, dim):
device = x.device
x = to_numpy(x)
ind = to_numpy(ind)
return torch.tensor(np.take_along_axis(x, ind, dim), device=device)
@staticmethod
def setup_context(ctx, inputs, output):
_x, ind, ind_inv, dim = inputs
ctx.save_for_backward(ind, ind_inv)
ctx.save_for_forward(ind, ind_inv)
ctx.dim = dim
@staticmethod
def backward(ctx, grad_output):
ind, ind_inv = ctx.saved_tensors
result = NumpyTake.apply(grad_output, ind_inv, ind, ctx.dim)
return result, None, None, None
@staticmethod
def vmap(info, in_dims, x, ind, ind_inv, dim):
x_bdim, ind_bdim, ind_inv_bdim, _ = in_dims
# wrap dim
logical_dim = x.dim() if x_bdim is None else x_bdim - 1
dim = dim if dim >= 0 else dim + logical_dim
def expand_bdim(x, x_bdim):
if x_bdim is None:
return x.expand(info.batch_size, *x.shape)
return x.movedim(x_bdim, 0)
x = expand_bdim(x, x_bdim)
ind = expand_bdim(ind, ind_bdim)
ind_inv = expand_bdim(ind_inv, ind_inv_bdim)
return NumpyTake.apply(x, ind, ind_inv, dim + 1), 0
@staticmethod
def jvp(ctx, x_tangent, ind_tangent, ind_inv_tangent, _):
assert ind_tangent is None
assert ind_inv_tangent is None
ind, ind_inv = ctx.saved_tensors
return NumpyTake.apply(x_tangent, ind, ind_inv, ctx.dim)
| NumpyTake |
python | google__flatbuffers | python/flatbuffers/flexbuffers.py | {
"start": 10257,
"end": 10493
} | class ____(Sized):
"""Data accessor for the encoded blob bytes."""
__slots__ = ()
@property
def Bytes(self):
return self._buf[0 : len(self)]
def __repr__(self):
return 'Blob(%s, size=%d)' % (self._buf, len(self))
| Blob |
python | huggingface__transformers | src/transformers/models/markuplm/modeling_markuplm.py | {
"start": 10881,
"end": 11682
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->MarkupLM
| MarkupLMPredictionHeadTransform |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 534176,
"end": 534719
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of CreateBranchProtectionRule"""
__schema__ = github_schema
__field_names__ = ("branch_protection_rule", "client_mutation_id")
branch_protection_rule = sgqlc.types.Field("BranchProtectionRule", graphql_name="branchProtectionRule")
"""The newly created BranchProtectionRule."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| CreateBranchProtectionRulePayload |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-baseten/llama_index/embeddings/baseten/base.py | {
"start": 460,
"end": 3580
} | class ____(OpenAIEmbedding):
"""
Baseten class for embeddings.
Args:
model_id (str): The Baseten model ID (e.g., "03y7n6e3").
api_key (Optional[str]): The Baseten API key.
embed_batch_size (int): The batch size for embedding calls.
additional_kwargs (Optional[Dict[str, Any]]): Additional kwargs for the API.
max_retries (int): The maximum number of retries to make.
timeout (float): Timeout for each request.
callback_manager (Optional[CallbackManager]): Callback manager for logging.
default_headers (Optional[Dict[str, str]]): Default headers for API requests.
Examples:
```python
from llama_index.embeddings.baseten import BasetenEmbedding
# Using dedicated endpoint
# You can find the model_id by in the Baseten dashboard here: https://app.baseten.co/overview
embed_model = BasetenEmbedding(
model_id="MODEL_ID,
api_key="YOUR_API_KEY",
)
# Single embedding
embedding = embed_model.get_text_embedding("Hello, world!")
# Batch embeddings
embeddings = embed_model.get_text_embedding_batch([
"Hello, world!",
"Goodbye, world!"
])
```
"""
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the OpenAI API."
)
api_key: str = Field(description="The Baseten API key.")
api_base: str = Field(default="", description="The base URL for Baseten API.")
api_version: str = Field(default="", description="The version for OpenAI API.")
def __init__(
self,
model_id: str,
dimensions: Optional[int] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
additional_kwargs: Optional[Dict[str, Any]] = None,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
api_version: Optional[str] = None,
max_retries: int = 10,
timeout: float = 60.0,
reuse_client: bool = True,
callback_manager: Optional[CallbackManager] = None,
default_headers: Optional[Dict[str, str]] = None,
http_client: Optional[httpx.Client] = None,
**kwargs: Any,
) -> None:
# Use the dedicated endpoint URL format
api_base = DEFAULT_API_BASE.format(model_id=model_id)
api_key = get_from_param_or_env("api_key", api_key, "BASETEN_API_KEY")
super().__init__(
model_name=model_id,
dimensions=dimensions,
embed_batch_size=embed_batch_size,
additional_kwargs=additional_kwargs,
api_key=api_key,
api_base=api_base,
api_version=api_version,
max_retries=max_retries,
timeout=timeout,
reuse_client=reuse_client,
callback_manager=callback_manager,
default_headers=default_headers,
http_client=http_client,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "BasetenEmbedding"
| BasetenEmbedding |
python | tensorflow__tensorflow | tensorflow/python/debug/wrappers/framework.py | {
"start": 31079,
"end": 33283
} | class ____:
"""Type for return values of watch_fn."""
def __init__(self,
debug_ops=None,
node_name_regex_allowlist=None,
op_type_regex_allowlist=None,
tensor_dtype_regex_allowlist=None,
tolerate_debug_op_creation_failures=False):
"""Constructor of WatchOptions: Debug watch options.
Used as return values of `watch_fn`s.
Args:
debug_ops: (`str` or `list of str`) Debug ops to be used.
node_name_regex_allowlist: Regular-expression allowlist for node_name,
e.g., `"(weight_[0-9]+|bias_.*)"`
op_type_regex_allowlist: Regular-expression allowlist for the op type of
nodes, e.g., `"(Variable|Add)"`.
If both `node_name_regex_allowlist` and `op_type_regex_allowlist`
are set, the two filtering operations will occur in a logical `AND`
relation. In other words, a node will be included if and only if it
hits both allowlists.
tensor_dtype_regex_allowlist: Regular-expression allowlist for Tensor
data type, e.g., `"^int.*"`.
This allowlist operates in logical `AND` relations to the two allowlists
above.
tolerate_debug_op_creation_failures: (`bool`) whether debug op creation
failures (e.g., due to dtype incompatibility) are to be tolerated by not
throwing exceptions.
"""
if debug_ops:
self.debug_ops = debug_ops
else:
self.debug_ops = ["DebugIdentity"]
self.node_name_regex_allowlist = node_name_regex_allowlist
self.op_type_regex_allowlist = op_type_regex_allowlist
self.tensor_dtype_regex_allowlist = tensor_dtype_regex_allowlist
self.tolerate_debug_op_creation_failures = (
tolerate_debug_op_creation_failures)
def __repr__(self):
return ("WatchOptions(debug_ops=%r, node_name_regex_allowlist=%r, "
"op_type_regex_allowlist=%r, tensor_dtype_regex_allowlist=%r, "
"tolerate_debug_op_creation_failures=%r)" %
(self.debug_ops, self.node_name_regex_allowlist,
self.op_type_regex_allowlist, self.tensor_dtype_regex_allowlist,
self.tolerate_debug_op_creation_failures))
| WatchOptions |
python | tensorflow__tensorflow | tensorflow/python/autograph/tests/call_to_user_function_test.py | {
"start": 1505,
"end": 2464
} | class ____(reference_test_base.TestCase):
def test_basic(self):
self.assertFunctionMatchesEager(static_fn, 1)
self.assertFunctionMatchesEager(factory_dynamic_fn, 1)
self.assertFunctionMatchesEager(param_dynamic_fn, function_1, 1)
self.assertFunctionMatchesEager(variable_dynamic_fn, 1)
self.assertFunctionMatchesEager(variable_dynamic_whitelisted_fn, 1)
self.assertFunctionMatchesEager(dynamic_fn_with_kwargs, function_1, 1)
def test_basic_tensor(self):
self.all_inputs_tensors = True
self.assertFunctionMatchesEager(static_fn, 1)
self.assertFunctionMatchesEager(factory_dynamic_fn, 1)
self.assertFunctionMatchesEager(param_dynamic_fn, function_1, 1)
self.assertFunctionMatchesEager(variable_dynamic_fn, 1)
self.assertFunctionMatchesEager(variable_dynamic_whitelisted_fn, 1)
self.assertFunctionMatchesEager(dynamic_fn_with_kwargs, function_1, 1)
if __name__ == '__main__':
tf.test.main()
| ReferenceTest |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 84106,
"end": 85176
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"repository_id",
"base_ref_name",
"head_ref_name",
"title",
"body",
"maintainer_can_modify",
"draft",
"client_mutation_id",
)
repository_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="repositoryId"
)
base_ref_name = sgqlc.types.Field(
sgqlc.types.non_null(String), graphql_name="baseRefName"
)
head_ref_name = sgqlc.types.Field(
sgqlc.types.non_null(String), graphql_name="headRefName"
)
title = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="title")
body = sgqlc.types.Field(String, graphql_name="body")
maintainer_can_modify = sgqlc.types.Field(
Boolean, graphql_name="maintainerCanModify"
)
draft = sgqlc.types.Field(Boolean, graphql_name="draft")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| CreatePullRequestInput |
python | pandas-dev__pandas | pandas/tests/extension/json/test_json.py | {
"start": 1719,
"end": 18221
} | class ____(base.ExtensionTests):
@pytest.mark.xfail(
reason="comparison method not implemented for JSONArray (GH-37867)"
)
def test_contains(self, data):
# GH-37867
super().test_contains(data)
@pytest.mark.xfail(reason="not implemented constructor from dtype")
def test_from_dtype(self, data):
# construct from our dtype & string dtype
super().test_from_dtype(data)
@pytest.mark.xfail(reason="RecursionError, GH-33900")
def test_series_constructor_no_data_with_index(self, dtype, na_value):
# RecursionError: maximum recursion depth exceeded in comparison
rec_limit = sys.getrecursionlimit()
try:
# Limit to avoid stack overflow on Windows CI
sys.setrecursionlimit(100)
super().test_series_constructor_no_data_with_index(dtype, na_value)
finally:
sys.setrecursionlimit(rec_limit)
@pytest.mark.xfail(reason="RecursionError, GH-33900")
def test_series_constructor_scalar_na_with_index(self, dtype, na_value):
# RecursionError: maximum recursion depth exceeded in comparison
rec_limit = sys.getrecursionlimit()
try:
# Limit to avoid stack overflow on Windows CI
sys.setrecursionlimit(100)
super().test_series_constructor_scalar_na_with_index(dtype, na_value)
finally:
sys.setrecursionlimit(rec_limit)
@pytest.mark.xfail(reason="collection as scalar, GH-33901")
def test_series_constructor_scalar_with_index(self, data, dtype):
# TypeError: All values must be of type <class 'collections.abc.Mapping'>
rec_limit = sys.getrecursionlimit()
try:
# Limit to avoid stack overflow on Windows CI
sys.setrecursionlimit(100)
super().test_series_constructor_scalar_with_index(data, dtype)
finally:
sys.setrecursionlimit(rec_limit)
@pytest.mark.xfail(reason="Different definitions of NA")
def test_stack(self):
"""
The test does .astype(object).stack(). If we happen to have
any missing values in `data`, then we'll end up with different
rows since we consider `{}` NA, but `.astype(object)` doesn't.
"""
super().test_stack()
@pytest.mark.xfail(reason="dict for NA")
def test_unstack(self, data, index):
# The base test has NaN for the expected NA value.
# this matches otherwise
return super().test_unstack(data, index)
@pytest.mark.xfail(reason="Setting a dict as a scalar")
def test_fillna_series(self):
"""We treat dictionaries as a mapping in fillna, not a scalar."""
super().test_fillna_series()
@pytest.mark.xfail(reason="Setting a dict as a scalar")
def test_fillna_frame(self):
"""We treat dictionaries as a mapping in fillna, not a scalar."""
super().test_fillna_frame()
def test_fillna_with_none(self, data_missing):
# GH#57723
# EAs that don't have special logic for None will raise, unlike pandas'
# which interpret None as the NA value for the dtype.
with pytest.raises(AssertionError):
super().test_fillna_with_none(data_missing)
@pytest.mark.xfail(reason="fill value is a dictionary, takes incorrect code path")
def test_fillna_limit_frame(self, data_missing):
# GH#58001
super().test_fillna_limit_frame(data_missing)
@pytest.mark.xfail(reason="fill value is a dictionary, takes incorrect code path")
def test_fillna_limit_series(self, data_missing):
# GH#58001
super().test_fillna_limit_frame(data_missing)
@pytest.mark.parametrize(
"limit_area, input_ilocs, expected_ilocs",
[
("outside", [1, 0, 0, 0, 1], [1, 0, 0, 0, 1]),
("outside", [1, 0, 1, 0, 1], [1, 0, 1, 0, 1]),
("outside", [0, 1, 1, 1, 0], [0, 1, 1, 1, 1]),
("outside", [0, 1, 0, 1, 0], [0, 1, 0, 1, 1]),
("inside", [1, 0, 0, 0, 1], [1, 1, 1, 1, 1]),
("inside", [1, 0, 1, 0, 1], [1, 1, 1, 1, 1]),
("inside", [0, 1, 1, 1, 0], [0, 1, 1, 1, 0]),
("inside", [0, 1, 0, 1, 0], [0, 1, 1, 1, 0]),
],
)
def test_ffill_limit_area(
self, data_missing, limit_area, input_ilocs, expected_ilocs
):
# GH#56616
msg = "JSONArray does not implement limit_area"
with pytest.raises(NotImplementedError, match=msg):
super().test_ffill_limit_area(
data_missing, limit_area, input_ilocs, expected_ilocs
)
def test_value_counts(self, all_data, dropna, request):
if len(all_data) == 10 or dropna:
request.applymarker(unhashable)
super().test_value_counts(all_data, dropna)
@unhashable
def test_sort_values_frame(self):
# TODO (EA.factorize): see if _values_for_factorize allows this.
super().test_sort_values_frame()
@pytest.mark.xfail(reason="combine for JSONArray not supported")
def test_combine_le(self, data_repeated):
super().test_combine_le(data_repeated)
@pytest.mark.xfail(
reason="combine for JSONArray not supported - "
"may pass depending on random data",
strict=False,
raises=AssertionError,
)
def test_combine_first(self, data):
super().test_combine_first(data)
@pytest.mark.xfail(reason="broadcasting error")
def test_where_series(self, data, na_value):
# Fails with
# *** ValueError: operands could not be broadcast together
# with shapes (4,) (4,) (0,)
super().test_where_series(data, na_value)
@pytest.mark.xfail(reason="Can't compare dicts.")
def test_searchsorted(self, data_for_sorting):
super().test_searchsorted(data_for_sorting)
@pytest.mark.xfail(reason="Can't compare dicts.")
def test_equals(self, data, na_value, as_series):
super().test_equals(data, na_value, as_series)
@pytest.mark.skip("fill-value is interpreted as a dict of values")
def test_fillna_copy_frame(self, data_missing):
super().test_fillna_copy_frame(data_missing)
@pytest.mark.xfail(reason="Fails with CoW")
def test_equals_same_data_different_object(self, data):
super().test_equals_same_data_different_object(data)
@pytest.mark.xfail(reason="failing on np.array(self, dtype=str)")
def test_astype_str(self):
"""This currently fails in NumPy on np.array(self, dtype=str) with
*** ValueError: setting an array element with a sequence
"""
super().test_astype_str()
@unhashable
def test_groupby_extension_transform(self):
"""
This currently fails in Series.name.setter, since the
name must be hashable, but the value is a dictionary.
I think this is what we want, i.e. `.name` should be the original
values, and not the values for factorization.
"""
super().test_groupby_extension_transform()
@unhashable
def test_groupby_extension_apply(self):
"""
This fails in Index._do_unique_check with
> hash(val)
E TypeError: unhashable type: 'UserDict' with
I suspect that once we support Index[ExtensionArray],
we'll be able to dispatch unique.
"""
super().test_groupby_extension_apply()
@unhashable
def test_groupby_extension_agg(self):
"""
This fails when we get to tm.assert_series_equal when left.index
contains dictionaries, which are not hashable.
"""
super().test_groupby_extension_agg()
@unhashable
def test_groupby_extension_no_sort(self):
"""
This fails when we get to tm.assert_series_equal when left.index
contains dictionaries, which are not hashable.
"""
super().test_groupby_extension_no_sort()
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request):
if len(data[0]) != 1:
mark = pytest.mark.xfail(reason="raises in coercing to Series")
request.applymarker(mark)
super().test_arith_frame_with_scalar(data, all_arithmetic_operators)
def test_compare_array(self, data, comparison_op, request):
if comparison_op.__name__ in ["eq", "ne"]:
mark = pytest.mark.xfail(reason="Comparison methods not implemented")
request.applymarker(mark)
super().test_compare_array(data, comparison_op)
@pytest.mark.xfail(reason="ValueError: Must have equal len keys and value")
def test_setitem_loc_scalar_mixed(self, data):
super().test_setitem_loc_scalar_mixed(data)
@pytest.mark.xfail(reason="ValueError: Must have equal len keys and value")
def test_setitem_loc_scalar_multiple_homogoneous(self, data):
super().test_setitem_loc_scalar_multiple_homogoneous(data)
@pytest.mark.xfail(reason="ValueError: Must have equal len keys and value")
def test_setitem_iloc_scalar_mixed(self, data):
super().test_setitem_iloc_scalar_mixed(data)
@pytest.mark.xfail(reason="ValueError: Must have equal len keys and value")
def test_setitem_iloc_scalar_multiple_homogoneous(self, data):
super().test_setitem_iloc_scalar_multiple_homogoneous(data)
@pytest.mark.parametrize(
"mask",
[
np.array([True, True, True, False, False]),
pd.array([True, True, True, False, False], dtype="boolean"),
pd.array([True, True, True, pd.NA, pd.NA], dtype="boolean"),
],
ids=["numpy-array", "boolean-array", "boolean-array-na"],
)
def test_setitem_mask(self, data, mask, box_in_series, request):
if box_in_series:
mark = pytest.mark.xfail(
reason="cannot set using a list-like indexer with a different length"
)
request.applymarker(mark)
elif not isinstance(mask, np.ndarray):
mark = pytest.mark.xfail(reason="Issues unwanted DeprecationWarning")
request.applymarker(mark)
super().test_setitem_mask(data, mask, box_in_series)
def test_setitem_mask_raises(self, data, box_in_series, request):
if not box_in_series:
mark = pytest.mark.xfail(reason="Fails to raise")
request.applymarker(mark)
super().test_setitem_mask_raises(data, box_in_series)
@pytest.mark.xfail(
reason="cannot set using a list-like indexer with a different length"
)
def test_setitem_mask_boolean_array_with_na(self, data, box_in_series):
super().test_setitem_mask_boolean_array_with_na(data, box_in_series)
@pytest.mark.parametrize(
"idx",
[[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])],
ids=["list", "integer-array", "numpy-array"],
)
def test_setitem_integer_array(self, data, idx, box_in_series, request):
if box_in_series:
mark = pytest.mark.xfail(
reason="cannot set using a list-like indexer with a different length"
)
request.applymarker(mark)
super().test_setitem_integer_array(data, idx, box_in_series)
@pytest.mark.parametrize(
"idx",
[
[0, 1, 2, pd.NA],
pd.array([0, 1, 2, pd.NA], dtype="Int64"),
],
ids=["list", "integer-array"],
)
@pytest.mark.parametrize(
"box_in_series",
[
True,
pytest.param(
False,
marks=pytest.mark.xfail(
reason="list indices must be integers or slices, not NAType"
),
),
],
)
def test_setitem_integer_with_missing_raises(self, data, idx, box_in_series):
super().test_setitem_integer_with_missing_raises(data, idx, box_in_series)
@pytest.mark.xfail(reason="Fails to raise")
def test_setitem_scalar_key_sequence_raise(self, data):
super().test_setitem_scalar_key_sequence_raise(data)
def test_setitem_with_expansion_dataframe_column(self, data, full_indexer, request):
if "full_slice" in request.node.name:
mark = pytest.mark.xfail(reason="slice is not iterable")
request.applymarker(mark)
super().test_setitem_with_expansion_dataframe_column(data, full_indexer)
@pytest.mark.xfail(reason="slice is not iterable")
def test_setitem_frame_2d_values(self, data):
super().test_setitem_frame_2d_values(data)
@pytest.mark.xfail(
reason="cannot set using a list-like indexer with a different length"
)
@pytest.mark.parametrize("setter", ["loc", None])
def test_setitem_mask_broadcast(self, data, setter):
super().test_setitem_mask_broadcast(data, setter)
@pytest.mark.xfail(
reason="cannot set using a slice indexer with a different length"
)
def test_setitem_slice(self, data, box_in_series):
super().test_setitem_slice(data, box_in_series)
@pytest.mark.xfail(reason="slice object is not iterable")
def test_setitem_loc_iloc_slice(self, data):
super().test_setitem_loc_iloc_slice(data)
@pytest.mark.xfail(reason="slice object is not iterable")
def test_setitem_slice_mismatch_length_raises(self, data):
super().test_setitem_slice_mismatch_length_raises(data)
@pytest.mark.xfail(reason="slice object is not iterable")
def test_setitem_slice_array(self, data):
super().test_setitem_slice_array(data)
@pytest.mark.xfail(reason="Fail to raise")
def test_setitem_invalid(self, data, invalid_scalar):
super().test_setitem_invalid(data, invalid_scalar)
@pytest.mark.xfail(
reason="result readonly flag is incorrect and does not support na_value"
)
def test_readonly_propagates_to_numpy_array_method(self, data):
super().test_readonly_propagates_to_numpy_array_method(data)
@pytest.mark.xfail(reason="only integer scalar arrays can be converted")
def test_setitem_2d_values(self, data):
super().test_setitem_2d_values(data)
@pytest.mark.xfail(reason="data type 'json' not understood")
@pytest.mark.parametrize("engine", ["c", "python"])
def test_EA_types(self, engine, data, request):
super().test_EA_types(engine, data, request)
def custom_assert_series_equal(left, right, *args, **kwargs):
# NumPy doesn't handle an array of equal-length UserDicts.
# The default assert_series_equal eventually does a
# Series.values, which raises. We work around it by
# converting the UserDicts to dicts.
if left.dtype.name == "json":
assert left.dtype == right.dtype
left = pd.Series(
JSONArray(left.values.astype(object)), index=left.index, name=left.name
)
right = pd.Series(
JSONArray(right.values.astype(object)),
index=right.index,
name=right.name,
)
tm.assert_series_equal(left, right, *args, **kwargs)
def custom_assert_frame_equal(left, right, *args, **kwargs):
obj_type = kwargs.get("obj", "DataFrame")
tm.assert_index_equal(
left.columns,
right.columns,
exact=kwargs.get("check_column_type", "equiv"),
check_names=kwargs.get("check_names", True),
check_exact=kwargs.get("check_exact", False),
check_categorical=kwargs.get("check_categorical", True),
obj=f"{obj_type}.columns",
)
jsons = (left.dtypes == "json").index
for col in jsons:
custom_assert_series_equal(left[col], right[col], *args, **kwargs)
left = left.drop(columns=jsons)
right = right.drop(columns=jsons)
tm.assert_frame_equal(left, right, *args, **kwargs)
def test_custom_asserts():
# This would always trigger the KeyError from trying to put
# an array of equal-length UserDicts inside an ndarray.
data = JSONArray(
[
collections.UserDict({"a": 1}),
collections.UserDict({"b": 2}),
collections.UserDict({"c": 3}),
]
)
a = pd.Series(data)
custom_assert_series_equal(a, a)
custom_assert_frame_equal(a.to_frame(), a.to_frame())
b = pd.Series(data.take([0, 0, 1]))
msg = r"Series are different"
with pytest.raises(AssertionError, match=msg):
custom_assert_series_equal(a, b)
with pytest.raises(AssertionError, match=msg):
custom_assert_frame_equal(a.to_frame(), b.to_frame())
| TestJSONArray |
python | allegroai__clearml | examples/frameworks/fire/fire_grouping_cmd.py | {
"start": 556,
"end": 893
} | class ____(object):
def __init__(self):
self.ingestion = IngestionStage()
self.digestion = DigestionStage()
def run(self):
self.ingestion.run()
self.digestion.run()
if __name__ == "__main__":
Task.init(project_name="examples", task_name="Fire grouping command")
fire.Fire(Pipeline)
| Pipeline |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 52006,
"end": 52955
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"pull_request_id",
"pull_request_review_id",
"commit_oid",
"body",
"path",
"position",
"in_reply_to",
"client_mutation_id",
)
pull_request_id = sgqlc.types.Field(ID, graphql_name="pullRequestId")
pull_request_review_id = sgqlc.types.Field(ID, graphql_name="pullRequestReviewId")
commit_oid = sgqlc.types.Field(GitObjectID, graphql_name="commitOID")
body = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="body")
path = sgqlc.types.Field(String, graphql_name="path")
position = sgqlc.types.Field(Int, graphql_name="position")
in_reply_to = sgqlc.types.Field(ID, graphql_name="inReplyTo")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| AddPullRequestReviewCommentInput |
python | doocs__leetcode | solution/1400-1499/1474.Delete N Nodes After M Nodes of a Linked List/Solution.py | {
"start": 151,
"end": 642
} | class ____:
def deleteNodes(self, head: ListNode, m: int, n: int) -> ListNode:
pre = head
while pre:
for _ in range(m - 1):
if pre:
pre = pre.next
if pre is None:
return head
cur = pre
for _ in range(n):
if cur:
cur = cur.next
pre.next = None if cur is None else cur.next
pre = pre.next
return head
| Solution |
python | faif__python-patterns | patterns/behavioral/command.py | {
"start": 1911,
"end": 2898
} | class ____:
"""
The invoker class. Here it is items in a menu.
"""
def __init__(self, command: Union[HideFileCommand, DeleteFileCommand]) -> None:
self._command = command
def on_do_press(self, filename: str) -> None:
self._command.execute(filename)
def on_undo_press(self) -> None:
self._command.undo()
def main():
"""
>>> item1 = MenuItem(DeleteFileCommand())
>>> item2 = MenuItem(HideFileCommand())
# create a file named `test-file` to work with
>>> test_file_name = 'test-file'
# deleting `test-file`
>>> item1.on_do_press(test_file_name)
deleting test-file
# restoring `test-file`
>>> item1.on_undo_press()
restoring test-file
# hiding `test-file`
>>> item2.on_do_press(test_file_name)
hiding test-file
# un-hiding `test-file`
>>> item2.on_undo_press()
un-hiding test-file
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| MenuItem |
python | ipython__ipython | IPython/core/prefilter.py | {
"start": 15537,
"end": 15978
} | class ____(PrefilterChecker):
priority = Integer(300).tag(config=True)
def check(self, line_info):
"Instances of IPyAutocall in user_ns get autocalled immediately"
obj = self.shell.user_ns.get(line_info.ifun, None)
if isinstance(obj, IPyAutocall):
obj.set_ip(self.shell)
return self.prefilter_manager.get_handler_by_name('auto')
else:
return None
| IPyAutocallChecker |
python | pytorch__pytorch | benchmarks/fastrnns/custom_lstms.py | {
"start": 5005,
"end": 6389
} | class ____(jit.ScriptModule):
def __init__(self, input_size, hidden_size, decompose_layernorm=False):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.weight_ih = Parameter(torch.randn(4 * hidden_size, input_size))
self.weight_hh = Parameter(torch.randn(4 * hidden_size, hidden_size))
# The layernorms provide learnable biases
if decompose_layernorm:
ln = LayerNorm
else:
ln = nn.LayerNorm
self.layernorm_i = ln(4 * hidden_size)
self.layernorm_h = ln(4 * hidden_size)
self.layernorm_c = ln(hidden_size)
@jit.script_method
def forward(
self, input: Tensor, state: tuple[Tensor, Tensor]
) -> tuple[Tensor, tuple[Tensor, Tensor]]:
hx, cx = state
igates = self.layernorm_i(torch.mm(input, self.weight_ih.t()))
hgates = self.layernorm_h(torch.mm(hx, self.weight_hh.t()))
gates = igates + hgates
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = torch.sigmoid(ingate)
forgetgate = torch.sigmoid(forgetgate)
cellgate = torch.tanh(cellgate)
outgate = torch.sigmoid(outgate)
cy = self.layernorm_c((forgetgate * cx) + (ingate * cellgate))
hy = outgate * torch.tanh(cy)
return hy, (hy, cy)
| LayerNormLSTMCell |
python | sphinx-doc__sphinx | sphinx/ext/autodoc/_dynamic/_importer.py | {
"start": 9263,
"end": 14519
} | class ____(FileLoader):
"""Load modules from ``.pyi`` stub files."""
def get_source(self, fullname: str) -> str:
path = self.get_filename(fullname)
for suffix in _NATIVE_SUFFIXES:
if not path.endswith(suffix):
continue
path = path.removesuffix(suffix) + '.pyi'
try:
source_bytes = self.get_data(path)
except OSError as exc:
raise ImportError from exc
return decode_source(source_bytes)
def _reload_module(module: ModuleType) -> Any:
"""Call importlib.reload(module), convert exceptions to ImportError"""
try:
return importlib.reload(module)
except BaseException as exc:
# Importing modules may cause any side effects, including
# SystemExit, so we need to catch all errors.
raise ImportError(exc, traceback.format_exc()) from exc
def _mangle_name(subject: Any, name: str) -> str:
"""Mangle the given name."""
try:
if isclass(subject) and name.startswith('__') and not name.endswith('__'):
return f'_{subject.__name__}{name}'
except AttributeError:
pass
return name
def _import_data_declaration(
*,
module_name: str,
obj_path: Sequence[str],
mock_imports: Sequence[str],
type_aliases: Mapping[str, str] | None,
) -> _ImportedObject | None:
# annotation only instance variable (PEP-526)
try:
with mock(mock_imports):
parent = _import_module(module_name)
annotations = get_type_hints(parent, None, type_aliases, include_extras=True)
if obj_path[-1] in annotations:
im = _ImportedObject(
parent=parent,
obj=UNINITIALIZED_ATTR,
)
return im
except ImportError:
pass
return None
def _import_attribute_declaration(
*,
module_name: str,
obj_path: Sequence[str],
mock_imports: Sequence[str],
type_aliases: Mapping[str, str] | None,
get_attr: _AttrGetter = safe_getattr,
) -> _ImportedObject | None:
# Support runtime & uninitialized instance attributes.
#
# The former are defined in __init__() methods with doc-comments.
# The latter are PEP-526 style annotation only annotations.
#
# class Foo:
# attr: int #: uninitialized attribute
#
# def __init__(self):
# self.attr = None #: runtime attribute
try:
with mock(mock_imports):
ret = _import_from_module_and_path(
module_name=module_name, obj_path=obj_path[:-1], get_attr=get_attr
)
parent = ret.obj
if _is_runtime_instance_attribute(parent=parent, obj_path=obj_path):
im = _ImportedObject(
parent=parent,
obj=RUNTIME_INSTANCE_ATTRIBUTE,
)
return im
elif _is_uninitialized_instance_attribute(
parent=parent, obj_path=obj_path, type_aliases=type_aliases
):
im = _ImportedObject(
parent=parent,
obj=UNINITIALIZED_ATTR,
)
return im
except ImportError:
pass
return None
def _is_runtime_instance_attribute(*, parent: Any, obj_path: Sequence[str]) -> bool:
"""Check the subject is an attribute defined in __init__()."""
# An instance variable defined in __init__().
if _get_attribute_comment(parent=parent, obj_path=obj_path, attrname=obj_path[-1]):
return True
return _is_runtime_instance_attribute_not_commented(
parent=parent, obj_path=obj_path
)
def _is_runtime_instance_attribute_not_commented(
*, parent: Any, obj_path: Sequence[str]
) -> bool:
"""Check the subject is an attribute defined in __init__() without comment."""
for cls in inspect.getmro(parent):
try:
module = safe_getattr(cls, '__module__')
qualname = safe_getattr(cls, '__qualname__')
analyzer = ModuleAnalyzer.for_module(module)
analyzer.analyze()
if qualname and obj_path:
key = f'{qualname}.{obj_path[-1]}'
if key in analyzer.tagorder:
return True
except (AttributeError, PycodeError):
pass
return False
def _get_attribute_comment(
parent: Any, obj_path: Sequence[str], attrname: str
) -> list[str] | None:
for cls in inspect.getmro(parent):
try:
module = safe_getattr(cls, '__module__')
qualname = safe_getattr(cls, '__qualname__')
analyzer = ModuleAnalyzer.for_module(module)
analyzer.analyze()
if qualname and obj_path:
key = (qualname, attrname)
if key in analyzer.attr_docs:
return list(analyzer.attr_docs[key])
except (AttributeError, PycodeError):
pass
return None
def _is_uninitialized_instance_attribute(
*, parent: Any, obj_path: Sequence[str], type_aliases: Mapping[str, str] | None
) -> bool:
"""Check the subject is an annotation only attribute."""
annotations = get_type_hints(parent, None, type_aliases, include_extras=True)
return obj_path[-1] in annotations
| _StubFileLoader |
python | keras-team__keras | keras/src/optimizers/ftrl.py | {
"start": 193,
"end": 8799
} | class ____(optimizer.Optimizer):
r"""Optimizer that implements the FTRL algorithm.
"Follow The Regularized Leader" (FTRL) is an optimization algorithm
developed at Google for click-through rate prediction in the early 2010s. It
is most suitable for shallow models with large and sparse feature spaces.
The algorithm is described by
[McMahan et al., 2013](https://research.google.com/pubs/archive/41159.pdf).
The Keras version has support for both online L2 regularization
(the L2 regularization described in the paper
above) and shrinkage-type L2 regularization
(which is the addition of an L2 penalty to the loss function).
Initialization:
```python
n = 0
sigma = 0
z = 0
```
Update rule for one variable `w`:
```python
prev_n = n
n = n + g ** 2
sigma = (n ** -lr_power - prev_n ** -lr_power) / lr
z = z + g - sigma * w
if abs(z) < lambda_1:
w = 0
else:
w = (sgn(z) * lambda_1 - z) / ((beta + sqrt(n)) / alpha + lambda_2)
```
Notation:
- `lr` is the learning rate
- `g` is the gradient for the variable
- `lambda_1` is the L1 regularization strength
- `lambda_2` is the L2 regularization strength
- `lr_power` is the power to scale n.
Check the documentation for the `l2_shrinkage_regularization_strength`
parameter for more details when shrinkage is enabled, in which case gradient
is replaced with a gradient with shrinkage.
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
learning_rate_power: A float value, must be less or equal to zero.
Controls how the learning rate decreases during training. Use zero
for a fixed learning rate.
initial_accumulator_value: The starting value for accumulators. Only
zero or positive values are allowed.
l1_regularization_strength: A float value, must be greater than or equal
to zero. Defaults to `0.0`.
l2_regularization_strength: A float value, must be greater than or equal
to zero. Defaults to `0.0`.
l2_shrinkage_regularization_strength: A float value, must be greater
than or equal to zero. This differs from L2 above in that the L2
above is a stabilization penalty, whereas this L2 shrinkage is a
magnitude penalty. When input is sparse shrinkage will only happen
on the active weights.
beta: A float value, representing the beta value from the paper.
Defaults to `0.0`.
{{base_optimizer_keyword_args}}
"""
def __init__(
self,
learning_rate=0.001,
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0,
l2_shrinkage_regularization_strength=0.0,
beta=0.0,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="ftrl",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
**kwargs,
)
if initial_accumulator_value < 0.0:
raise ValueError(
"`initial_accumulator_value` needs to be positive or zero. "
"Received: initial_accumulator_value="
f"{initial_accumulator_value}."
)
if learning_rate_power > 0.0:
raise ValueError(
"`learning_rate_power` needs to be negative or zero. Received: "
f"learning_rate_power={learning_rate_power}."
)
if l1_regularization_strength < 0.0:
raise ValueError(
"`l1_regularization_strength` needs to be positive or zero. "
"Received: l1_regularization_strength="
f"{l1_regularization_strength}."
)
if l2_regularization_strength < 0.0:
raise ValueError(
"`l2_regularization_strength` needs to be positive or zero. "
"Received: l2_regularization_strength="
f"{l2_regularization_strength}."
)
if l2_shrinkage_regularization_strength < 0.0:
raise ValueError(
"`l2_shrinkage_regularization_strength` needs to be positive "
"or zero. Received: l2_shrinkage_regularization_strength"
f"={l2_shrinkage_regularization_strength}."
)
self.learning_rate_power = learning_rate_power
self.initial_accumulator_value = initial_accumulator_value
self.l1_regularization_strength = l1_regularization_strength
self.l2_regularization_strength = l2_regularization_strength
self.l2_shrinkage_regularization_strength = (
l2_shrinkage_regularization_strength
)
self.beta = beta
def build(self, var_list):
"""Initialize optimizer variables.
Args:
var_list: list of model variables to build Ftrl variables on.
"""
if self.built:
return
super().build(var_list)
accumulator_initializer = initializers.Constant(
self.initial_accumulator_value,
)
self._accumulators, self._linears = self.add_optimizer_variables(
var_list,
["accumulator", "linear"],
initializer=[accumulator_initializer, "zeros"],
)
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
accum = self._accumulators[self._get_variable_index(variable)]
linear = self._linears[self._get_variable_index(variable)]
lr_power = self.learning_rate_power
l2_reg = self.l2_regularization_strength
l2_reg = l2_reg + self.beta / (2.0 * lr)
grad_to_use = ops.add(
gradient,
ops.multiply(
2 * self.l2_shrinkage_regularization_strength, variable
),
)
new_accum = ops.add(accum, ops.square(gradient))
self.assign_add(
linear,
ops.subtract(
grad_to_use,
ops.multiply(
ops.divide(
ops.subtract(
ops.power(new_accum, -lr_power),
ops.power(accum, -lr_power),
),
lr,
),
variable,
),
),
)
quadratic = ops.add(
ops.divide(ops.power(new_accum, (-lr_power)), lr), 2 * l2_reg
)
linear_clipped = ops.clip(
linear,
-self.l1_regularization_strength,
self.l1_regularization_strength,
)
self.assign(
variable,
ops.divide(ops.subtract(linear_clipped, linear), quadratic),
)
self.assign(accum, new_accum)
def get_config(self):
config = super().get_config()
config.update(
{
"learning_rate_power": self.learning_rate_power,
"initial_accumulator_value": self.initial_accumulator_value,
"l1_regularization_strength": self.l1_regularization_strength,
"l2_regularization_strength": self.l2_regularization_strength,
"l2_shrinkage_regularization_strength": self.l2_shrinkage_regularization_strength, # noqa: E501
"beta": self.beta,
}
)
return config
Ftrl.__doc__ = Ftrl.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
| Ftrl |
python | openai__openai-python | src/openai/types/beta/assistant_stream_event.py | {
"start": 3092,
"end": 3314
} | class ____(BaseModel):
data: Run
"""
Represents an execution run on a
[thread](https://platform.openai.com/docs/api-reference/threads).
"""
event: Literal["thread.run.cancelling"]
| ThreadRunCancelling |
python | h5py__h5py | h5py/tests/test_group.py | {
"start": 24754,
"end": 26371
} | class ____(TestCase):
"""
Feature: The .visit_links and .visititems_links methods allow iterative access to
links contained in the group and its subgroups.
"""
def setUp(self):
self.f = File(self.mktemp(), 'w')
self.groups = [
'grp1', 'grp1/grp11', 'grp1/grp12', 'grp2', 'grp2/grp21', 'grp2/grp21/grp211'
]
self.links = [
'linkto_grp1', 'grp1/linkto_grp11', 'grp1/linkto_grp12', 'linkto_grp2', 'grp2/linkto_grp21', 'grp2/grp21/linkto_grp211'
]
for g, l in zip(self.groups, self.links, strict=True):
self.f.create_group(g)
self.f[l] = SoftLink(f'/{g}')
def tearDown(self):
self.f.close()
def test_visit_links(self):
""" All subgroups and links are visited """
l = []
self.f.visit_links(l.append)
self.assertSameElements(l, self.groups + self.links)
def test_visititems(self):
""" All links are visited """
l = []
comp = [(x, type(self.f.get(x, getlink=True))) for x in self.groups + self.links]
self.f.visititems_links(lambda x, y: l.append((x, type(y))))
self.assertSameElements(comp, l)
def test_bailout(self):
""" Returning a non-None value immediately aborts iteration """
# do not make assumption on iteration order
l = []
x = self.f.visit_links(lambda x: l.append(x) or -1)
assert x == -1 and len(l) == 1
l = []
x = self.f.visititems_links(lambda x, y: l.append((x,y)) or -1)
assert x == -1 and len(l) == 1
| TestVisitLinks |
python | run-llama__llama_index | llama-index-core/llama_index/core/instrumentation/events/exception.py | {
"start": 64,
"end": 329
} | class ____(BaseEvent):
"""
ExceptionEvent.
Args:
exception (BaseException): exception.
"""
exception: BaseException
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "ExceptionEvent"
| ExceptionEvent |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/guides/components/shell-script-component/custom-schema-resolution.py | {
"start": 87,
"end": 295
} | class ____:
def __init__(self, api_key: str): ...
def resolve_api_key(
context: dg.ResolutionContext,
api_key: str,
) -> MyApiClient:
return MyApiClient(api_key=api_key)
@dataclass
| MyApiClient |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 639702,
"end": 640662
} | class ____(VegaLiteSchema):
"""
LookupData schema wrapper.
Parameters
----------
data : dict, :class:`Data`, :class:`UrlData`, :class:`Generator`, :class:`NamedData`, :class:`DataSource`, :class:`InlineData`, :class:`SphereGenerator`, :class:`SequenceGenerator`, :class:`GraticuleGenerator`
Secondary data source to lookup in.
key : str, :class:`FieldName`
Key in data to lookup.
fields : Sequence[str, :class:`FieldName`]
Fields in foreign data or selection to lookup. If not specified, the entire object
is queried.
"""
_schema = {"$ref": "#/definitions/LookupData"}
def __init__(
self,
data: Optional[SchemaBase | ChartDataType | Map] = Undefined,
key: Optional[str | SchemaBase] = Undefined,
fields: Optional[Sequence[str | SchemaBase]] = Undefined,
**kwds,
):
super().__init__(data=data, key=key, fields=fields, **kwds)
| LookupData |
python | django-import-export__django-import-export | tests/core/tests/test_permissions.py | {
"start": 352,
"end": 7008
} | class ____(AdminTestMixin, TestCase):
def setUp(self):
user = User.objects.create_user("admin", "admin@example.com", "password")
user.is_staff = True
user.is_superuser = False
user.save()
self.user = user
self.client.login(username="admin", password="password")
def set_user_model_permission(self, action, model_name):
permission = Permission.objects.get(codename=f"{action}_{model_name}")
self.user.user_permissions.add(permission)
@override_settings(IMPORT_EXPORT_IMPORT_PERMISSION_CODE="change")
def test_import(self):
# user has no permission to import
response = self.client.get(self.book_import_url)
self.assertEqual(response.status_code, 403)
# POST the import form
input_format = "0"
filename = os.path.join(
os.path.dirname(__file__), os.path.pardir, "exports", "books.csv"
)
with open(filename, "rb") as f:
data = {
"format": input_format,
"import_file": f,
}
self._prepend_form_prefix(data)
response = self.client.post(self.book_import_url, data)
self.assertEqual(response.status_code, 403)
response = self.client.post(self.book_process_import_url, {})
self.assertEqual(response.status_code, 403)
# user has sufficient permission to import
self.set_user_model_permission("change", "book")
response = self.client.get(self.book_import_url)
self.assertEqual(response.status_code, 200)
# POST the import form
input_format = "0"
filename = os.path.join(
os.path.dirname(__file__), os.path.pardir, "exports", "books.csv"
)
with open(filename, "rb") as f:
data = {
"format": input_format,
"import_file": f,
}
self._prepend_form_prefix(data)
response = self.client.post(self.book_import_url, data)
self.assertEqual(response.status_code, 200)
confirm_form = response.context["confirm_form"]
data = confirm_form.initial
self._prepend_form_prefix(data)
response = self.client.post(self.book_process_import_url, data)
self.assertEqual(response.status_code, 302)
@override_settings(IMPORT_EXPORT_EXPORT_PERMISSION_CODE="change")
def test_export_with_permission_set(self):
response = self.client.get(self.book_export_url)
self.assertEqual(response.status_code, 403)
data = {"format": "0"}
response = self.client.post(self.book_export_url, data)
self.assertEqual(response.status_code, 403)
self.set_user_model_permission("change", "book")
response = self.client.get(self.book_export_url)
self.assertEqual(response.status_code, 200)
data = {"format": "0"}
response = self.client.post(self.book_export_url, data)
self.assertEqual(response.status_code, 200)
@override_settings(IMPORT_EXPORT_EXPORT_PERMISSION_CODE="change")
def test_export_action_with_permission_set(self):
self.cat1 = Category.objects.create(name="Cat 1")
data = {
"action": ["export_admin_action"],
"_selected_action": [str(self.cat1.id)],
}
response = self.client.post(self.category_change_url, data)
self.assertEqual(response.status_code, 403)
self.set_user_model_permission("change", "category")
response = self.client.post(self.category_change_url, data)
self.assertEqual(response.status_code, 200)
@override_settings(IMPORT_EXPORT_EXPORT_PERMISSION_CODE="add")
def test_check_export_button(self):
self.set_user_model_permission("change", "book")
response = self.client.get(self.core_book_url)
widget = "import_link"
self.assertIn(widget, response.content.decode())
widget = "export_link"
self.assertNotIn(widget, response.content.decode())
@override_settings(IMPORT_EXPORT_IMPORT_PERMISSION_CODE="add")
def test_check_import_button(self):
self.set_user_model_permission("change", "book")
response = self.client.get(self.core_book_url)
widget = "import_link"
self.assertNotIn(widget, response.content.decode())
widget = "export_link"
self.assertIn(widget, response.content.decode())
@override_settings(IMPORT_EXPORT_EXPORT_PERMISSION_CODE="export")
def test_export_button_for_export_permission(self):
content_type = ContentType.objects.get_for_model(Category)
Permission.objects.create(
codename="export_category",
name="Can export category",
content_type=content_type,
)
self.set_user_model_permission("view", "category")
self.cat1 = Category.objects.create(name="Cat 1")
self.change_url = reverse(
"%s:%s_%s_change"
% (
"admin",
"core",
"category",
),
args=[self.cat1.pk],
)
response = self.client.get(self.change_url)
export_btn = (
'<input type="submit" value="Export" class="default" name="_export-item">'
)
self.assertNotIn(export_btn, response.content.decode())
# add export permission and the button should be displayed
self.set_user_model_permission("export", "category")
response = self.client.get(self.change_url)
self.assertIn(export_btn, response.content.decode())
@override_settings(IMPORT_EXPORT_EXPORT_PERMISSION_CODE="export")
def test_action_dropdown_contains_export_action(self):
content_type = ContentType.objects.get_for_model(Category)
Permission.objects.create(
codename="export_category",
name="Can export category",
content_type=content_type,
)
self.set_user_model_permission("view", "category")
self.cat1 = Category.objects.create(name="Cat 1")
response = self.client.get(self.category_change_url)
export_option = (
'<option value="export_admin_action">Export selected categories</option>'
)
self.assertNotIn(export_option, response.content.decode())
# add export permission and the button should be displayed
self.set_user_model_permission("export", "category")
response = self.client.get(self.category_change_url)
self.assertIn(export_option, response.content.decode())
| ImportExportPermissionTest |
python | plotly__plotly.py | plotly/graph_objs/table/header/_font.py | {
"start": 233,
"end": 17068
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "table.header"
_path_str = "table.header.font"
_valid_props = {
"color",
"colorsrc",
"family",
"familysrc",
"lineposition",
"linepositionsrc",
"shadow",
"shadowsrc",
"size",
"sizesrc",
"style",
"stylesrc",
"textcase",
"textcasesrc",
"variant",
"variantsrc",
"weight",
"weightsrc",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def linepositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`lineposition`.
The 'linepositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["linepositionsrc"]
@linepositionsrc.setter
def linepositionsrc(self, val):
self["linepositionsrc"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def shadowsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `shadow`.
The 'shadowsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["shadowsrc"]
@shadowsrc.setter
def shadowsrc(self, val):
self["shadowsrc"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def stylesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `style`.
The 'stylesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["stylesrc"]
@stylesrc.setter
def stylesrc(self, val):
self["stylesrc"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def textcasesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `textcase`.
The 'textcasesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textcasesrc"]
@textcasesrc.setter
def textcasesrc(self, val):
self["textcasesrc"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def variantsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `variant`.
The 'variantsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["variantsrc"]
@variantsrc.setter
def variantsrc(self, val):
self["variantsrc"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def weightsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `weight`.
The 'weightsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["weightsrc"]
@weightsrc.setter
def weightsrc(self, val):
self["weightsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
lineposition=None,
linepositionsrc=None,
shadow=None,
shadowsrc=None,
size=None,
sizesrc=None,
style=None,
stylesrc=None,
textcase=None,
textcasesrc=None,
variant=None,
variantsrc=None,
weight=None,
weightsrc=None,
**kwargs,
):
"""
Construct a new Font object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.table.header.Font`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.table.header.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.table.header.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("family", arg, family)
self._set_property("familysrc", arg, familysrc)
self._set_property("lineposition", arg, lineposition)
self._set_property("linepositionsrc", arg, linepositionsrc)
self._set_property("shadow", arg, shadow)
self._set_property("shadowsrc", arg, shadowsrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("style", arg, style)
self._set_property("stylesrc", arg, stylesrc)
self._set_property("textcase", arg, textcase)
self._set_property("textcasesrc", arg, textcasesrc)
self._set_property("variant", arg, variant)
self._set_property("variantsrc", arg, variantsrc)
self._set_property("weight", arg, weight)
self._set_property("weightsrc", arg, weightsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | pypa__pipenv | pipenv/patched/pip/_vendor/rich/table.py | {
"start": 6133,
"end": 40079
} | class ____(JupyterMixin):
"""A console renderable to draw a table.
Args:
*headers (Union[Column, str]): Column headers, either as a string, or :class:`~rich.table.Column` instance.
title (Union[str, Text], optional): The title of the table rendered at the top. Defaults to None.
caption (Union[str, Text], optional): The table caption rendered below. Defaults to None.
width (int, optional): The width in characters of the table, or ``None`` to automatically fit. Defaults to None.
min_width (Optional[int], optional): The minimum width of the table, or ``None`` for no minimum. Defaults to None.
box (box.Box, optional): One of the constants in box.py used to draw the edges (see :ref:`appendix_box`), or ``None`` for no box lines. Defaults to box.HEAVY_HEAD.
safe_box (Optional[bool], optional): Disable box characters that don't display on windows legacy terminal with *raster* fonts. Defaults to True.
padding (PaddingDimensions, optional): Padding for cells (top, right, bottom, left). Defaults to (0, 1).
collapse_padding (bool, optional): Enable collapsing of padding around cells. Defaults to False.
pad_edge (bool, optional): Enable padding of edge cells. Defaults to True.
expand (bool, optional): Expand the table to fit the available space if ``True``, otherwise the table width will be auto-calculated. Defaults to False.
show_header (bool, optional): Show a header row. Defaults to True.
show_footer (bool, optional): Show a footer row. Defaults to False.
show_edge (bool, optional): Draw a box around the outside of the table. Defaults to True.
show_lines (bool, optional): Draw lines between every row. Defaults to False.
leading (int, optional): Number of blank lines between rows (precludes ``show_lines``). Defaults to 0.
style (Union[str, Style], optional): Default style for the table. Defaults to "none".
row_styles (List[Union, str], optional): Optional list of row styles, if more than one style is given then the styles will alternate. Defaults to None.
header_style (Union[str, Style], optional): Style of the header. Defaults to "table.header".
footer_style (Union[str, Style], optional): Style of the footer. Defaults to "table.footer".
border_style (Union[str, Style], optional): Style of the border. Defaults to None.
title_style (Union[str, Style], optional): Style of the title. Defaults to None.
caption_style (Union[str, Style], optional): Style of the caption. Defaults to None.
title_justify (str, optional): Justify method for title. Defaults to "center".
caption_justify (str, optional): Justify method for caption. Defaults to "center".
highlight (bool, optional): Highlight cell contents (if str). Defaults to False.
"""
columns: List[Column]
rows: List[Row]
def __init__(
self,
*headers: Union[Column, str],
title: Optional[TextType] = None,
caption: Optional[TextType] = None,
width: Optional[int] = None,
min_width: Optional[int] = None,
box: Optional[box.Box] = box.HEAVY_HEAD,
safe_box: Optional[bool] = None,
padding: PaddingDimensions = (0, 1),
collapse_padding: bool = False,
pad_edge: bool = True,
expand: bool = False,
show_header: bool = True,
show_footer: bool = False,
show_edge: bool = True,
show_lines: bool = False,
leading: int = 0,
style: StyleType = "none",
row_styles: Optional[Iterable[StyleType]] = None,
header_style: Optional[StyleType] = "table.header",
footer_style: Optional[StyleType] = "table.footer",
border_style: Optional[StyleType] = None,
title_style: Optional[StyleType] = None,
caption_style: Optional[StyleType] = None,
title_justify: "JustifyMethod" = "center",
caption_justify: "JustifyMethod" = "center",
highlight: bool = False,
) -> None:
self.columns: List[Column] = []
self.rows: List[Row] = []
self.title = title
self.caption = caption
self.width = width
self.min_width = min_width
self.box = box
self.safe_box = safe_box
self._padding = Padding.unpack(padding)
self.pad_edge = pad_edge
self._expand = expand
self.show_header = show_header
self.show_footer = show_footer
self.show_edge = show_edge
self.show_lines = show_lines
self.leading = leading
self.collapse_padding = collapse_padding
self.style = style
self.header_style = header_style or ""
self.footer_style = footer_style or ""
self.border_style = border_style
self.title_style = title_style
self.caption_style = caption_style
self.title_justify: "JustifyMethod" = title_justify
self.caption_justify: "JustifyMethod" = caption_justify
self.highlight = highlight
self.row_styles: Sequence[StyleType] = list(row_styles or [])
append_column = self.columns.append
for header in headers:
if isinstance(header, str):
self.add_column(header=header)
else:
header._index = len(self.columns)
append_column(header)
@classmethod
def grid(
cls,
*headers: Union[Column, str],
padding: PaddingDimensions = 0,
collapse_padding: bool = True,
pad_edge: bool = False,
expand: bool = False,
) -> "Table":
"""Get a table with no lines, headers, or footer.
Args:
*headers (Union[Column, str]): Column headers, either as a string, or :class:`~rich.table.Column` instance.
padding (PaddingDimensions, optional): Get padding around cells. Defaults to 0.
collapse_padding (bool, optional): Enable collapsing of padding around cells. Defaults to True.
pad_edge (bool, optional): Enable padding around edges of table. Defaults to False.
expand (bool, optional): Expand the table to fit the available space if ``True``, otherwise the table width will be auto-calculated. Defaults to False.
Returns:
Table: A table instance.
"""
return cls(
*headers,
box=None,
padding=padding,
collapse_padding=collapse_padding,
show_header=False,
show_footer=False,
show_edge=False,
pad_edge=pad_edge,
expand=expand,
)
@property
def expand(self) -> bool:
"""Setting a non-None self.width implies expand."""
return self._expand or self.width is not None
@expand.setter
def expand(self, expand: bool) -> None:
"""Set expand."""
self._expand = expand
@property
def _extra_width(self) -> int:
"""Get extra width to add to cell content."""
width = 0
if self.box and self.show_edge:
width += 2
if self.box:
width += len(self.columns) - 1
return width
@property
def row_count(self) -> int:
"""Get the current number of rows."""
return len(self.rows)
def get_row_style(self, console: "Console", index: int) -> StyleType:
"""Get the current row style."""
style = Style.null()
if self.row_styles:
style += console.get_style(self.row_styles[index % len(self.row_styles)])
row_style = self.rows[index].style
if row_style is not None:
style += console.get_style(row_style)
return style
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> Measurement:
max_width = options.max_width
if self.width is not None:
max_width = self.width
if max_width < 0:
return Measurement(0, 0)
extra_width = self._extra_width
max_width = sum(
self._calculate_column_widths(
console, options.update_width(max_width - extra_width)
)
)
_measure_column = self._measure_column
measurements = [
_measure_column(console, options.update_width(max_width), column)
for column in self.columns
]
minimum_width = (
sum(measurement.minimum for measurement in measurements) + extra_width
)
maximum_width = (
sum(measurement.maximum for measurement in measurements) + extra_width
if (self.width is None)
else self.width
)
measurement = Measurement(minimum_width, maximum_width)
measurement = measurement.clamp(self.min_width)
return measurement
@property
def padding(self) -> Tuple[int, int, int, int]:
"""Get cell padding."""
return self._padding
@padding.setter
def padding(self, padding: PaddingDimensions) -> "Table":
"""Set cell padding."""
self._padding = Padding.unpack(padding)
return self
def add_column(
self,
header: "RenderableType" = "",
footer: "RenderableType" = "",
*,
header_style: Optional[StyleType] = None,
highlight: Optional[bool] = None,
footer_style: Optional[StyleType] = None,
style: Optional[StyleType] = None,
justify: "JustifyMethod" = "left",
vertical: "VerticalAlignMethod" = "top",
overflow: "OverflowMethod" = "ellipsis",
width: Optional[int] = None,
min_width: Optional[int] = None,
max_width: Optional[int] = None,
ratio: Optional[int] = None,
no_wrap: bool = False,
) -> None:
"""Add a column to the table.
Args:
header (RenderableType, optional): Text or renderable for the header.
Defaults to "".
footer (RenderableType, optional): Text or renderable for the footer.
Defaults to "".
header_style (Union[str, Style], optional): Style for the header, or None for default. Defaults to None.
highlight (bool, optional): Whether to highlight the text. The default of None uses the value of the table (self) object.
footer_style (Union[str, Style], optional): Style for the footer, or None for default. Defaults to None.
style (Union[str, Style], optional): Style for the column cells, or None for default. Defaults to None.
justify (JustifyMethod, optional): Alignment for cells. Defaults to "left".
vertical (VerticalAlignMethod, optional): Vertical alignment, one of "top", "middle", or "bottom". Defaults to "top".
overflow (OverflowMethod): Overflow method: "crop", "fold", "ellipsis". Defaults to "ellipsis".
width (int, optional): Desired width of column in characters, or None to fit to contents. Defaults to None.
min_width (Optional[int], optional): Minimum width of column, or ``None`` for no minimum. Defaults to None.
max_width (Optional[int], optional): Maximum width of column, or ``None`` for no maximum. Defaults to None.
ratio (int, optional): Flexible ratio for the column (requires ``Table.expand`` or ``Table.width``). Defaults to None.
no_wrap (bool, optional): Set to ``True`` to disable wrapping of this column.
"""
column = Column(
_index=len(self.columns),
header=header,
footer=footer,
header_style=header_style or "",
highlight=highlight if highlight is not None else self.highlight,
footer_style=footer_style or "",
style=style or "",
justify=justify,
vertical=vertical,
overflow=overflow,
width=width,
min_width=min_width,
max_width=max_width,
ratio=ratio,
no_wrap=no_wrap,
)
self.columns.append(column)
def add_row(
self,
*renderables: Optional["RenderableType"],
style: Optional[StyleType] = None,
end_section: bool = False,
) -> None:
"""Add a row of renderables.
Args:
*renderables (None or renderable): Each cell in a row must be a renderable object (including str),
or ``None`` for a blank cell.
style (StyleType, optional): An optional style to apply to the entire row. Defaults to None.
end_section (bool, optional): End a section and draw a line. Defaults to False.
Raises:
errors.NotRenderableError: If you add something that can't be rendered.
"""
def add_cell(column: Column, renderable: "RenderableType") -> None:
column._cells.append(renderable)
cell_renderables: List[Optional["RenderableType"]] = list(renderables)
columns = self.columns
if len(cell_renderables) < len(columns):
cell_renderables = [
*cell_renderables,
*[None] * (len(columns) - len(cell_renderables)),
]
for index, renderable in enumerate(cell_renderables):
if index == len(columns):
column = Column(_index=index, highlight=self.highlight)
for _ in self.rows:
add_cell(column, Text(""))
self.columns.append(column)
else:
column = columns[index]
if renderable is None:
add_cell(column, "")
elif is_renderable(renderable):
add_cell(column, renderable)
else:
raise errors.NotRenderableError(
f"unable to render {type(renderable).__name__}; a string or other renderable object is required"
)
self.rows.append(Row(style=style, end_section=end_section))
def add_section(self) -> None:
"""Add a new section (draw a line after current row)."""
if self.rows:
self.rows[-1].end_section = True
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
if not self.columns:
yield Segment("\n")
return
max_width = options.max_width
if self.width is not None:
max_width = self.width
extra_width = self._extra_width
widths = self._calculate_column_widths(
console, options.update_width(max_width - extra_width)
)
table_width = sum(widths) + extra_width
render_options = options.update(
width=table_width, highlight=self.highlight, height=None
)
def render_annotation(
text: TextType, style: StyleType, justify: "JustifyMethod" = "center"
) -> "RenderResult":
render_text = (
console.render_str(text, style=style, highlight=False)
if isinstance(text, str)
else text
)
return console.render(
render_text, options=render_options.update(justify=justify)
)
if self.title:
yield from render_annotation(
self.title,
style=Style.pick_first(self.title_style, "table.title"),
justify=self.title_justify,
)
yield from self._render(console, render_options, widths)
if self.caption:
yield from render_annotation(
self.caption,
style=Style.pick_first(self.caption_style, "table.caption"),
justify=self.caption_justify,
)
def _calculate_column_widths(
self, console: "Console", options: "ConsoleOptions"
) -> List[int]:
"""Calculate the widths of each column, including padding, not including borders."""
max_width = options.max_width
columns = self.columns
width_ranges = [
self._measure_column(console, options, column) for column in columns
]
widths = [_range.maximum or 1 for _range in width_ranges]
get_padding_width = self._get_padding_width
extra_width = self._extra_width
if self.expand:
ratios = [col.ratio or 0 for col in columns if col.flexible]
if any(ratios):
fixed_widths = [
0 if column.flexible else _range.maximum
for _range, column in zip(width_ranges, columns)
]
flex_minimum = [
(column.width or 1) + get_padding_width(column._index)
for column in columns
if column.flexible
]
flexible_width = max_width - sum(fixed_widths)
flex_widths = ratio_distribute(flexible_width, ratios, flex_minimum)
iter_flex_widths = iter(flex_widths)
for index, column in enumerate(columns):
if column.flexible:
widths[index] = fixed_widths[index] + next(iter_flex_widths)
table_width = sum(widths)
if table_width > max_width:
widths = self._collapse_widths(
widths,
[(column.width is None and not column.no_wrap) for column in columns],
max_width,
)
table_width = sum(widths)
# last resort, reduce columns evenly
if table_width > max_width:
excess_width = table_width - max_width
widths = ratio_reduce(excess_width, [1] * len(widths), widths, widths)
table_width = sum(widths)
width_ranges = [
self._measure_column(console, options.update_width(width), column)
for width, column in zip(widths, columns)
]
widths = [_range.maximum or 0 for _range in width_ranges]
if (table_width < max_width and self.expand) or (
self.min_width is not None and table_width < (self.min_width - extra_width)
):
_max_width = (
max_width
if self.min_width is None
else min(self.min_width - extra_width, max_width)
)
pad_widths = ratio_distribute(_max_width - table_width, widths)
widths = [_width + pad for _width, pad in zip(widths, pad_widths)]
return widths
@classmethod
def _collapse_widths(
cls, widths: List[int], wrapable: List[bool], max_width: int
) -> List[int]:
"""Reduce widths so that the total is under max_width.
Args:
widths (List[int]): List of widths.
wrapable (List[bool]): List of booleans that indicate if a column may shrink.
max_width (int): Maximum width to reduce to.
Returns:
List[int]: A new list of widths.
"""
total_width = sum(widths)
excess_width = total_width - max_width
if any(wrapable):
while total_width and excess_width > 0:
max_column = max(
width for width, allow_wrap in zip(widths, wrapable) if allow_wrap
)
second_max_column = max(
width if allow_wrap and width != max_column else 0
for width, allow_wrap in zip(widths, wrapable)
)
column_difference = max_column - second_max_column
ratios = [
(1 if (width == max_column and allow_wrap) else 0)
for width, allow_wrap in zip(widths, wrapable)
]
if not any(ratios) or not column_difference:
break
max_reduce = [min(excess_width, column_difference)] * len(widths)
widths = ratio_reduce(excess_width, ratios, max_reduce, widths)
total_width = sum(widths)
excess_width = total_width - max_width
return widths
def _get_cells(
self, console: "Console", column_index: int, column: Column
) -> Iterable[_Cell]:
"""Get all the cells with padding and optional header."""
collapse_padding = self.collapse_padding
pad_edge = self.pad_edge
padding = self.padding
any_padding = any(padding)
first_column = column_index == 0
last_column = column_index == len(self.columns) - 1
_padding_cache: Dict[Tuple[bool, bool], Tuple[int, int, int, int]] = {}
def get_padding(first_row: bool, last_row: bool) -> Tuple[int, int, int, int]:
cached = _padding_cache.get((first_row, last_row))
if cached:
return cached
top, right, bottom, left = padding
if collapse_padding:
if not first_column:
left = max(0, left - right)
if not last_row:
bottom = max(0, top - bottom)
if not pad_edge:
if first_column:
left = 0
if last_column:
right = 0
if first_row:
top = 0
if last_row:
bottom = 0
_padding = (top, right, bottom, left)
_padding_cache[(first_row, last_row)] = _padding
return _padding
raw_cells: List[Tuple[StyleType, "RenderableType"]] = []
_append = raw_cells.append
get_style = console.get_style
if self.show_header:
header_style = get_style(self.header_style or "") + get_style(
column.header_style
)
_append((header_style, column.header))
cell_style = get_style(column.style or "")
for cell in column.cells:
_append((cell_style, cell))
if self.show_footer:
footer_style = get_style(self.footer_style or "") + get_style(
column.footer_style
)
_append((footer_style, column.footer))
if any_padding:
_Padding = Padding
for first, last, (style, renderable) in loop_first_last(raw_cells):
yield _Cell(
style,
_Padding(renderable, get_padding(first, last)),
getattr(renderable, "vertical", None) or column.vertical,
)
else:
for style, renderable in raw_cells:
yield _Cell(
style,
renderable,
getattr(renderable, "vertical", None) or column.vertical,
)
def _get_padding_width(self, column_index: int) -> int:
"""Get extra width from padding."""
_, pad_right, _, pad_left = self.padding
if self.collapse_padding:
if column_index > 0:
pad_left = max(0, pad_left - pad_right)
return pad_left + pad_right
def _measure_column(
self,
console: "Console",
options: "ConsoleOptions",
column: Column,
) -> Measurement:
"""Get the minimum and maximum width of the column."""
max_width = options.max_width
if max_width < 1:
return Measurement(0, 0)
padding_width = self._get_padding_width(column._index)
if column.width is not None:
# Fixed width column
return Measurement(
column.width + padding_width, column.width + padding_width
).with_maximum(max_width)
# Flexible column, we need to measure contents
min_widths: List[int] = []
max_widths: List[int] = []
append_min = min_widths.append
append_max = max_widths.append
get_render_width = Measurement.get
for cell in self._get_cells(console, column._index, column):
_min, _max = get_render_width(console, options, cell.renderable)
append_min(_min)
append_max(_max)
measurement = Measurement(
max(min_widths) if min_widths else 1,
max(max_widths) if max_widths else max_width,
).with_maximum(max_width)
measurement = measurement.clamp(
None if column.min_width is None else column.min_width + padding_width,
None if column.max_width is None else column.max_width + padding_width,
)
return measurement
def _render(
self, console: "Console", options: "ConsoleOptions", widths: List[int]
) -> "RenderResult":
table_style = console.get_style(self.style or "")
border_style = table_style + console.get_style(self.border_style or "")
_column_cells = (
self._get_cells(console, column_index, column)
for column_index, column in enumerate(self.columns)
)
row_cells: List[Tuple[_Cell, ...]] = list(zip(*_column_cells))
_box = (
self.box.substitute(
options, safe=pick_bool(self.safe_box, console.safe_box)
)
if self.box
else None
)
_box = _box.get_plain_headed_box() if _box and not self.show_header else _box
new_line = Segment.line()
columns = self.columns
show_header = self.show_header
show_footer = self.show_footer
show_edge = self.show_edge
show_lines = self.show_lines
leading = self.leading
_Segment = Segment
if _box:
box_segments = [
(
_Segment(_box.head_left, border_style),
_Segment(_box.head_right, border_style),
_Segment(_box.head_vertical, border_style),
),
(
_Segment(_box.mid_left, border_style),
_Segment(_box.mid_right, border_style),
_Segment(_box.mid_vertical, border_style),
),
(
_Segment(_box.foot_left, border_style),
_Segment(_box.foot_right, border_style),
_Segment(_box.foot_vertical, border_style),
),
]
if show_edge:
yield _Segment(_box.get_top(widths), border_style)
yield new_line
else:
box_segments = []
get_row_style = self.get_row_style
get_style = console.get_style
for index, (first, last, row_cell) in enumerate(loop_first_last(row_cells)):
header_row = first and show_header
footer_row = last and show_footer
row = (
self.rows[index - show_header]
if (not header_row and not footer_row)
else None
)
max_height = 1
cells: List[List[List[Segment]]] = []
if header_row or footer_row:
row_style = Style.null()
else:
row_style = get_style(
get_row_style(console, index - 1 if show_header else index)
)
for width, cell, column in zip(widths, row_cell, columns):
render_options = options.update(
width=width,
justify=column.justify,
no_wrap=column.no_wrap,
overflow=column.overflow,
height=None,
highlight=column.highlight,
)
lines = console.render_lines(
cell.renderable,
render_options,
style=get_style(cell.style) + row_style,
)
max_height = max(max_height, len(lines))
cells.append(lines)
row_height = max(len(cell) for cell in cells)
def align_cell(
cell: List[List[Segment]],
vertical: "VerticalAlignMethod",
width: int,
style: Style,
) -> List[List[Segment]]:
if header_row:
vertical = "bottom"
elif footer_row:
vertical = "top"
if vertical == "top":
return _Segment.align_top(cell, width, row_height, style)
elif vertical == "middle":
return _Segment.align_middle(cell, width, row_height, style)
return _Segment.align_bottom(cell, width, row_height, style)
cells[:] = [
_Segment.set_shape(
align_cell(
cell,
_cell.vertical,
width,
get_style(_cell.style) + row_style,
),
width,
max_height,
)
for width, _cell, cell, column in zip(widths, row_cell, cells, columns)
]
if _box:
if last and show_footer:
yield _Segment(
_box.get_row(widths, "foot", edge=show_edge), border_style
)
yield new_line
left, right, _divider = box_segments[0 if first else (2 if last else 1)]
# If the column divider is whitespace also style it with the row background
divider = (
_divider
if _divider.text.strip()
else _Segment(
_divider.text, row_style.background_style + _divider.style
)
)
for line_no in range(max_height):
if show_edge:
yield left
for last_cell, rendered_cell in loop_last(cells):
yield from rendered_cell[line_no]
if not last_cell:
yield divider
if show_edge:
yield right
yield new_line
else:
for line_no in range(max_height):
for rendered_cell in cells:
yield from rendered_cell[line_no]
yield new_line
if _box and first and show_header:
yield _Segment(
_box.get_row(widths, "head", edge=show_edge), border_style
)
yield new_line
end_section = row and row.end_section
if _box and (show_lines or leading or end_section):
if (
not last
and not (show_footer and index >= len(row_cells) - 2)
and not (show_header and header_row)
):
if leading:
yield _Segment(
_box.get_row(widths, "mid", edge=show_edge) * leading,
border_style,
)
else:
yield _Segment(
_box.get_row(widths, "row", edge=show_edge), border_style
)
yield new_line
if _box and show_edge:
yield _Segment(_box.get_bottom(widths), border_style)
yield new_line
if __name__ == "__main__": # pragma: no cover
from pipenv.patched.pip._vendor.rich.console import Console
from pipenv.patched.pip._vendor.rich.highlighter import ReprHighlighter
from ._timer import timer
with timer("Table render"):
table = Table(
title="Star Wars Movies",
caption="Rich example table",
caption_justify="right",
)
table.add_column(
"Released", header_style="bright_cyan", style="cyan", no_wrap=True
)
table.add_column("Title", style="magenta")
table.add_column("Box Office", justify="right", style="green")
table.add_row(
"Dec 20, 2019",
"Star Wars: The Rise of Skywalker",
"$952,110,690",
)
table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347")
table.add_row(
"Dec 15, 2017",
"Star Wars Ep. V111: The Last Jedi",
"$1,332,539,889",
style="on black",
end_section=True,
)
table.add_row(
"Dec 16, 2016",
"Rogue One: A Star Wars Story",
"$1,332,439,889",
)
def header(text: str) -> None:
console.print()
console.rule(highlight(text))
console.print()
console = Console()
highlight = ReprHighlighter()
header("Example Table")
console.print(table, justify="center")
table.expand = True
header("expand=True")
console.print(table)
table.width = 50
header("width=50")
console.print(table, justify="center")
table.width = None
table.expand = False
table.row_styles = ["dim", "none"]
header("row_styles=['dim', 'none']")
console.print(table, justify="center")
table.width = None
table.expand = False
table.row_styles = ["dim", "none"]
table.leading = 1
header("leading=1, row_styles=['dim', 'none']")
console.print(table, justify="center")
table.width = None
table.expand = False
table.row_styles = ["dim", "none"]
table.show_lines = True
table.leading = 0
header("show_lines=True, row_styles=['dim', 'none']")
console.print(table, justify="center")
| Table |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/sqltypes.py | {
"start": 120597,
"end": 120802
} | class ____(Integer):
"""The SQL INT or INTEGER type.
.. seealso::
:class:`_types.Integer` - documentation for the base type.
"""
__visit_name__ = "INTEGER"
INT = INTEGER
| INTEGER |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/printing_package/package.py | {
"start": 237,
"end": 1139
} | class ____(MakefilePackage):
"""This package prints some output from its install method.
We use this to test whether that output is properly logged.
"""
homepage = "http://www.example.com/printing_package"
url = "http://www.unit-test-should-replace-this-url/trivial_install-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
def install(self, spec, prefix):
print("BEFORE INSTALL")
mkdirp(prefix)
touch(os.path.join(prefix, "dummyfile"))
print("AFTER INSTALL")
def check(self):
"""Run build-time tests."""
print("PRINTING PACKAGE CHECK")
def installcheck(self):
"""Run install-time tests."""
print("PRINTING PACKAGE INSTALLCHECK")
def test_print(self):
"""Test print example."""
print("Running test_print")
print("And a second command")
| PrintingPackage |
python | django__django | django/db/migrations/operations/models.py | {
"start": 30504,
"end": 31349
} | class ____(ModelOptionOperation):
"""Alter the model's managers."""
serialization_expand_args = ["managers"]
def __init__(self, name, managers):
self.managers = managers
super().__init__(name)
def deconstruct(self):
return (self.__class__.__qualname__, [self.name, self.managers], {})
def state_forwards(self, app_label, state):
state.alter_model_managers(app_label, self.name_lower, self.managers)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
pass
def database_backwards(self, app_label, schema_editor, from_state, to_state):
pass
def describe(self):
return "Change managers on %s" % self.name
@property
def migration_name_fragment(self):
return "alter_%s_managers" % self.name_lower
| AlterModelManagers |
python | prabhupant__python-ds | data_structures/bst/sorted_array_to_bst.py | {
"start": 0,
"end": 880
} | class ____:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
# get the middle of the array and make it root.
# middle of the left becomes left child
# middle of the right becomes right child
def sorted_array_to_bst(arr):
if not arr:
return None
mid = len(arr)//2
root = Node(arr[mid])
root.left = sorted_array_to_bst(arr[:mid])
root.right = sorted_array_to_bst(arr[mid+1:])
return root
def inorder(root):
if not root:
return None
stack = []
while True:
if root:
stack.append(root)
root = root.left
else:
if not stack:
break
root = stack.pop()
print(root.val, end=" ")
root = root.right
arr = [1,2,3,4,5,6,7,8,9]
root = sorted_array_to_bst(arr)
inorder(root)
| Node |
python | kamyu104__LeetCode-Solutions | Python/maximize-distance-to-closest-person.py | {
"start": 30,
"end": 467
} | class ____(object):
def maxDistToClosest(self, seats):
"""
:type seats: List[int]
:rtype: int
"""
prev, result = -1, 1
for i in xrange(len(seats)):
if seats[i]:
if prev < 0:
result = i
else:
result = max(result, (i-prev)//2)
prev = i
return max(result, len(seats)-1-prev)
| Solution |
python | python__mypy | mypy/test/teststubtest.py | {
"start": 2033,
"end": 2668
} | class ____(Mapping[str, object]):
__required_keys__: ClassVar[frozenset[str]]
__optional_keys__: ClassVar[frozenset[str]]
__total__: ClassVar[bool]
__readonly_keys__: ClassVar[frozenset[str]]
__mutable_keys__: ClassVar[frozenset[str]]
__closed__: ClassVar[bool | None]
__extra_items__: ClassVar[Any]
def overload(func: _T) -> _T: ...
def type_check_only(func: _T) -> _T: ...
def final(func: _T) -> _T: ...
"""
stubtest_builtins_stub = """
from typing import Generic, Mapping, Sequence, TypeVar, overload
T = TypeVar('T')
T_co = TypeVar('T_co', covariant=True)
KT = TypeVar('KT')
VT = TypeVar('VT')
| _TypedDict |
python | getsentry__sentry | src/sentry/sentry_metrics/querying/data/mapping/base.py | {
"start": 660,
"end": 2052
} | class ____:
def __init__(self):
self.mappers: set[type[Mapper]] = set()
def add(self, mapper: type[Mapper]) -> "MapperConfig":
self.mappers.add(mapper)
return self
def get(self, from_key: str | None = None, to_key: str | None = None) -> type[Mapper] | None:
for mapper in self.mappers:
if mapper.from_key == from_key:
return mapper
if mapper.to_key == to_key:
return mapper
return None
def get_or_create_mapper(
mapper_config: MapperConfig,
mappers: list[Mapper],
from_key: str | None = None,
to_key: str | None = None,
) -> Mapper | None:
# retrieve the mapper type that is applicable for the given key
mapper_class = mapper_config.get(from_key=from_key, to_key=to_key)
# check if a mapper of the type already exists
if mapper_class:
for mapper in mappers:
if mapper_class == type(mapper):
# if a mapper already exists, return the existing mapper
return mapper
else:
# if no mapper exists yet, instantiate the object and append it to the mappers list
mapper_instance = mapper_class()
mappers.append(mapper_instance)
return mapper_instance
else:
# if no mapper is configured for the key, return None
return None
| MapperConfig |
python | huggingface__transformers | src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py | {
"start": 179606,
"end": 203196
} | class ____(SeamlessM4Tv2PreTrainedModel, GenerationMixin):
input_modalities = ("audio", "text")
output_modalities = ("audio", "text")
_tied_weights_keys = {
"lm_head.weight": "shared.weight",
"text_encoder.embed_tokens.weight": "shared.weight",
"text_decoder.embed_tokens.weight": "shared.weight",
}
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel.__init__ with SeamlessM4T->SeamlessM4Tv2
def __init__(self, config, current_modality="text"):
r"""
current_modality (`str`, *optional*, defaults to `"text"`):
Default modality. Used to initialize the model.
"""
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
self.text_encoder = SeamlessM4Tv2Encoder(config)
self.speech_encoder = SeamlessM4Tv2SpeechEncoder(config)
self.text_decoder = SeamlessM4Tv2Decoder(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.current_modality = current_modality
if current_modality == "speech":
self.main_input_name = "input_features"
# these models already call post_init in their initialization
self.t2u_model = SeamlessM4Tv2TextToUnitForConditionalGeneration(config)
self.vocoder = SeamlessM4Tv2CodeHifiGan(config)
# Initialize weights and apply final processing
self.post_init()
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel.set_modality
def set_modality(self, modality="text"):
if modality == "text":
self.main_input_name = "input_ids"
self.current_modality = "text"
elif modality == "speech":
self.main_input_name = "input_features"
self.current_modality = "speech"
else:
raise ValueError(f"`modality={modality}` is not a valid modality. It must be `text` or `speech`.")
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel.get_encoder
def get_encoder(self):
if self.current_modality == "text":
return self.text_encoder
else:
return self.speech_encoder
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel.get_input_embeddings
def get_input_embeddings(self):
return self.text_decoder.embed_tokens
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel.set_input_embeddings
def set_input_embeddings(self, value):
self.text_encoder.embed_tokens = value
self.text_decoder.embed_tokens = value
self.shared = value
@auto_docstring(custom_args=SEAMLESS_M4T_V2_COMMON_CUSTOM_ARGS)
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TModel.forward with SeamlessM4T->SeamlessM4Tv2
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
input_features: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[Seq2SeqLMOutput, tuple[torch.FloatTensor]]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if use_cache:
logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
if input_ids is None and input_features is None and inputs_embeds is None and encoder_outputs is None:
raise ValueError(
"`input_ids`,`input_features`, `inputs_embeds` and `encoder_outputs` are all empty. Make sure at least one of them is not."
)
elif input_features is not None:
if input_ids is not None:
logger.warning(
"`input_ids` is not `None` but `input_features` has been given."
"`input_features` will be used in priority through the `speech_encoder`. "
"Make sure that `input_features` and `input_ids` are mutually exclusive."
)
if inputs_embeds is not None:
logger.warning(
"`inputs_embeds` is not `None` but `input_features` has been given."
"`input_features` will be used in priority through `speech_encoder`. "
"`inputs_embeds` will be ignored."
)
# if encoder_outputs is not None, it's probably used within a .generate method so no need to warn
logger.warning(
"This calls the same method `forward` as `SeamlessM4Tv2ForTextToText` and `SeamlessM4Tv2ForSpeechToText`"
"depending on the input modality. If you want to generate speech, use the `generate` method."
)
self.set_modality("speech")
encoder_outputs = self.speech_encoder(
input_features=input_features,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
elif input_ids is not None or inputs_embeds is not None:
# if encoder_outputs is not None, it's probably used within a .generate method so no need to warn
logger.warning(
"This calls the same method `forward` as `SeamlessM4Tv2ForTextToText` and `SeamlessM4Tv2ForSpeechToText`"
"depending on the input modality. If you want to generate speech, use the `generate` method."
)
self.set_modality("text")
encoder_outputs = self.text_encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
encoder_attention_mask = attention_mask
# input modality = speech so new attention mask
if self.current_modality == "speech" and attention_mask is not None:
sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(
encoder_outputs[0].device
)
encoder_attention_mask = _compute_new_attention_mask(
hidden_states=encoder_outputs[0], seq_lens=sub_sampled_lengths
)
# decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
decoder_outputs = self.text_decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
lm_logits = self.lm_head(decoder_outputs[0])
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(lm_logits.device)
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
outputs = decoder_outputs + encoder_outputs
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return Seq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@torch.no_grad()
def generate(
self,
input_ids: Optional[torch.Tensor] = None,
input_features: Optional[torch.Tensor] = None,
return_intermediate_token_ids: Optional[bool] = None,
tgt_lang: Optional[str] = None,
speaker_id: Optional[int] = 0,
generate_speech: Optional[bool] = True,
**kwargs,
) -> Union[torch.Tensor, SeamlessM4Tv2GenerationOutput]:
"""
Generates translated token ids and/or translated audio waveforms.
<Tip>
This method successively calls the `.generate` function of two different sub-models. You can specify keyword
arguments at two different levels: general arguments that will be passed to both models, or prefixed arguments
that will be passed to one of them.
For example, calling `.generate(input_ids=input_ids, num_beams=4, speech_do_sample=True)` will successively
perform beam-search decoding on the text model, and multinomial beam-search sampling on the speech model.
For an overview of generation strategies and code examples, check out the [following
guide](./generation_strategies).
</Tip>
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`SeamlessM4TTokenizer`] or [`SeamlessM4TProcessor`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`, *optional*):
Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the
[`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details.
return_intermediate_token_ids (`bool`, *optional*):
If `True`, also returns the intermediate generated text and unit tokens. Set to `True` if you also want
to get translated text alongside the audio.
Note that if `generate_speech=False`, this parameter will be ignored and
the text tokens are returned.
tgt_lang (`str`, *optional*):
The language to use as target language for translation.
speaker_id (`int`, *optional*, defaults to 0):
The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`.
generate_speech (`bool`, *optional*, defaults to `True`):
If `False`, will only returns the text tokens and won't generate speech.
kwargs (*optional*):
Remaining dictioy of keyword arguments that will be passed to [`GenerationMixin.generate`]. Keyword
arguments are of two types:
- Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model,
except for `decoder_input_ids` which will only be passed through the text components.
- With a *text_* or *speech_* prefix, they will be input for the `generate` method of the
text model and speech model respectively. It has the priority over the keywords without a prefix.
This means you can, for example, specify a generation strategy for one generation but not for the
other.
Returns:
`Union[SeamlessM4Tv2GenerationOutput, tuple[Tensor], ModelOutput]`:
- If `generate_speech` and `return_intermediate_token_ids`, returns [`SeamlessM4Tv2GenerationOutput`].
- If `generate_speech` and not `return_intermediate_token_ids`, returns a tuple composed of waveforms of
shape `(batch_size, sequence_length)` and `waveform_lengths` which gives the length of each sample.
- If `generate_speech=False`, it will returns `ModelOutput`.
"""
if input_ids is None and input_features is None and kwargs.get("inputs_embeds") is None:
raise ValueError(
"`input_ids`,`input_features` and `inputs_embeds` are all empty. Make sure at least one of them is not."
)
if generate_speech and tgt_lang is None:
raise ValueError("You must specify a `tgt_lang` to generate translated speech.")
if tgt_lang is not None:
# also accept __xxx__
tgt_lang = tgt_lang.replace("__", "")
if generate_speech:
keys_to_check = ["text_decoder_lang_to_code_id", "t2u_lang_code_to_id", "vocoder_lang_code_to_id"]
else:
keys_to_check = ["text_decoder_lang_to_code_id"]
for key in keys_to_check:
lang_code_to_id = getattr(self.generation_config, key, None)
if lang_code_to_id is None:
raise ValueError(
f"""This model generation config doesn't have a `{key}` key which maps the target language
to the right token id. Make sure to load the right generation config."""
)
elif tgt_lang not in lang_code_to_id:
raise ValueError(
f"""`tgt_lang={tgt_lang}` is not supported by this model.
Please specify a `tgt_lang` in {",".join(lang_code_to_id.keys())}. Note that SeamlessM4Tv2 supports
more languages for text translation than for speech synthesis."""
)
batch_size = (
len(input_features)
if input_features is not None
else (len(input_ids) if input_ids is not None else len(kwargs.get("inputs_embeds")))
)
kwargs_text, kwargs_speech = format_speech_generation_kwargs(kwargs)
kwargs_text["output_hidden_states"] = True
kwargs_text["return_dict_in_generate"] = True
kwargs_text["output_scores"] = True
text_decoder_input_ids = kwargs_text.get("decoder_input_ids")
# overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids.
if tgt_lang is not None:
# tgt_lang gets priority over decoder input ids
text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size, device=self.device)
kwargs_text["decoder_input_ids"] = text_decoder_input_ids
# first generation
if input_features is not None:
self.set_modality("speech")
if input_ids is not None:
logger.warning(
"`input_features` and `input_ids` are both non empty. `input_features` will be used in priority "
"through the speech encoder. Make sure `input_features=None` if you want to use the text encoder."
)
text_generation_output = super().generate(input_features=input_features, **kwargs_text)
else:
self.set_modality("text")
text_generation_output = super().generate(input_ids=input_ids, input_features=None, **kwargs_text)
sequences = text_generation_output.sequences
if not generate_speech:
return text_generation_output
# prepare second generation
num_return_sequences = len(sequences) // batch_size
attention_mask = kwargs_speech.get("attention_mask", kwargs_text.get("attention_mask", None))
# get encoder last hidden states
if self.current_modality == "speech":
# get last_hidden_state from encoder - must do a pass through the speech encoder
encoder_hidden_states = self.speech_encoder(
input_features=input_features, attention_mask=attention_mask
).last_hidden_state
# input modality = speech so new attention mask for the decoder
if attention_mask is not None:
sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(
encoder_hidden_states.device
)
attention_mask = _compute_new_attention_mask(
hidden_states=encoder_hidden_states, seq_lens=sub_sampled_lengths
)
else:
encoder_hidden_states = text_generation_output.encoder_hidden_states[-1]
if attention_mask is not None:
# repeat attention mask alongside batch dimension
attention_mask = torch.repeat_interleave(attention_mask, num_return_sequences, dim=0)
# repeat attention mask alongside batch dimension
encoder_hidden_states = torch.repeat_interleave(encoder_hidden_states, num_return_sequences, dim=0)
# get decoder last hidden state - must do a pass through the text decoder
t2u_input_embeds = self.text_decoder(
input_ids=sequences[:, :-1], # Manually trim the final EOS token
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=attention_mask,
).last_hidden_state
pad_token_id = self.generation_config.pad_token_id
# Compute new attention mask
seq_lens = (sequences[:, :-1] != pad_token_id).int().sum(1)
t2u_model_attention_mask = _compute_new_attention_mask(t2u_input_embeds, seq_lens)
kwargs_speech["attention_mask"] = t2u_model_attention_mask
# REMOVE EOS and lang_id
t2u_input_ids = sequences[:, 2:-1]
# replace every other EOS
t2u_input_ids = torch.masked_fill(
t2u_input_ids, t2u_input_ids == self.generation_config.eos_token_id, pad_token_id
)
# compute t2u_char_input_ids
t2u_subwords = self._indices_to_subwords(t2u_input_ids)
t2u_char_count_per_id = self._count_character_length_in_subword(
t2u_input_ids, t2u_subwords, pad_token_id=pad_token_id
)
# Add pads for lang, EOS tokens as per NLLB "source" tokenizer mode.
pad_zero = t2u_char_count_per_id.new_zeros((t2u_char_count_per_id.shape[0], 1))
t2u_char_count_per_id = torch.cat([pad_zero, t2u_char_count_per_id, pad_zero], dim=1)
t2u_char_input_ids = self._get_char_input_ids(
t2u_input_ids, t2u_subwords, t2u_char_count_per_id, pad_token_id=pad_token_id
)
# second pass
t2u_output = self.t2u_model(
inputs_embeds=t2u_input_embeds,
char_input_ids=t2u_char_input_ids,
char_count_per_id=t2u_char_count_per_id,
**kwargs_speech,
)
t2u_logits = t2u_output[0]
padding_mask = t2u_output[1].bool()
# The text-to-unit model is non auto-regressive. We keep the ability to use sampling with temperature
temperature = kwargs_speech.get("temperature", None)
if (temperature is None or temperature == 1.0) or not kwargs_speech.get("do_sample", False):
unit_ids = t2u_logits.argmax(dim=-1)
else:
t2u_logits = t2u_logits / temperature
# apply softmax
probs = nn.functional.softmax(t2u_logits, dim=-1)
# reshape to 2D: (batch_size, seq_len, t2u_vocab_size) -> (batch_size*seq_len, t2u_vocab_size)
probs = probs.reshape((-1, probs.shape[2]))
# multinomial then reshape : (batch_size*seq_len)-> (batch_size,seq_len)
unit_ids = torch.multinomial(probs, num_samples=1).view(t2u_logits.shape[0], -1)
output_unit_ids = unit_ids.detach().clone()
replace_mask = (unit_ids == self.config.t2u_eos_token_id) | (~padding_mask)
# replace eos per pad
unit_ids = unit_ids.masked_fill(replace_mask, self.config.t2u_pad_token_id)
# offset of control symbols
unit_ids = torch.where(
unit_ids == self.config.t2u_pad_token_id, unit_ids, unit_ids - self.config.vocoder_offset
)
vocoder_tgt_lang_id = self.generation_config.vocoder_lang_code_to_id.get(tgt_lang)
vocoder_tgt_lang_id = torch.tensor([[vocoder_tgt_lang_id]] * len(unit_ids), device=self.device)
speaker_id = torch.tensor([[speaker_id]] * len(unit_ids), device=self.device)
waveform, waveform_lengths = self.vocoder(
input_ids=unit_ids, speaker_id=speaker_id, lang_id=vocoder_tgt_lang_id
)
if return_intermediate_token_ids:
return SeamlessM4Tv2GenerationOutput(
waveform=waveform,
waveform_lengths=waveform_lengths,
sequences=sequences,
unit_sequences=output_unit_ids,
)
return waveform, waveform_lengths
__all__ = [
"SeamlessM4Tv2ForTextToSpeech",
"SeamlessM4Tv2ForSpeechToSpeech",
"SeamlessM4Tv2ForTextToText",
"SeamlessM4Tv2ForSpeechToText",
"SeamlessM4Tv2Model",
"SeamlessM4Tv2PreTrainedModel",
]
| SeamlessM4Tv2Model |
python | pypa__warehouse | warehouse/legacy/api/xmlrpc/cache/services.py | {
"start": 1877,
"end": 2416
} | class ____:
def __init__(self, url, purger, **kwargs):
self._purger = purger
@classmethod
def create_service(cls, context, request):
return cls(
request.registry.settings.get("warehouse.xmlrpc.cache.url"),
request.task(purge_tag).delay,
)
def fetch(self, func, args, kwargs, key, tag, expires):
return func(*args, **kwargs)
def purge(self, tag):
return
def purge_tags(self, tags):
for tag in tags:
self._purger(tag)
| NullXMLRPCCache |
python | pytorch__pytorch | torch/nn/modules/padding.py | {
"start": 10018,
"end": 12265
} | class ____(_ConstantPadNd):
r"""Pads the input tensor boundaries with a constant value.
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
Args:
padding (int, tuple): the size of the padding. If is `int`, uses the same
padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
:math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
- Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
:math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
Examples::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = nn.ConstantPad2d(2, 3.5)
>>> input = torch.randn(1, 2, 2)
>>> input
tensor([[[ 1.6585, 0.4320],
[-0.8701, -0.4649]]])
>>> m(input)
tensor([[[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
[ 3.5000, 3.5000, 1.6585, 0.4320, 3.5000, 3.5000],
[ 3.5000, 3.5000, -0.8701, -0.4649, 3.5000, 3.5000],
[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000]]])
>>> # using different paddings for different sides
>>> m = nn.ConstantPad2d((3, 0, 2, 1), 3.5)
>>> m(input)
tensor([[[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000],
[ 3.5000, 3.5000, 3.5000, 1.6585, 0.4320],
[ 3.5000, 3.5000, 3.5000, -0.8701, -0.4649],
[ 3.5000, 3.5000, 3.5000, 3.5000, 3.5000]]])
"""
__constants__ = ["padding", "value"]
# pyrefly: ignore [bad-override]
padding: tuple[int, int, int, int]
def __init__(self, padding: _size_4_t, value: float) -> None:
super().__init__(value)
self.padding = _quadruple(padding)
| ConstantPad2d |
python | mahmoud__boltons | boltons/ioutils.py | {
"start": 7964,
"end": 10329
} | class ____(SpooledIOBase):
"""
SpooledBytesIO is a spooled file-like-object that only accepts bytes. On
Python 2.x this means the 'str' type; on Python 3.x this means the 'bytes'
type. Bytes are written in and retrieved exactly as given, but it will
raise TypeErrors if something other than bytes are written.
Example::
>>> from boltons import ioutils
>>> with ioutils.SpooledBytesIO() as f:
... f.write(b"Happy IO")
... _ = f.seek(0)
... isinstance(f.getvalue(), bytes)
True
"""
def read(self, n=-1):
self._checkClosed()
return self.buffer.read(n)
def write(self, s):
self._checkClosed()
if not isinstance(s, bytes):
raise TypeError("bytes expected, got {}".format(
type(s).__name__
))
if self.tell() + len(s) >= self._max_size:
self.rollover()
self.buffer.write(s)
def seek(self, pos, mode=0):
self._checkClosed()
return self.buffer.seek(pos, mode)
def readline(self, length=None):
self._checkClosed()
if length:
return self.buffer.readline(length)
else:
return self.buffer.readline()
def readlines(self, sizehint=0):
return self.buffer.readlines(sizehint)
def rollover(self):
"""Roll the StringIO over to a TempFile"""
if not self._rolled:
tmp = TemporaryFile(dir=self._dir)
pos = self.buffer.tell()
tmp.write(self.buffer.getvalue())
tmp.seek(pos)
self.buffer.close()
self._buffer = tmp
@property
def _rolled(self):
return not isinstance(self.buffer, BytesIO)
@property
def buffer(self):
try:
return self._buffer
except AttributeError:
self._buffer = BytesIO()
return self._buffer
@property
def len(self):
"""Determine the length of the file"""
pos = self.tell()
if self._rolled:
self.seek(0)
val = os.fstat(self.fileno()).st_size
else:
self.seek(0, os.SEEK_END)
val = self.tell()
self.seek(pos)
return val
def tell(self):
self._checkClosed()
return self.buffer.tell()
| SpooledBytesIO |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/trajectory.py | {
"start": 300,
"end": 563
} | class ____(NamedTuple):
"""
Stores observation, action, and reward for an agent. Does not have additional
fields that are present in AgentExperience.
"""
obs: List[np.ndarray]
reward: float
action: ActionTuple
done: bool
| AgentStatus |
python | huggingface__transformers | src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py | {
"start": 2190,
"end": 21923
} | class ____(PreTrainedModel, GenerationMixin):
r"""
[`VisionEncoderDecoderModel`] is a generic model class that will be instantiated as a transformer architecture with
one of the base vision model classes of the library as encoder and another one as decoder when created with the
:meth*~transformers.AutoModel.from_pretrained* class method for the encoder and
:meth*~transformers.AutoModelForCausalLM.from_pretrained* class method for the decoder.
"""
config: VisionEncoderDecoderConfig
base_model_prefix = "vision_encoder_decoder"
main_input_name = "pixel_values"
input_modalities = ("image", "text")
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
def __init__(
self,
config: Optional[PreTrainedConfig] = None,
encoder: Optional[PreTrainedModel] = None,
decoder: Optional[PreTrainedModel] = None,
):
r"""
encoder (`PreTrainedModel`, *optional*):
The encoder model to use.
decoder (`PreTrainedModel`, *optional*):
The decoder model to use.
"""
if config is None and (encoder is None or decoder is None):
raise ValueError("Either a configuration or an encoder and a decoder has to be provided.")
if config is None:
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
else:
if not isinstance(config, self.config_class):
raise ValueError(f"Config: {config} has to be of type {self.config_class}")
if config.decoder.cross_attention_hidden_size is not None:
if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
raise ValueError(
"If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal"
f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for"
" `config.encoder.hidden_size`."
)
# initialize with config
# make sure input & output embeddings is not tied
config.tie_word_embeddings = False
super().__init__(config)
if encoder is None:
encoder = AutoModel.from_config(config.encoder)
if decoder is None:
decoder = AutoModelForCausalLM.from_config(config.decoder)
self.encoder = encoder
self.decoder = decoder
self._can_compile_fullgraph = decoder._can_compile_fullgraph
if self.encoder.config.to_dict() != self.config.encoder.to_dict():
logger.warning(
f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config:"
f" {self.config.encoder}"
)
if self.decoder.config.to_dict() != self.config.decoder.to_dict():
logger.warning(
f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:"
f" {self.config.decoder}"
)
# make sure that the individual model's config refers to the shared config
# so that the updates to the config will be synced
self.config.encoder._attn_implementation = self.encoder.config._attn_implementation
self.config.decoder._attn_implementation = self.decoder.config._attn_implementation
self.encoder.config = self.config.encoder
self.decoder.config = self.config.decoder
# encoder outputs might need to be projected to different dimension for decoder
if (
self.encoder.config.hidden_size != self.decoder.config.hidden_size
and self.decoder.config.cross_attention_hidden_size is None
):
self.enc_to_dec_proj = nn.Linear(self.encoder.config.hidden_size, self.decoder.config.hidden_size)
if self.encoder.get_output_embeddings() is not None:
raise ValueError(
f"The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head"
)
self.post_init()
def get_input_embeddings(self):
return self.decoder.get_input_embeddings()
def get_output_embeddings(self):
return self.decoder.get_output_embeddings()
def set_output_embeddings(self, new_embeddings):
return self.decoder.set_output_embeddings(new_embeddings)
@classmethod
def from_encoder_decoder_pretrained(
cls,
encoder_pretrained_model_name_or_path: Optional[str] = None,
decoder_pretrained_model_name_or_path: Optional[str] = None,
*model_args,
**kwargs,
) -> PreTrainedModel:
r"""
Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model
checkpoints.
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
the model, you need to first set it back in training mode with `model.train()`.
Params:
encoder_pretrained_model_name_or_path (`str`, *optional*):
Information necessary to initiate the image encoder. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. An
example is `google/vit-base-patch16-224-in21k`.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
Information necessary to initiate the text decoder. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
model_args (remaining positional arguments, *optional*):
All remaining positional arguments will be passed to the underlying model's `__init__` method.
kwargs (remaining dictionary of keyword arguments, *optional*):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
`output_attentions=True`).
- To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.
- To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
- To update the parent model configuration, do not use a prefix for each configuration parameter.
Behaves differently depending on whether a `config` is provided or automatically loaded.
Example:
```python
>>> from transformers import VisionEncoderDecoderModel
>>> # initialize a vit-bert from a pretrained ViT and a pretrained BERT model. Note that the cross-attention layers will be randomly initialized
>>> model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(
... "google/vit-base-patch16-224-in21k", "google-bert/bert-base-uncased"
... )
>>> # saving model after fine-tuning
>>> model.save_pretrained("./vit-bert")
>>> # load fine-tuned model
>>> model = VisionEncoderDecoderModel.from_pretrained("./vit-bert")
```"""
kwargs_encoder = {
argument[len("encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("encoder_")
}
kwargs_decoder = {
argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
}
# remove encoder, decoder kwargs from kwargs
for key in kwargs_encoder:
del kwargs["encoder_" + key]
for key in kwargs_decoder:
del kwargs["decoder_" + key]
# Load and initialize the encoder and decoder
# The distinction between encoder and decoder at the model level is made
# by the value of the flag `is_decoder` that we need to set correctly.
encoder = kwargs_encoder.pop("model", None)
if encoder is None:
if encoder_pretrained_model_name_or_path is None:
raise ValueError(
"If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has "
"to be defined."
)
if "config" not in kwargs_encoder:
encoder_config, kwargs_encoder = AutoConfig.from_pretrained(
encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True
)
if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
logger.info(
f"Initializing {encoder_pretrained_model_name_or_path} as a encoder model "
"from a decoder model. Cross-attention and causal mask are disabled."
)
encoder_config.is_decoder = False
encoder_config.add_cross_attention = False
kwargs_encoder["config"] = encoder_config
encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)
decoder = kwargs_decoder.pop("model", None)
if decoder is None:
if decoder_pretrained_model_name_or_path is None:
raise ValueError(
"If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
"to be defined."
)
if "config" not in kwargs_decoder:
decoder_config, kwargs_decoder = AutoConfig.from_pretrained(
decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True
)
if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
logger.info(
f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
)
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
kwargs_decoder["config"] = decoder_config
if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
logger.warning(
f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
"make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
"passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a "
"`decoder_config` to `.from_encoder_decoder_pretrained(...)`"
)
decoder = AutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)
# instantiate config with corresponding kwargs
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)
# make sure input & output embeddings is not tied
config.tie_word_embeddings = False
return cls(encoder=encoder, decoder=decoder, config=config)
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.BoolTensor] = None,
encoder_outputs: Optional[tuple[torch.FloatTensor]] = None,
past_key_values: Optional[Cache] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> Union[tuple[torch.FloatTensor], Seq2SeqLMOutput]:
r"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
For training, `decoder_input_ids` are automatically created by the model by shifting the `labels` to the
right, replacing -100 by the `pad_token_id` and prepending them with the `decoder_start_token_id`.
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert `decoder_input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss for the decoder. Indices should be in `[-100, 0,
..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
Examples:
```python
>>> from transformers import AutoProcessor, VisionEncoderDecoderModel
>>> import requests
>>> from PIL import Image
>>> import torch
>>> processor = AutoProcessor.from_pretrained("microsoft/trocr-base-handwritten")
>>> model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")
>>> # load image from the IAM dataset
>>> url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
>>> # training
>>> model.config.decoder_start_token_id = processor.tokenizer.eos_token_id
>>> model.config.pad_token_id = processor.tokenizer.pad_token_id
>>> model.config.vocab_size = model.config.decoder.vocab_size
>>> pixel_values = processor(image, return_tensors="pt").pixel_values
>>> text = "hello world"
>>> labels = processor.tokenizer(text, return_tensors="pt").input_ids
>>> outputs = model(pixel_values=pixel_values, labels=labels)
>>> loss = outputs.loss
>>> # inference (generation)
>>> generated_ids = model.generate(pixel_values)
>>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")}
kwargs_decoder = {
argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
}
if encoder_outputs is None:
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
encoder_outputs = self.encoder(
pixel_values=pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
**kwargs_encoder,
)
elif isinstance(encoder_outputs, tuple):
encoder_outputs = BaseModelOutput(*encoder_outputs)
encoder_hidden_states = encoder_outputs[0]
# optionally project encoder_hidden_states
if (
self.encoder.config.hidden_size != self.decoder.config.hidden_size
and self.decoder.config.cross_attention_hidden_size is None
):
encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)
# else:
encoder_attention_mask = None
if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
use_cache=use_cache,
past_key_values=past_key_values,
return_dict=return_dict,
cache_position=cache_position,
**kwargs_decoder,
)
# Compute loss independent from decoder (as some shift the logits inside them)
loss = None
if labels is not None:
logits = decoder_outputs.logits if return_dict else decoder_outputs[0]
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.reshape(-1, self.decoder.config.vocab_size), labels.reshape(-1))
if not return_dict:
if loss is not None:
return (loss,) + decoder_outputs + encoder_outputs
else:
return decoder_outputs + encoder_outputs
return Seq2SeqLMOutput(
loss=loss,
logits=decoder_outputs.logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
__all__ = ["VisionEncoderDecoderModel"]
| VisionEncoderDecoderModel |
python | joke2k__faker | faker/providers/address/en_NZ/__init__.py | {
"start": 47,
"end": 7138
} | class ____(AddressProvider):
city_prefixes = (
"North",
"East",
"West",
"South",
"New",
"Lake",
"Port",
"Upper",
"Lower",
"High",
"Mount",
)
city_suffixes = (
"town",
"ton",
"land",
"ville",
"berg",
"burgh",
"borough",
"bury",
"burn",
"ing",
"port",
"mouth",
"stone",
"ings",
"mouth",
"fort",
"haven",
"leigh",
"side",
"gate",
"neath",
"side",
" Flats",
" Hill",
)
building_number_formats = ("%##", "%#", "%")
street_suffixes = (
# Most common:
"Arcade",
"Arcade",
"Arcade",
"Avenue",
"Avenue",
"Avenue",
"Avenue",
"Avenue",
"Avenue",
"Avenue",
"Avenue",
"Beach Road",
"Beach Road",
"Beach Road",
"Beach Road",
"Crescent",
"Crescent",
"Crescent",
"Crescent",
"Crescent",
"Drive",
"Drive",
"Drive",
"Drive",
"Mews",
"Mews",
"Mews",
"Place",
"Place",
"Place",
"Place",
"Range Road",
"Range Road",
"Road",
"Road",
"Road",
"Road",
"Road",
"Road",
"Road",
"Road",
"Road",
"Street",
"Street",
"Street",
"Street",
"Street",
"Street",
"Street",
"Street",
"Street",
"Street",
"Street",
"Street",
"Street",
"Street",
"Street",
"Street",
"Street",
"Street",
"Street",
"Street",
"Street",
"Terrace",
"Terrace",
"Terrace",
"Way",
"Way",
"Way",
# Other:
"Access",
"Alley",
"Alleyway",
"Amble",
"Anchorage",
"Approach",
"Broadway",
"Bypass",
"Causeway",
"Centre",
"Circle",
"Circuit",
"Close",
"Concourse",
"Copse",
"Corner",
"Court",
"Cove",
"Crest",
"Cross",
"Crossing",
"Cutting",
"Esplanade",
"Flats",
"Gardens",
"Grove",
"Heights",
"Highway",
"Lane",
"Line",
"Keys",
"Parade",
"Park",
"Pass",
"Plaza",
"Point",
"Quay",
"Reserve",
"Ridge",
"Rise",
"Square",
"Track",
"Trail",
"View",
)
# Māori nouns commonly present in placenames.
te_reo_parts = (
"ara",
"awa",
"horo",
"kawa",
"koro",
"kowhai",
"manawa",
"mata",
"maunga",
"moko",
"motu",
"ngauru",
"pa" "papa",
"po",
"puke",
"rangi",
"rohe",
"rongo",
"roto",
"tahi",
"tai",
"tangi",
"tau",
"tere",
"tipu",
"wai",
"waka",
"whaka",
"whanga",
"whare",
"weka",
)
# Māori endings (usually adjectives) commonly present in placenames.
te_reo_endings = (
"hanga",
"hope",
"iti",
"iti",
"kiwi",
"makau",
"nui",
"nui",
"nui",
"nuku",
"roa",
"rua",
"tanga",
"tapu",
"toa",
"whenua",
"whero",
"whitu",
)
postcode_formats = (
# as per https://en.wikipedia.org/wiki/Postcodes_in_New_Zealand
# Northland
"0%##",
# Auckland
"1###",
"20##",
"21##",
"22##",
"23##",
"24##",
"25##",
"26##",
# Central North Island
"3###",
"4###",
# Lower North Island
"50##",
"51##",
"52##",
"53##",
"55##",
"57##",
"58##",
# Wellington
"60##",
"61##",
"62##",
"64##",
"69##",
# Upper South Island
"7###",
# Christchurch
"80##",
"81##",
"82##",
"84##",
"85##",
"86##",
"88##",
"89##",
# Southland
"90##",
"92##",
"93##",
"94##",
"95##",
"96##",
"97##",
"98##",
)
city_formats = (
"{{first_name}}{{city_suffix}}",
"{{last_name}}{{city_suffix}}",
"{{last_name}}{{city_suffix}}",
"{{last_name}}{{city_suffix}}",
"{{last_name}}{{city_suffix}}",
"{{last_name}}{{city_suffix}}",
"{{city_prefix}} {{last_name}}{{city_suffix}}",
"{{te_reo_first}}{{te_reo_ending}}",
"{{te_reo_first}}{{te_reo_ending}}",
"{{te_reo_first}}{{te_reo_ending}}",
"{{te_reo_first}}{{te_reo_ending}}",
"{{te_reo_first}}{{te_reo_part}}{{te_reo_ending}}",
"{{te_reo_first}}{{te_reo_part}}{{te_reo_ending}}",
)
street_name_formats = (
"{{first_name}} {{street_suffix}}",
"{{last_name}} {{street_suffix}}",
"{{last_name}} {{street_suffix}}",
"{{last_name}} {{street_suffix}}",
"{{last_name}}-{{last_name}} {{street_suffix}}",
"{{te_reo_first}}{{te_reo_ending}} {{street_suffix}}",
"{{te_reo_first}}{{te_reo_ending}} {{street_suffix}}",
"{{te_reo_first}}{{te_reo_part}}{{te_reo_ending}} {{street_suffix}}",
)
street_address_formats = (
"{{building_number}} {{street_name}}",
"{{building_number}} {{street_name}}",
"{{building_number}} {{street_name}}",
"{{building_number}} {{street_name}}\nRD {{rd_number}}",
"{{secondary_address}}\n{{building_number}} {{street_name}}",
"PO Box {{building_number}}",
)
address_formats = ("{{street_address}}\n{{city}} {{postcode}}",)
secondary_address_formats = (
"Apt. %##",
"Flat %#",
"Suite %##",
"Unit %#",
"Level %",
)
def te_reo_part(self) -> str:
return self.random_element(self.te_reo_parts)
def te_reo_first(self) -> str:
return str(self.random_element(self.te_reo_parts)).capitalize()
def te_reo_ending(self) -> str:
return self.random_element(self.te_reo_parts + self.te_reo_endings)
def city_prefix(self) -> str:
return self.random_element(self.city_prefixes)
def city_suffix(self) -> str:
return self.random_element(self.city_suffixes)
def rd_number(self) -> str:
return self.random_element([str(i) for i in range(1, 11)])
def secondary_address(self) -> str:
return self.numerify(self.random_element(self.secondary_address_formats))
| Provider |
python | pydantic__pydantic | pydantic-core/tests/validators/test_allow_partial.py | {
"start": 3253,
"end": 16449
} | class ____(Mapping):
def __init__(self, d):
self._d = d
def __getitem__(self, key):
return self._d[key]
def __iter__(self):
return iter(self._d)
def __len__(self):
return len(self._d)
def test_dict():
v = SchemaValidator(core_schema.dict_schema(core_schema.int_schema(), core_schema.int_schema()))
assert v.validate_python({'1': 2, 3: '4'}) == snapshot({1: 2, 3: 4})
assert v.validate_python({'1': 2, 3: '4'}, allow_partial=True) == snapshot({1: 2, 3: 4})
assert v.validate_python(MyMapping({'1': 2, 3: '4'}), allow_partial=True) == snapshot({1: 2, 3: 4})
with pytest.raises(ValidationError) as exc_info:
v.validate_python({'1': 2, 3: 'wrong'})
assert exc_info.value.errors(include_url=False) == snapshot(
[
{
'type': 'int_parsing',
'loc': (3,),
'msg': 'Input should be a valid integer, unable to parse string as an integer',
'input': 'wrong',
}
]
)
assert v.validate_python({'1': 2, 3: 'x'}, allow_partial=True) == snapshot({1: 2})
assert v.validate_python(MyMapping({'1': 2, 3: 'x'}), allow_partial=True) == snapshot({1: 2})
assert v.validate_python({'1': 2, 3: 4, 5: '6', 7: 'x'}, allow_partial=True) == snapshot({1: 2, 3: 4, 5: 6})
with pytest.raises(ValidationError, match='Input should be a valid integer'):
v.validate_python({'1': 2, 3: 4, 5: 'x', 7: '8'})
with pytest.raises(ValidationError, match='Input should be a valid integer'):
v.validate_python({'1': 2, 3: 4, 5: 'x', 7: 'x'})
with pytest.raises(ValidationError, match='Input should be a valid integer'):
v.validate_python({'1': 2, 3: 4, 'x': 6})
def test_dict_list():
v = SchemaValidator(
core_schema.dict_schema(core_schema.int_schema(), core_schema.list_schema(core_schema.int_schema(ge=10)))
)
assert v.validate_python({'1': [20, 30], 3: [40, '50']}, allow_partial=True) == snapshot({1: [20, 30], 3: [40, 50]})
assert v.validate_python({'1': [20, 30], 3: [40, 5]}, allow_partial=True) == snapshot({1: [20, 30], 3: [40]})
with pytest.raises(ValidationError, match=r'1\.1\s+Input should be greater than or equal to 10'):
v.validate_python({'1': [20, 3], 3: [40, 50]}, allow_partial=True)
def test_partial_typed_dict():
v = SchemaValidator(
core_schema.typed_dict_schema(
{
'a': core_schema.typed_dict_field(core_schema.int_schema(gt=10)),
'b': core_schema.typed_dict_field(core_schema.int_schema(gt=10)),
'c': core_schema.typed_dict_field(core_schema.int_schema(gt=10)),
},
total=False,
)
)
assert v.validate_python({'a': 11, 'b': '12', 'c': 13}) == snapshot(IsStrictDict(a=11, b=12, c=13))
assert v.validate_python({'a': 11, 'c': 13, 'b': '12'}) == snapshot(IsStrictDict(a=11, b=12, c=13))
assert v.validate_python(MyMapping({'a': 11, 'c': 13, 'b': '12'})) == snapshot(IsStrictDict(a=11, b=12, c=13))
assert v.validate_python({'a': 11, 'b': '12', 'c': 13}, allow_partial=True) == snapshot({'a': 11, 'b': 12, 'c': 13})
with pytest.raises(ValidationError) as exc_info:
v.validate_python({'a': 11, 'b': '12', 'c': 1})
assert exc_info.value.errors(include_url=False) == snapshot(
[
{
'type': 'greater_than',
'loc': ('c',),
'msg': 'Input should be greater than 10',
'input': 1,
'ctx': {'gt': 10},
}
]
)
assert v.validate_python({'a': 11, 'b': '12', 'c': 1}, allow_partial=True) == snapshot(IsStrictDict(a=11, b=12))
assert v.validate_python(MyMapping({'a': 11, 'b': '12', 'c': 1}), allow_partial=True) == snapshot(
IsStrictDict(a=11, b=12)
)
assert v.validate_python({'a': 11, 'c': 13, 'b': 1}, allow_partial=True) == snapshot(IsStrictDict(a=11, c=13))
with pytest.raises(ValidationError) as exc_info:
v.validate_python({'a': 11, 'c': 1, 'b': 12}, allow_partial=True)
assert exc_info.value.errors(include_url=False) == snapshot(
[
{
'type': 'greater_than',
'loc': ('c',),
'msg': 'Input should be greater than 10',
'input': 1,
'ctx': {'gt': 10},
}
]
)
with pytest.raises(ValidationError, match=r'c\s+Input should be greater than 10'):
v.validate_python(MyMapping({'a': 11, 'c': 1, 'b': 12}), allow_partial=True)
# validate strings
assert v.validate_strings({'a': '11', 'b': '22'}) == snapshot({'a': 11, 'b': 22})
with pytest.raises(ValidationError, match='Input should be greater than 10'):
v.validate_strings({'a': '11', 'b': '2'})
assert v.validate_strings({'a': '11', 'b': '2'}, allow_partial=True) == snapshot({'a': 11})
assert v.validate_json(b'{"b": "12", "a": 11, "c": 13}', allow_partial=True) == IsStrictDict(a=11, b=12, c=13)
assert v.validate_json(b'{"b": "12", "a": 11, "c": 13', allow_partial=True) == IsStrictDict(a=11, b=12, c=13)
assert v.validate_json(b'{"a": 11, "b": "12", "c": 1', allow_partial=True) == IsStrictDict(a=11, b=12)
assert v.validate_json(b'{"a": 11, "b": "12", "c":', allow_partial=True) == IsStrictDict(a=11, b=12)
assert v.validate_json(b'{"a": 11, "b": "12", "c"', allow_partial=True) == IsStrictDict(a=11, b=12)
assert v.validate_json(b'{"a": 11, "b": "12", "c', allow_partial=True) == IsStrictDict(a=11, b=12)
assert v.validate_json(b'{"a": 11, "b": "12", "', allow_partial=True) == IsStrictDict(a=11, b=12)
assert v.validate_json(b'{"a": 11, "b": "12", ', allow_partial=True) == IsStrictDict(a=11, b=12)
assert v.validate_json(b'{"a": 11, "b": "12",', allow_partial=True) == IsStrictDict(a=11, b=12)
assert v.validate_json(b'{"a": 11, "b": "12"', allow_partial=True) == IsStrictDict(a=11, b=12)
def test_non_partial_typed_dict():
v = SchemaValidator(
core_schema.typed_dict_schema(
{
'a': core_schema.typed_dict_field(core_schema.int_schema(gt=10)),
'b': core_schema.typed_dict_field(core_schema.int_schema(gt=10), required=True),
'c': core_schema.typed_dict_field(core_schema.int_schema(gt=10)),
},
total=False,
)
)
assert v.validate_python({'a': 11, 'b': '12', 'c': 13}) == snapshot({'a': 11, 'b': 12, 'c': 13})
with pytest.raises(ValidationError, match='Input should be greater than 10'):
v.validate_python({'a': 11, 'b': '12', 'c': 1})
assert v.validate_python({'a': 11, 'b': '12', 'c': 1}, allow_partial=True) == snapshot({'a': 11, 'b': 12})
with pytest.raises(ValidationError, match=r'b\s+Field required'):
v.validate_python({'a': 11, 'c': 12}, allow_partial=True)
with pytest.raises(ValidationError, match=r'b\s+Input should be greater than 10'):
v.validate_python({'a': 11, 'c': 12, 'b': 1}, allow_partial=True)
def test_double_nested():
v = SchemaValidator(
core_schema.typed_dict_schema(
{
'a': core_schema.typed_dict_field(core_schema.int_schema(gt=10)),
'b': core_schema.typed_dict_field(
core_schema.list_schema(
core_schema.dict_schema(core_schema.str_schema(), core_schema.int_schema(ge=10))
)
),
},
total=False,
)
)
assert v.validate_python({'a': 11, 'b': [{'a': 10, 'b': 20}, {'a': 30, 'b': 40}]}) == snapshot(
{'a': 11, 'b': [{'a': 10, 'b': 20}, {'a': 30, 'b': 40}]}
)
assert v.validate_python({'a': 11, 'b': [{'a': 10, 'b': 20}, {'a': 30, 'b': 4}]}, allow_partial=True) == snapshot(
{'a': 11, 'b': [{'a': 10, 'b': 20}, {'a': 30}]}
)
assert v.validate_python({'a': 11, 'b': [{'a': 10, 'b': 20}, {'a': 30, 123: 4}]}, allow_partial=True) == snapshot(
{'a': 11, 'b': [{'a': 10, 'b': 20}]}
)
# the first element of the list is invalid, so the whole list is invalid
assert v.validate_python({'a': 11, 'b': [{'a': 10, 'b': 2}, {'a': 30}]}, allow_partial=True) == snapshot({'a': 11})
with pytest.raises(ValidationError, match=r'b\.0\.b\s+Input should be greater than or equal to 10'):
v.validate_python({'b': [{'a': 10, 'b': 2}, {'a': 30}], 'a': 11}, allow_partial=True)
with pytest.raises(ValidationError, match=r'b\.1\.a\s+Input should be greater than or equal to 10'):
v.validate_python({'b': [{'a': 10, 'b': 20}, {'a': 3}], 'a': 11}, allow_partial=True)
assert v.validate_python({'a': 11, 'b': [{'a': 1, 'b': 20}, {'a': 3, 'b': 40}]}, allow_partial=True) == snapshot(
{'a': 11}
)
json = b'{"a": 11, "b": [{"a": 10, "b": 20}, {"a": 30, "b": 40}]}'
assert v.validate_json(json, allow_partial=True) == snapshot(
{'a': 11, 'b': [{'a': 10, 'b': 20}, {'a': 30, 'b': 40}]}
)
for i in range(1, len(json)):
value = v.validate_json(json[:i], allow_partial=True)
assert isinstance(value, dict)
def test_tuple_list():
"""Tuples don't support partial, so behaviour should be disabled."""
v = SchemaValidator(
core_schema.tuple_positional_schema(
[core_schema.list_schema(core_schema.int_schema()), core_schema.int_schema()]
)
)
assert v.validate_python([['1', '2'], '3'], allow_partial=True) == snapshot(([1, 2], 3))
with pytest.raises(ValidationError, match=r'1\s+Input should be a valid integer'):
v.validate_python([['1', '2'], 'x'], allow_partial=True)
with pytest.raises(ValidationError, match=r'0\.1\s+Input should be a valid integer'):
v.validate_python([['1', 'x'], '2'], allow_partial=True)
def test_dataclass():
"""Tuples don't support partial, so behaviour should be disabled."""
schema = core_schema.dataclass_args_schema(
'MyDataclass',
[
core_schema.dataclass_field(name='a', schema=core_schema.str_schema(), kw_only=False),
core_schema.dataclass_field(
name='b', schema=core_schema.list_schema(core_schema.str_schema(min_length=2)), kw_only=False
),
],
)
v = SchemaValidator(schema)
assert v.validate_python({'a': 'x', 'b': ['ab', 'cd']}) == snapshot(({'a': 'x', 'b': ['ab', 'cd']}, None))
assert v.validate_python({'a': 'x', 'b': ['ab', 'cd']}, allow_partial=True) == snapshot(
({'a': 'x', 'b': ['ab', 'cd']}, None)
)
with pytest.raises(ValidationError, match=r'b\.1\s+String should have at least 2 characters'):
v.validate_python({'a': 'x', 'b': ['ab', 'c']}, allow_partial=True)
def test_nullable():
v = SchemaValidator(core_schema.nullable_schema(core_schema.list_schema(core_schema.str_schema(min_length=2))))
assert v.validate_python(None, allow_partial=True) is None
assert v.validate_python(['ab', 'cd'], allow_partial=True) == ['ab', 'cd']
assert v.validate_python(['ab', 'c'], allow_partial=True) == ['ab']
assert v.validate_json('["ab", "cd"]', allow_partial=True) == ['ab', 'cd']
assert v.validate_json('["ab", "cd', allow_partial=True) == ['ab']
assert v.validate_json('["ab", "cd', allow_partial='trailing-strings') == ['ab', 'cd']
assert v.validate_json('["ab", "c', allow_partial=True) == ['ab']
assert v.validate_json('["ab", "c', allow_partial='trailing-strings') == ['ab']
@pytest.mark.parametrize(
'json_nested_type', [None, core_schema.dict_schema(core_schema.str_schema(), core_schema.int_schema())]
)
def test_json(json_nested_type):
v = SchemaValidator(core_schema.list_schema(core_schema.json_schema(json_nested_type)))
assert v.validate_python(['{"a": 1}', '{"b": 2}']) == snapshot([{'a': 1}, {'b': 2}])
assert v.validate_python(['{"a": 1}', '{"b": 2}'], allow_partial=True) == snapshot([{'a': 1}, {'b': 2}])
assert v.validate_python(['{"a": 1}', 'xxx'], allow_partial=True) == snapshot([{'a': 1}])
assert v.validate_python(['{"a": 1}', '{"b": 2'], allow_partial=True) == snapshot([{'a': 1}, {'b': 2}])
assert v.validate_json('["{\\"a\\": 1}", "{\\"b\\": 2}', allow_partial='trailing-strings') == snapshot(
[{'a': 1}, {'b': 2}]
)
assert v.validate_json('["{\\"a\\": 1}", "{\\"b\\": 2', allow_partial='trailing-strings') == snapshot(
[{'a': 1}, {'b': 2}]
)
def test_json_trailing_strings():
v = SchemaValidator(core_schema.list_schema(core_schema.json_schema()))
assert v.validate_python(['{"a": 1}', '{"b": "x'], allow_partial=True) == snapshot([{'a': 1}, {}])
assert v.validate_python(['{"a": 1}', '{"b": "x'], allow_partial='trailing-strings') == snapshot(
[{'a': 1}, {'b': 'x'}]
)
assert v.validate_json('["{\\"a\\": 1}", "{\\"b\\": 2}"]') == snapshot([{'a': 1}, {'b': 2}])
assert v.validate_json('["{\\"a\\": 1}", "{\\"b\\": 2, \\"c\\": \\"x', allow_partial=True) == snapshot([{'a': 1}])
assert v.validate_json(
'["{\\"a\\": 1}", "{\\"b\\": 2, \\"c\\": \\"x', allow_partial='trailing-strings'
) == snapshot([{'a': 1}, {'b': 2, 'c': 'x'}])
| MyMapping |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/distributions.py | {
"start": 3214,
"end": 4312
class ____(DiscreteDistInstance):
    """Categorical distribution over discrete actions, parameterized by logits.

    NOTE(review): the class name is masked as `____` in this extract; the
    surrounding record identifies it as `CategoricalDistInstance`.
    """

    def __init__(self, logits):
        # logits: unnormalized scores; probabilities are their softmax over the
        # last dimension. Presumably shaped (batch, num_actions) — TODO confirm,
        # since deterministic_sample/entropy reduce over a fixed dimension.
        super().__init__()
        self.logits = logits
        self.probs = torch.softmax(self.logits, dim=-1)

    def sample(self):
        # Draw one action index per row from the categorical probabilities.
        return torch.multinomial(self.probs, 1)

    def deterministic_sample(self):
        # Most-likely action per row; keepdim preserves the column shape that
        # sample() produces.
        return torch.argmax(self.probs, dim=1, keepdim=True)

    def pdf(self, value):
        # Probability of each chosen action in `value`.
        # This function is equivalent to torch.diag(self.probs.T[value.flatten().long()]),
        # but torch.diag is not supported by ONNX export.
        idx = torch.arange(start=0, end=len(value)).unsqueeze(-1)
        return torch.gather(
            self.probs.permute(1, 0)[value.flatten().long()], -1, idx
        ).squeeze(-1)

    def log_prob(self, value):
        # EPSILON guards against log(0) for zero-probability actions.
        return torch.log(self.pdf(value) + EPSILON)

    def all_log_prob(self):
        # Log-probabilities of every action, not just the chosen one.
        return torch.log(self.probs + EPSILON)

    def entropy(self):
        # Shannon entropy -sum(p * log p) over actions, with a trailing
        # singleton dimension restored by unsqueeze.
        return -torch.sum(
            self.probs * torch.log(self.probs + EPSILON), dim=-1
        ).unsqueeze(-1)

    def exported_model_output(self):
        # The exported (ONNX) model emits a stochastic sample.
        return self.sample()
| CategoricalDistInstance |
python | allegroai__clearml | clearml/backend_api/services/v2_9/queues.py | {
"start": 39581,
"end": 40532
class ____(Request):
    """
    Gets the next task from the top of the queue (FIFO). The task entry is removed from the queue.

    :param queue: Queue id
    :type queue: str
    """

    _service = "queues"
    _action = "get_next_task"
    _version = "2.9"
    _schema = {
        "definitions": {},
        "properties": {"queue": {"description": "Queue id", "type": "string"}},
        "required": ["queue"],
        "type": "object",
    }

    def __init__(self, queue: str, **kwargs: Any) -> None:
        # Zero-argument super() instead of the original hard-coded
        # `super(GetNextTaskRequest, self)`: that name is not defined in this
        # extract (the class is declared as `____`), so the original would
        # raise NameError at call time. The zero-arg form is also immune to
        # future renames.
        super().__init__(**kwargs)
        self.queue = queue

    @schema_property("queue")
    def queue(self) -> str:
        # Backing attribute is maintained by the setter below.
        return self._property_queue

    @queue.setter
    def queue(self, value: str) -> None:
        if value is None:
            # None clears the property without type validation.
            self._property_queue = None
            return
        # six.string_types keeps the historical str/unicode check intact.
        self.assert_isinstance(value, "queue", six.string_types)
        self._property_queue = value
| GetNextTaskRequest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictClosed3.py | {
"start": 394,
"end": 447
class ____(Parent1, extra_items=int):
    # Subclass of Parent1 passing the `extra_items=int` class keyword —
    # presumably a TypedDict whose undeclared items must be int
    # (closed-TypedDict semantics, PEP 728) — TODO confirm against Parent1.
    # NOTE(review): class name masked as `____`; record identifies it as Child1_2.
    pass
| Child1_2 |
python | google__jax | jaxlib/mosaic/python/layout_defs.py | {
"start": 1422,
"end": 1574
class ____(enum.IntEnum):
    """Implicit-dimension codes, encoded as negative integers.

    NOTE(review): class name masked as `____`; record identifies it as ImplicitDim.
    """

    MINOR = -1
    SECOND_MINOR = -2
    MINOR_AND_SECOND_MINOR = -3

    def __repr__(self) -> str:
        # Render as the bare integer value rather than the default
        # `<ImplicitDim.MINOR: -1>` enum repr.
        return f"{self.value}"
| ImplicitDim |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.