language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | yaml__pyyaml | lib/yaml/composer.py | {
"start": 169,
"end": 4883
} | class ____:
def __init__(self):
self.anchors = {}
def check_node(self):
# Drop the STREAM-START event.
if self.check_event(StreamStartEvent):
self.get_event()
# If there are more documents available?
return not self.check_event(StreamEndEvent)
def get_node(self):
# Get the root node of the next document.
if not self.check_event(StreamEndEvent):
return self.compose_document()
def get_single_node(self):
# Drop the STREAM-START event.
self.get_event()
# Compose a document if the stream is not empty.
document = None
if not self.check_event(StreamEndEvent):
document = self.compose_document()
# Ensure that the stream contains no more documents.
if not self.check_event(StreamEndEvent):
event = self.get_event()
raise ComposerError("expected a single document in the stream",
document.start_mark, "but found another document",
event.start_mark)
# Drop the STREAM-END event.
self.get_event()
return document
def compose_document(self):
# Drop the DOCUMENT-START event.
self.get_event()
# Compose the root node.
node = self.compose_node(None, None)
# Drop the DOCUMENT-END event.
self.get_event()
self.anchors = {}
return node
def compose_node(self, parent, index):
if self.check_event(AliasEvent):
event = self.get_event()
anchor = event.anchor
if anchor not in self.anchors:
raise ComposerError(None, None, "found undefined alias %r"
% anchor, event.start_mark)
return self.anchors[anchor]
event = self.peek_event()
anchor = event.anchor
if anchor is not None:
if anchor in self.anchors:
raise ComposerError("found duplicate anchor %r; first occurrence"
% anchor, self.anchors[anchor].start_mark,
"second occurrence", event.start_mark)
self.descend_resolver(parent, index)
if self.check_event(ScalarEvent):
node = self.compose_scalar_node(anchor)
elif self.check_event(SequenceStartEvent):
node = self.compose_sequence_node(anchor)
elif self.check_event(MappingStartEvent):
node = self.compose_mapping_node(anchor)
self.ascend_resolver()
return node
def compose_scalar_node(self, anchor):
event = self.get_event()
tag = event.tag
if tag is None or tag == '!':
tag = self.resolve(ScalarNode, event.value, event.implicit)
node = ScalarNode(tag, event.value,
event.start_mark, event.end_mark, style=event.style)
if anchor is not None:
self.anchors[anchor] = node
return node
def compose_sequence_node(self, anchor):
start_event = self.get_event()
tag = start_event.tag
if tag is None or tag == '!':
tag = self.resolve(SequenceNode, None, start_event.implicit)
node = SequenceNode(tag, [],
start_event.start_mark, None,
flow_style=start_event.flow_style)
if anchor is not None:
self.anchors[anchor] = node
index = 0
while not self.check_event(SequenceEndEvent):
node.value.append(self.compose_node(node, index))
index += 1
end_event = self.get_event()
node.end_mark = end_event.end_mark
return node
def compose_mapping_node(self, anchor):
start_event = self.get_event()
tag = start_event.tag
if tag is None or tag == '!':
tag = self.resolve(MappingNode, None, start_event.implicit)
node = MappingNode(tag, [],
start_event.start_mark, None,
flow_style=start_event.flow_style)
if anchor is not None:
self.anchors[anchor] = node
while not self.check_event(MappingEndEvent):
#key_event = self.peek_event()
item_key = self.compose_node(node, None)
#if item_key in node.value:
# raise ComposerError("while composing a mapping", start_event.start_mark,
# "found duplicate key", key_event.start_mark)
item_value = self.compose_node(node, item_key)
#node.value[item_key] = item_value
node.value.append((item_key, item_value))
end_event = self.get_event()
node.end_mark = end_event.end_mark
return node
| Composer |
python | doocs__leetcode | solution/2100-2199/2113.Elements in Array After Removing and Replacing Elements/Solution.py | {
"start": 0,
"end": 394
} | class ____:
def elementInNums(self, nums: List[int], queries: List[List[int]]) -> List[int]:
n, m = len(nums), len(queries)
ans = [-1] * m
for j, (t, i) in enumerate(queries):
t %= 2 * n
if t < n and i < n - t:
ans[j] = nums[i + t]
elif t > n and i < t - n:
ans[j] = nums[i]
return ans
| Solution |
python | mlflow__mlflow | tests/pytorch/iris.py | {
"start": 2848,
"end": 3554
} | class ____(IrisClassificationBase):
def training_step(self, batch, batch_idx):
x, y = batch
logits = self.forward(x)
loss = self.cross_entropy_loss(logits, y)
self.train_acc(torch.argmax(logits, dim=1), y)
self.log("train_acc", self.train_acc.compute(), on_step=False, on_epoch=True)
self.log("loss", loss)
return {"loss": loss}
def test_step(self, batch, batch_idx):
x, y = batch
logits = self.forward(x)
loss = F.cross_entropy(logits, y)
self.test_acc(torch.argmax(logits, dim=1), y)
self.log("test_loss", loss)
self.log("test_acc", self.test_acc.compute())
| IrisClassificationWithoutValidation |
python | getsentry__sentry | src/sentry/issues/endpoints/organization_codeowners_associations.py | {
"start": 769,
"end": 2325
} | class ____(OrganizationEndpoint):
owner = ApiOwner.ISSUES
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
permission_classes = (OrganizationIntegrationsLoosePermission,)
def get(self, request: Request, organization: Organization) -> Response:
"""
Returns all ProjectCodeOwners associations for an organization as a dict with projects as keys
e.g. {"projectSlug": {associations: {...}, errors: {...}}, ...]
"""
projects = Project.objects.filter(
organization=organization,
status=ObjectStatus.ACTIVE,
)
project_code_owners = ProjectCodeOwners.objects.filter(project__in=projects)
provider = request.GET.get("provider")
if provider:
org_integrations = integration_service.get_organization_integrations(
providers=[provider],
organization_ids=[pco.project.organization_id for pco in project_code_owners],
)
project_code_owners = project_code_owners.filter(
repository_project_path_config__organization_integration_id__in={
oi.id for oi in org_integrations
}
)
result = {}
for pco in project_code_owners:
associations, errors = build_codeowners_associations(pco.raw, pco.project)
result[pco.project.slug] = {"associations": associations, "errors": errors}
return self.respond(result, status=status.HTTP_200_OK)
| OrganizationCodeOwnersAssociationsEndpoint |
python | ipython__ipython | IPython/extensions/tests/test_deduperreload.py | {
"start": 64024,
"end": 64815
} | class ____(ShellFixture):
def test_reload_enums(self):
self.shell.magic_autoreload("2")
mod_name, mod_fn = self.new_module(
"""
from enum import Enum
class MyEnum(Enum):
A = 'A'
B = 'B'
""",
)
self.shell.run_code("import %s" % mod_name)
self.shell.run_code("pass")
mod = sys.modules[mod_name]
self.write_file(
mod_fn,
"""
from enum import Enum
class MyEnum(Enum):
A = 'A'
B = 'B'
C = 'C'
""",
)
self.shell.run_code("pass")
assert mod.MyEnum.C.value == "C"
if __name__ == "__main__":
unittest.main()
| TestAutoreloadEnum |
python | doocs__leetcode | solution/1100-1199/1102.Path With Maximum Minimum Value/Solution.py | {
"start": 0,
"end": 741
} | class ____:
def maximumMinimumPath(self, grid: List[List[int]]) -> int:
def find(x: int) -> int:
if p[x] != x:
p[x] = find(p[x])
return p[x]
m, n = len(grid), len(grid[0])
p = list(range(m * n))
q = [(v, i, j) for i, row in enumerate(grid) for j, v in enumerate(row)]
q.sort()
ans = 0
dirs = (-1, 0, 1, 0, -1)
vis = set()
while find(0) != find(m * n - 1):
v, i, j = q.pop()
ans = v
vis.add((i, j))
for a, b in pairwise(dirs):
x, y = i + a, j + b
if (x, y) in vis:
p[find(i * n + j)] = find(x * n + y)
return ans
| Solution |
python | google__jax | jax/experimental/pallas/ops/gpu/blackwell_matmul_mgpu.py | {
"start": 1075,
"end": 12105
} | class ____:
tile_m: int
tile_n: int
tile_k: int
max_concurrent_steps: int
collective: bool
epilogue_tile_n: int = 64
grid_minor_dim: MatmulDimension = MatmulDimension.N
grid_tile_width: int = 1
def matmul_kernel(a, b, config: TuningConfig):
dtype = a.dtype
if a.dtype != b.dtype:
raise ValueError(
f"Matmul LHS and RHS have incompatible dtypes {a.dtype} vs {b.dtype}"
)
m, k = a.shape
k2, n = b.shape
if k != k2:
raise ValueError(
f"Matmul LHS and RHS have incompatible shapes {a.shape} vs {b.shape}"
)
collective = config.collective
tile_m, tile_n, tile_k = (config.tile_m, config.tile_n, config.tile_k)
epilogue_tile_n = config.epilogue_tile_n
if tile_n % epilogue_tile_n != 0:
raise ValueError(
f"{tile_n=} must be divisible by {epilogue_tile_n=}"
)
block_tile_m = tile_m
block_tile_n = tile_n
if collective:
tile_m *= 2
tile_n *= 2
swizzle = plgpu.find_swizzle(tile_k * jnp.dtype(dtype).itemsize * 8)
swizzle_elems = swizzle // jnp.dtype(dtype).itemsize
transforms = (
plgpu.TilingTransform((8, swizzle_elems)),
plgpu.SwizzleTransform(swizzle),
)
out_swizzle = plgpu.find_swizzle(epilogue_tile_n * jnp.dtype(dtype).itemsize * 8)
out_swizzle_elems = out_swizzle // jnp.dtype(dtype).itemsize
out_transforms = (
plgpu.TilingTransform((8, out_swizzle_elems)),
plgpu.SwizzleTransform(out_swizzle),
)
if m % tile_m != 0:
raise ValueError(f"{m=} must be divisible by {tile_m=}")
if n % tile_n != 0:
raise ValueError(f"{n=} must be divisible by {tile_n=}")
if k % tile_k != 0:
raise ValueError(f"{k=} must be divisible by {tile_k=}")
m_iters = m // tile_m
n_iters = n // tile_n
k_iters = k // tile_k
max_concurrent_steps = config.max_concurrent_steps
TMA_WARP = 0
MMA_WARP = 1
COMPUTE_WG = 0
STORE_WG = 1
def kernel(a_gmem, b_gmem, out_gmem,
a_smem, b_smem, acc_tmem, acc_smem,
ab_tma_barrier, store_done_barrier, mma_done_barrier,
consumed_barrier):
wg_idx = lax.axis_index("wg")
cluster_idx = lax.axis_index("x")
is_lead_block = cluster_idx == 0
@plgpu.dynamic_scheduling_loop(grid_names=("mn_linear",), thread_axis="wg")
def mn_loop(loop_info: plgpu.NDLoopInfo): # pylint: disable=unused-variable
(lin_idx,) = loop_info.index
local_index = loop_info.local_index
m_index, n_index = plgpu.planar_snake(
lin_idx,
(m_iters, n_iters),
config.grid_minor_dim,
config.grid_tile_width,
)
block_m_index = m_index * 2 + cluster_idx if collective else m_index
block_slice_m = pl.ds(block_m_index * block_tile_m, block_tile_m)
slice_m = pl.ds(m_index * tile_m, tile_m)
slice_n = pl.ds(n_index * tile_n, tile_n)
acc_slot = lax.rem(local_index, jnp.int32(2))
@pl.when(wg_idx == COMPUTE_WG)
def _():
@pl.core_map(plgpu.WarpMesh(axis_name="warp"))
def _per_warp():
warp_id = lax.axis_index("warp")
@pl.when(warp_id == TMA_WARP)
def _memory():
def _loop_body(ki, _):
slice_k = pl.ds(ki * tile_k, tile_k)
slot = lax.rem(ki, max_concurrent_steps)
@pl.when(jnp.logical_or(ki >= max_concurrent_steps,
local_index > 0))
def _():
plgpu.barrier_wait(consumed_barrier.at[slot])
plgpu.copy_gmem_to_smem(
a_gmem.at[slice_m, slice_k],
a_smem.at[slot],
ab_tma_barrier.at[slot],
partitioned_axis=0 if collective else None,
collective_axes="x" if collective else None,
)
plgpu.copy_gmem_to_smem(
b_gmem.at[slice_k, slice_n],
b_smem.at[slot],
ab_tma_barrier.at[slot],
partitioned_axis=1 if collective else None,
collective_axes="x" if collective else None,
)
lax.fori_loop(0, k_iters, _loop_body, None)
@pl.when(jnp.logical_and(warp_id == MMA_WARP, local_index > 1))
def _wait_store():
plgpu.barrier_wait(store_done_barrier.at[acc_slot])
@pl.when(jnp.logical_and(warp_id == MMA_WARP, is_lead_block))
def _compute():
def _loop_body(ki, _):
slot = lax.rem(ki, max_concurrent_steps)
plgpu.barrier_wait(ab_tma_barrier.at[slot])
is_last_iter = ki >= k_iters - 1
acc_tmem_slice = acc_tmem.at[:, pl.ds(acc_slot * tile_n, tile_n)]
plgpu.tcgen05_mma(
acc_tmem_slice,
a_smem.at[slot],
b_smem.at[slot],
consumed_barrier.at[slot],
accumulate=(ki > 0),
collective_axis="x" if collective else None,
)
@pl.when(is_last_iter)
def _():
plgpu.tcgen05_commit_arrive(
mma_done_barrier.at[acc_slot],
collective_axis="x" if collective else None,
)
lax.fori_loop(0, k_iters, _loop_body, None)
@pl.when(wg_idx == STORE_WG)
def _():
plgpu.wait_smem_to_gmem(0, wait_read_only=True)
plgpu.barrier_wait(mma_done_barrier.at[acc_slot])
acc_tmem_slot = acc_tmem.at[:, pl.ds(acc_slot * tile_n, tile_n)]
step_out_gmem = out_gmem.at[block_slice_m, slice_n]
for ni in range(tile_n // epilogue_tile_n):
acc_smem_ni = acc_smem.at[ni % 2]
ni_col_slice = pl.ds(ni * epilogue_tile_n, epilogue_tile_n)
acc_smem_ni[...] = plgpu.async_load_tmem(
acc_tmem_slot.at[:, ni_col_slice]
).astype(dtype)
plgpu.commit_smem()
plgpu.copy_smem_to_gmem(acc_smem_ni, step_out_gmem.at[:, ni_col_slice])
plgpu.wait_smem_to_gmem(1, wait_read_only=True)
plgpu.wait_load_tmem() # Load must complete before we continue.
plgpu.barrier_arrive(store_done_barrier.at[acc_slot])
if collective:
store_done_barrier = plgpu.ClusterBarrier(
collective_axes=("x",),
num_arrivals=1,
num_barriers=2,
orders_tensor_core=True,
)
else:
store_done_barrier = plgpu.Barrier( # type: ignore
num_arrivals=1, num_barriers=2, orders_tensor_core=True
)
f = plgpu.kernel(
kernel,
out_shape=jax.ShapeDtypeStruct((m, n), dtype),
grid=(m_iters * n_iters,),
grid_names=("mn_linear",),
num_threads=2,
thread_name="wg",
cluster_names=("x",),
cluster=(1 + collective,),
scratch_shapes=dict(
a_smem=plgpu.SMEM(
(max_concurrent_steps, block_tile_m, tile_k),
dtype,
transforms=transforms,
),
b_smem=plgpu.SMEM(
(max_concurrent_steps, tile_k, block_tile_n),
dtype,
transforms=transforms,
),
acc_tmem=plgpu.TMEM(
(block_tile_m, tile_n * 2), jnp.float32, collective=collective
),
acc_smem=plgpu.SMEM(
(2, block_tile_m, epilogue_tile_n),
dtype,
transforms=out_transforms,
),
ab_tma_barrier=plgpu.Barrier(
num_arrivals=2, num_barriers=max_concurrent_steps
),
store_done_barrier=store_done_barrier,
mma_done_barrier=plgpu.Barrier(
num_arrivals=1, num_barriers=2, orders_tensor_core=True
),
consumed_barrier=plgpu.Barrier(
num_arrivals=1,
num_barriers=max_concurrent_steps,
orders_tensor_core=True,
),
),
)
return f(a, b)
def main(_) -> None:
problem_it = [(4096, 8192, 4096)]
for M, N, K in problem_it:
print(f"==== {M=} {N=} {K=} ====")
matmul_flops = 2 * M * N * K
peak_flops = 2.25e15 # f16 TensorCore peak = 2250 TFLOPS
a = jax.random.uniform(jax.random.key(1), (M, K), jnp.float16, -1, 1)
b = jax.random.uniform(jax.random.key(2), (K, N), jnp.float16, -1, 1)
tuning_it = itertools.product(
(128,), # tile_m
(128, 256), # tile_n
(64,), # tile_k
MatmulDimension, # grid_minor_dim
(1, 4, 8, 12, 16), # grid_tile_width
(2, 4, 6), # max_concurrent_steps
(False, True), # collective
(32,), # epilogue_tile_n
)
best_util = -float("inf")
expected = jnp.dot(a, b, precision=jax.lax.DotAlgorithmPreset.F16_F16_F32)
for (tile_m, tile_n, tile_k, grid_minor_dim, grid_tile_width,
max_concurrent_steps, collective, epilogue_tile_n) in tuning_it:
# Only N <= 128 are supported for collective MMAs
if collective and tile_n > 128:
continue
config = TuningConfig(
tile_m=tile_m,
tile_n=tile_n,
tile_k=tile_k,
max_concurrent_steps=max_concurrent_steps,
collective=collective,
epilogue_tile_n=epilogue_tile_n,
grid_minor_dim=grid_minor_dim,
grid_tile_width=grid_tile_width,
)
if collective:
tile_m *= 2
tile_n *= 2
try:
out, runtimes_ms = profiler.measure(
functools.partial(matmul_kernel, config=config), iterations=10
)(a, b)
assert runtimes_ms is not None
runtime_ms = statistics.median(runtimes_ms)
except ValueError as e:
if ("exceeds available shared memory" in e.args[0] or
"Accumulator layout mismatch:" in e.args[0]):
# Accumulator layout mismatch triggers for tile_n=256 on some configs.
continue
raise
runtime_us = runtime_ms * 1e3 # type: ignore
optimal_time = matmul_flops / peak_flops * 1e6 # us
achieved_tc_util = optimal_time / runtime_us * 100
if achieved_tc_util > best_util:
np.testing.assert_allclose(out, expected)
best_util = achieved_tc_util
print(
f"{tile_m=} {tile_n=} {tile_k=} {max_concurrent_steps=} "
f"{grid_minor_dim=} {grid_tile_width=} "
f"{epilogue_tile_n=} "
f"{collective=} : "
f"{runtime_us:<7.1f}us"
f" = {achieved_tc_util:4.1f}% TC utilization"
)
print(f"\tBest utilization: {best_util:4.1f}%")
_, runtimes_ms = profiler.measure(
functools.partial(
jnp.dot, precision=jax.lax.DotAlgorithmPreset.F16_F16_F32
),
iterations=10,
)(a, b)
assert runtimes_ms is not None
runtime_ms = statistics.median(runtimes_ms)
runtime_us = runtime_ms * 1e3 # type: ignore
optimal_time = matmul_flops / peak_flops * 1e6 # us
achieved_tc_util = optimal_time / runtime_us * 100
print(f"\tReference: {achieved_tc_util:4.1f}%")
if __name__ == "__main__":
from absl import app
jax.config.config_with_absl()
app.run(main)
| TuningConfig |
python | kamyu104__LeetCode-Solutions | Python/count-substrings-with-k-frequency-characters-ii.py | {
"start": 78,
"end": 550
} | class ____(object):
def numberOfSubstrings(self, s, k):
"""
:type s: str
:type k: int
:rtype: int
"""
cnt = [0]*26
result = left = 0
for right in xrange(len(s)):
cnt[ord(s[right])-ord('a')] += 1
while cnt[ord(s[right])-ord('a')] == k:
result += (len(s)-1)-right+1
cnt[ord(s[left])-ord('a')] -= 1
left += 1
return result
| Solution |
python | pypa__warehouse | tests/unit/test_sessions.py | {
"start": 1648,
"end": 11406
} | class ____:
@pytest.mark.parametrize(
("data", "expected"), [(None, {}), ({}, {}), ({"foo": "bar"}, {"foo": "bar"})]
)
def test_create_new(self, monkeypatch, data, expected):
monkeypatch.setattr(time, "time", lambda: 100)
monkeypatch.setattr(crypto, "random_token", lambda: "123456")
session = Session(data)
assert session == expected
assert session.sid == "123456"
assert session.new
assert session.created == 100
assert not session.invalidated
@pytest.mark.parametrize(
("data", "expected", "new"),
[
(None, {}, True),
({}, {}, True),
({"foo": "bar"}, {"foo": "bar"}, True),
(None, {}, False),
({}, {}, False),
({"foo": "bar"}, {"foo": "bar"}, False),
],
)
def test_create_with_session_id(self, monkeypatch, data, expected, new):
monkeypatch.setattr(time, "time", lambda: 100)
session = Session(data, "wat", new)
assert session == expected
assert session.sid == "wat"
assert session.new is new
assert session.created == 100
assert not session.invalidated
def test_changed_marks_as_changed(self):
session = Session()
assert not session._changed
session.changed()
assert session._changed
def test_invalidate(self, monkeypatch):
session_ids = iter(["123456", "7890"])
monkeypatch.setattr(crypto, "random_token", lambda: next(session_ids))
session = Session({"foo": "bar"}, "original id", False)
assert session == {"foo": "bar"}
assert session.sid == "original id"
assert not session.new
assert not session.invalidated
session.invalidate()
assert session == {}
assert session.sid == "123456"
assert session.new
assert session.invalidated == {"original id"}
session.invalidate()
assert session == {}
assert session.sid == "7890"
assert session.new
assert session.invalidated == {"original id", "123456"}
def test_invalidate_empty(self):
session = Session({"foo": "bar"})
session.invalidate()
assert session == {}
assert session.invalidated == set()
def test_should_save(self):
session = Session()
assert not session.should_save()
session.changed()
assert session.should_save()
def test_reauth_record(self, pyramid_request):
session = Session()
assert not session.should_save()
session.record_auth_timestamp()
assert session.should_save()
def test_reauth_unneeded(self):
session = Session()
session.record_auth_timestamp()
assert not session.needs_reauthentication(666)
def test_reauth_needed(self):
session = Session()
session[session._reauth_timestamp_key] = 0
assert session.needs_reauthentication(666)
def test_reauth_needed_no_value(self):
session = Session()
assert session.needs_reauthentication(666)
@pytest.mark.parametrize(
("data", "method", "args"),
[
({"foo": "bar"}, "__delitem__", ["foo"]),
({}, "__setitem__", ["foo", "bar"]),
({}, "clear", []),
({"foo": "bar"}, "pop", ["foo"]),
({"foo": "bar"}, "popitem", []),
({}, "setdefault", ["foo", "bar"]),
({}, "update", [{"foo": "bar"}]),
],
)
def test_methods_call_changed(self, data, method, args):
session = Session(data)
session.changed = pretend.call_recorder(lambda: None)
getattr(session, method)(*args)
assert session.changed.calls == [pretend.call()]
@pytest.mark.parametrize(
("queue", "expected"),
[(None, "_flash_messages"), ("foobar", "_flash_messages.foobar")],
)
def test_generate_flash_key(self, queue, expected):
session = Session()
assert session._get_flash_queue_key(queue) == expected
def test_flash_messages(self, monkeypatch):
_markup = pretend.call_recorder(lambda x: x)
monkeypatch.setattr(markupsafe, "Markup", _markup)
session = Session()
assert session.peek_flash() == []
assert session.peek_flash(queue="foo") == []
assert session.pop_flash() == []
assert session.pop_flash(queue="foo") == []
session.flash("A Flash Message")
assert session.peek_flash() == [{"msg": "A Flash Message", "safe": False}]
assert session.peek_flash(queue="foo") == []
session.flash("Another Flash Message", queue="foo", safe=True)
assert session.peek_flash() == [{"msg": "A Flash Message", "safe": False}]
assert session.peek_flash(queue="foo") == [
{"msg": "Another Flash Message", "safe": True}
]
session.flash("A Flash Message")
assert session.peek_flash() == [
{"msg": "A Flash Message", "safe": False},
{"msg": "A Flash Message", "safe": False},
]
assert session.peek_flash(queue="foo") == [
{"msg": "Another Flash Message", "safe": True}
]
session.flash("A Flash Message", allow_duplicate=True)
assert session.peek_flash() == [
{"msg": "A Flash Message", "safe": False},
{"msg": "A Flash Message", "safe": False},
{"msg": "A Flash Message", "safe": False},
]
assert session.peek_flash(queue="foo") == [
{"msg": "Another Flash Message", "safe": True}
]
session.flash("A Flash Message", allow_duplicate=False)
assert session.peek_flash() == [
{"msg": "A Flash Message", "safe": False},
{"msg": "A Flash Message", "safe": False},
{"msg": "A Flash Message", "safe": False},
]
assert session.peek_flash(queue="foo") == [
{"msg": "Another Flash Message", "safe": True}
]
assert session.pop_flash() == [
"A Flash Message",
"A Flash Message",
"A Flash Message",
]
assert session.pop_flash(queue="foo") == [
"Another Flash Message",
]
assert _markup.calls == [pretend.call("Another Flash Message")]
assert session.peek_flash() == []
assert session.peek_flash(queue="foo") == []
assert session.pop_flash() == []
assert session.pop_flash(queue="foo") == []
def test_csrf_token(self, monkeypatch):
tokens = iter(["123456", "7890"])
monkeypatch.setattr(crypto, "random_token", lambda: next(tokens))
session = Session()
assert session._csrf_token_key not in session
assert session.new_csrf_token() == "123456"
assert session._csrf_token_key in session
assert session.get_csrf_token() == "123456"
assert session.new_csrf_token() == "7890"
assert session._csrf_token_key in session
assert session.get_csrf_token() == "7890"
def test_get_csrf_token_empty(self):
session = Session()
session.new_csrf_token = pretend.call_recorder(lambda: "123456")
assert session.get_csrf_token() == "123456"
assert session.new_csrf_token.calls == [pretend.call()]
def test_get_totp_secret(self):
session = Session()
session[session._totp_secret_key] = b"foobar"
assert session.get_totp_secret() == b"foobar"
def test_get_totp_secret_empty(self, monkeypatch):
generate_totp_secret = pretend.call_recorder(lambda: b"foobar")
monkeypatch.setattr(otp, "generate_totp_secret", generate_totp_secret)
session = Session()
assert session.get_totp_secret() == b"foobar"
assert session._totp_secret_key in session
def test_clear_totp_secret(self):
session = Session()
session[session._totp_secret_key] = b"foobar"
session.clear_totp_secret()
assert not session[session._totp_secret_key]
def test_get_webauthn_challenge(self):
session = Session()
session[session._webauthn_challenge_key] = "not_a_real_challenge"
assert session.get_webauthn_challenge() == "not_a_real_challenge"
def test_get_webauthn_challenge_empty(self, monkeypatch):
generate_webauthn_challenge = pretend.call_recorder(
lambda: "not_a_real_challenge"
)
monkeypatch.setattr(
webauthn, "generate_webauthn_challenge", generate_webauthn_challenge
)
session = Session()
assert session.get_webauthn_challenge() == "not_a_real_challenge"
assert session._webauthn_challenge_key in session
def test_clear_webauthn_challenge(self):
session = Session()
session[session._webauthn_challenge_key] = "not_a_real_challenge"
session.clear_webauthn_challenge()
assert not session[session._webauthn_challenge_key]
def test_record_password_timestamp(self):
session = Session()
assert not session.should_save()
session.record_password_timestamp(1646230636)
assert session[session._password_timestamp_key] == 1646230636
assert session.should_save()
@pytest.mark.parametrize(
("stored", "current", "expected"),
[
(1600000000, 0, True),
(1600000000, 1600000000, False),
(0, 1600000000, True),
(None, 1600000000, False),
],
)
def test_password_outdated(self, stored, current, expected):
session = Session()
session.record_password_timestamp(stored)
assert session.password_outdated(current) == expected
| TestSession |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1299945,
"end": 1301607
} | class ____(sgqlc.types.Type, Node, RepositoryNode):
"""A Pinned Discussion is a discussion pinned to a repository's index
page.
"""
__schema__ = github_schema
__field_names__ = (
"created_at",
"database_id",
"discussion",
"gradient_stop_colors",
"pattern",
"pinned_by",
"preconfigured_gradient",
"updated_at",
)
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
"""Identifies the primary key from the database."""
discussion = sgqlc.types.Field(sgqlc.types.non_null(Discussion), graphql_name="discussion")
"""The discussion that was pinned."""
gradient_stop_colors = sgqlc.types.Field(
sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null(String))), graphql_name="gradientStopColors"
)
"""Color stops of the chosen gradient"""
pattern = sgqlc.types.Field(sgqlc.types.non_null(PinnedDiscussionPattern), graphql_name="pattern")
"""Background texture pattern"""
pinned_by = sgqlc.types.Field(sgqlc.types.non_null(Actor), graphql_name="pinnedBy")
"""The actor that pinned this discussion."""
preconfigured_gradient = sgqlc.types.Field(PinnedDiscussionGradient, graphql_name="preconfiguredGradient")
"""Preconfigured background gradient option"""
updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt")
"""Identifies the date and time when the object was last updated."""
| PinnedDiscussion |
python | ionelmc__pytest-benchmark | src/pytest_benchmark/utils.py | {
"start": 1748,
"end": 6486
} | class ____:
def __init__(self, fallback, exceptions):
self.fallback = fallback
self.functions = []
self.exceptions = exceptions
def __call__(self, *args, **kwargs):
for func in self.functions:
try:
value = func(*args, **kwargs)
except self.exceptions:
continue
else:
if value:
return value
else:
return self.fallback(*args, **kwargs)
def register(self, other):
self.functions.append(other)
return self
@partial(Fallback, exceptions=(IndexError, CalledProcessError, OSError))
def get_project_name():
return Path.cwd().name
@get_project_name.register
def get_project_name_git():
is_git = check_output(['git', 'rev-parse', '--git-dir'], stderr=subprocess.STDOUT)
if is_git:
project_address = check_output(['git', 'config', '--local', 'remote.origin.url'])
if isinstance(project_address, bytes):
project_address = project_address.decode()
project_name = [i for i in re.split(r'[/:\s\\]|\.git', project_address) if i][-1]
return project_name.strip()
@get_project_name.register
def get_project_name_hg():
with open(os.devnull, 'w') as devnull:
project_address = check_output(['hg', 'path', 'default'], stderr=devnull)
project_address = project_address.decode()
project_name = project_address.split('/')[-1]
return project_name.strip()
def in_any_parent(name, path=None):
prev = None
if not path:
path = Path.cwd()
while path and prev != path and not path.joinpath(name).exists():
prev = path
path = path.parent
return path.joinpath(name).exists()
def subprocess_output(cmd):
return check_output(cmd.split(), stderr=subprocess.STDOUT, universal_newlines=True).strip()
def get_commit_info(project_name=None):
dirty = False
commit = 'unversioned'
commit_time = None
author_time = None
project_name = project_name or get_project_name()
branch = '(unknown)'
try:
if in_any_parent('.git'):
desc = subprocess_output('git describe --dirty --always --long --abbrev=40')
desc = desc.split('-')
if desc[-1].strip() == 'dirty':
dirty = True
desc.pop()
commit = desc[-1].strip('g')
commit_time = subprocess_output('git show -s --pretty=format:"%cI"').strip('"')
author_time = subprocess_output('git show -s --pretty=format:"%aI"').strip('"')
branch = subprocess_output('git rev-parse --abbrev-ref HEAD')
if branch == 'HEAD':
branch = '(detached head)'
elif in_any_parent('.hg'):
desc = subprocess_output('hg id --id --debug')
if desc[-1] == '+':
dirty = True
commit = desc.strip('+')
commit_time = subprocess_output('hg tip --template "{date|rfc3339date}"').strip('"')
branch = subprocess_output('hg branch')
return {
'id': commit,
'time': commit_time,
'author_time': author_time,
'dirty': dirty,
'project': project_name,
'branch': branch,
}
except Exception as exc:
return {
'id': 'unknown',
'time': None,
'author_time': None,
'dirty': dirty,
'error': f'CalledProcessError({exc.returncode}, {exc.output!r})' if isinstance(exc, CalledProcessError) else repr(exc),
'project': project_name,
'branch': branch,
}
def get_current_time():
return datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')
def first_or_value(obj, value):
if obj:
(value,) = obj
return value
def short_filename(path, machine_id=None):
parts = []
try:
last = len(path.parts) - 1
except AttributeError:
return str(path)
for pos, part in enumerate(path.parts):
if not pos and part == machine_id:
continue
if pos == last:
part = part.rsplit('.', 1)[0]
# if len(part) > 16:
# part = "%.13s..." % part
parts.append(part)
return '/'.join(parts)
def load_timer(string):
if '.' not in string:
raise argparse.ArgumentTypeError("Value for --benchmark-timer must be in dotted form. Eg: 'module.attr'.")
mod, attr = string.rsplit('.', 1)
if mod == 'pep418':
import time # noqa: PLC0415
return NameWrapper(getattr(time, attr))
else:
__import__(mod)
mod = sys.modules[mod]
return NameWrapper(getattr(mod, attr))
| Fallback |
python | keras-team__keras | guides/writing_a_custom_training_loop_in_tensorflow.py | {
"start": 10277,
"end": 18097
} | class ____(keras.layers.Layer):
def call(self, inputs):
self.add_loss(1e-2 * tf.reduce_sum(inputs))
return inputs
"""
Let's build a really simple model that uses it:
"""
inputs = keras.Input(shape=(784,), name="digits")
x = keras.layers.Dense(64, activation="relu")(inputs)
# Insert activity regularization as a layer
x = ActivityRegularizationLayer()(x)
x = keras.layers.Dense(64, activation="relu")(x)
outputs = keras.layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
"""
Here's what our training step should look like now:
"""
@tf.function
def train_step(x, y):
with tf.GradientTape() as tape:
logits = model(x, training=True)
loss_value = loss_fn(y, logits)
# Add any extra losses created during the forward pass.
loss_value += sum(model.losses)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply(grads, model.trainable_weights)
train_acc_metric.update_state(y, logits)
return loss_value
"""
## Summary
Now you know everything there is to know about using built-in training loops and
writing your own from scratch.
To conclude, here's a simple end-to-end example that ties together everything
you've learned in this guide: a DCGAN trained on MNIST digits.
"""
"""
## End-to-end example: a GAN training loop from scratch
You may be familiar with Generative Adversarial Networks (GANs). GANs can generate new
images that look almost real, by learning the latent distribution of a training
dataset of images (the "latent space" of the images).
A GAN is made of two parts: a "generator" model that maps points in the latent
space to points in image space, a "discriminator" model, a classifier
that can tell the difference between real images (from the training dataset)
and fake images (the output of the generator network).
A GAN training loop looks like this:
1) Train the discriminator.
- Sample a batch of random points in the latent space.
- Turn the points into fake images via the "generator" model.
- Get a batch of real images and combine them with the generated images.
- Train the "discriminator" model to classify generated vs. real images.
2) Train the generator.
- Sample random points in the latent space.
- Turn the points into fake images via the "generator" network.
- Get a batch of real images and combine them with the generated images.
- Train the "generator" model to "fool" the discriminator and classify the fake images
as real.
For a much more detailed overview of how GANs works, see
[Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python).
Let's implement this training loop. First, create the discriminator meant to classify
fake vs real digits:
"""
discriminator = keras.Sequential(
[
keras.Input(shape=(28, 28, 1)),
keras.layers.Conv2D(64, (3, 3), strides=(2, 2), padding="same"),
keras.layers.LeakyReLU(negative_slope=0.2),
keras.layers.Conv2D(128, (3, 3), strides=(2, 2), padding="same"),
keras.layers.LeakyReLU(negative_slope=0.2),
keras.layers.GlobalMaxPooling2D(),
keras.layers.Dense(1),
],
name="discriminator",
)
discriminator.summary()
"""
Then let's create a generator network,
that turns latent vectors into outputs of shape `(28, 28, 1)` (representing
MNIST digits):
"""
latent_dim = 128
generator = keras.Sequential(
[
keras.Input(shape=(latent_dim,)),
# We want to generate 128 coefficients to reshape into a 7x7x128 map
keras.layers.Dense(7 * 7 * 128),
keras.layers.LeakyReLU(negative_slope=0.2),
keras.layers.Reshape((7, 7, 128)),
keras.layers.Conv2DTranspose(
128, (4, 4), strides=(2, 2), padding="same"
),
keras.layers.LeakyReLU(negative_slope=0.2),
keras.layers.Conv2DTranspose(
128, (4, 4), strides=(2, 2), padding="same"
),
keras.layers.LeakyReLU(negative_slope=0.2),
keras.layers.Conv2D(1, (7, 7), padding="same", activation="sigmoid"),
],
name="generator",
)
"""
Here's the key bit: the training loop. As you can see it is quite straightforward. The
training step function only takes 17 lines.
"""
# Instantiate one optimizer for the discriminator and another for the generator.
d_optimizer = keras.optimizers.Adam(learning_rate=0.0003)
g_optimizer = keras.optimizers.Adam(learning_rate=0.0004)
# Instantiate a loss function.
loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
@tf.function
def train_step(real_images):
# Sample random points in the latent space
random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim))
# Decode them to fake images
generated_images = generator(random_latent_vectors)
# Combine them with real images
combined_images = tf.concat([generated_images, real_images], axis=0)
# Assemble labels discriminating real from fake images
labels = tf.concat(
[tf.ones((batch_size, 1)), tf.zeros((real_images.shape[0], 1))], axis=0
)
# Add random noise to the labels - important trick!
labels += 0.05 * tf.random.uniform(labels.shape)
# Train the discriminator
with tf.GradientTape() as tape:
predictions = discriminator(combined_images)
d_loss = loss_fn(labels, predictions)
grads = tape.gradient(d_loss, discriminator.trainable_weights)
d_optimizer.apply(grads, discriminator.trainable_weights)
# Sample random points in the latent space
random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim))
# Assemble labels that say "all real images"
misleading_labels = tf.zeros((batch_size, 1))
# Train the generator (note that we should *not* update the weights
# of the discriminator)!
with tf.GradientTape() as tape:
predictions = discriminator(generator(random_latent_vectors))
g_loss = loss_fn(misleading_labels, predictions)
grads = tape.gradient(g_loss, generator.trainable_weights)
g_optimizer.apply(grads, generator.trainable_weights)
return d_loss, g_loss, generated_images
"""
Let's train our GAN, by repeatedly calling `train_step` on batches of images.
Since our discriminator and generator are convnets, you're going to want to
run this code on a GPU.
"""
# Prepare the dataset. We use both the training & test MNIST digits.
batch_size = 64
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
all_digits = np.concatenate([x_train, x_test])
all_digits = all_digits.astype("float32") / 255.0
all_digits = np.reshape(all_digits, (-1, 28, 28, 1))
dataset = tf.data.Dataset.from_tensor_slices(all_digits)
dataset = dataset.shuffle(buffer_size=1024).batch(batch_size)
epochs = 1 # In practice you need at least 20 epochs to generate nice digits.
save_dir = "./"
for epoch in range(epochs):
print(f"\nStart epoch {epoch}")
for step, real_images in enumerate(dataset):
# Train the discriminator & generator on one batch of real images.
d_loss, g_loss, generated_images = train_step(real_images)
# Logging.
if step % 100 == 0:
# Print metrics
print(f"discriminator loss at step {step}: {d_loss:.2f}")
print(f"adversarial loss at step {step}: {g_loss:.2f}")
# Save one generated image
img = keras.utils.array_to_img(
generated_images[0] * 255.0, scale=False
)
img.save(os.path.join(save_dir, f"generated_img_{step}.png"))
# To limit execution time we stop after 10 steps.
# Remove the lines below to actually train the model!
if step > 10:
break
"""
That's it! You'll get nice-looking fake MNIST digits after just ~30s of training on the
Colab GPU.
"""
| ActivityRegularizationLayer |
python | tiangolo__fastapi | tests/test_response_model_include_exclude.py | {
"start": 156,
"end": 212
} | class ____(BaseModel):
ref: Model1
baz: str
| Model2 |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/pkg_a/package.py | {
"start": 1180,
"end": 1759
} | class ____(autotools.AutotoolsBuilder):
def with_or_without_fee(self, activated):
if not activated:
return "--no-fee"
return "--fee-all-the-time"
def autoreconf(self, pkg, spec, prefix):
pass
def configure(self, pkg, spec, prefix):
pass
def build(self, pkg, spec, prefix):
pass
def install(self, pkg, spec, prefix):
# sanity_check_prefix requires something in the install directory
# Test requires overriding the one provided by `AutotoolsPackage`
mkdirp(prefix.bin)
| AutotoolsBuilder |
python | ray-project__ray | python/ray/tests/test_client_reconnect.py | {
"start": 1266,
"end": 2586
} | class ____(ray_client_pb2_grpc.RayletDataStreamerServicer):
"""
Forwards all requests to the real data servicer. Useful for injecting
errors between a client and server pair.
"""
def __init__(
self, on_response: Optional[Hook] = None, on_request: Optional[Hook] = None
):
"""
Args:
on_response: Optional hook to inject errors before sending back a
response
"""
self.stub = None
self.on_response = on_response
self.on_request = on_request
def set_channel(self, channel: grpc.Channel) -> None:
self.stub = ray_client_pb2_grpc.RayletDataStreamerStub(channel)
def _requests(self, request_iterator):
for req in request_iterator:
if self.on_request:
self.on_request(req)
yield req
def Datapath(self, request_iterator, context):
try:
for response in self.stub.Datapath(
self._requests(request_iterator), metadata=context.invocation_metadata()
):
if self.on_response:
self.on_response(response)
yield response
except grpc.RpcError as e:
context.set_code(e.code())
context.set_details(e.details())
| MiddlemanDataServicer |
python | getsentry__sentry | tests/sentry/integrations/api/endpoints/test_doc_integration_avatar.py | {
"start": 422,
"end": 1212
} | class ____(APITestCase):
endpoint = "sentry-api-0-doc-integration-avatar"
def setUp(self) -> None:
self.user = self.create_user(email="peter@marvel.com", is_superuser=True)
self.superuser = self.create_user(email="gwen@marvel.com", is_superuser=True)
self.staff_user = self.create_user(is_staff=True)
self.draft_doc = self.create_doc_integration(
name="spiderman", is_draft=True, has_avatar=True
)
self.published_doc = self.create_doc_integration(
name="spiderwoman", is_draft=False, has_avatar=True
)
self.avatar_payload = {
"avatar_photo": b64encode(self.load_fixture("rookout-color.png")),
"avatar_type": "upload",
}
@control_silo_test
| DocIntegrationAvatarTest |
python | bokeh__bokeh | src/bokeh/models/sources.py | {
"start": 29035,
"end": 29950
} | class ____(Model):
''' A view into a ``ColumnDataSource`` that represents a row-wise subset.
'''
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
filter = Instance(Filter, default=InstanceDefault(AllIndices), help="""
Defines the subset of indices to use from the data source this view applies to.
By default all indices are used (``AllIndices`` filter). This can be changed by
using specialized filters like ``IndexFilter``, ``BooleanFilter``, etc. Filters
can be composed using set operations to create non-trivial data masks. This can
be accomplished by directly using models like ``InversionFilter``, ``UnionFilter``,
etc., or by using set operators on filters, e.g.:
.. code-block:: python
# filters everything but indexes 10 and 11
cds_view.filter &= ~IndexFilter(indices=[10, 11])
""")
| CDSView |
python | scipy__scipy | scipy/optimize/tests/test_constraint_conversion.py | {
"start": 538,
"end": 2595
} | class ____:
x0 = (2, 0)
bnds = ((0, None), (0, None))
method = "trust-constr"
def test_constraint_dictionary_1(self):
def fun(x):
return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2
cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
{'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6},
{'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2})
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "delta_grad == 0.0", UserWarning)
res = minimize(fun, self.x0, method=self.method,
bounds=self.bnds, constraints=cons)
assert_allclose(res.x, [1.4, 1.7], rtol=1e-4)
assert_allclose(res.fun, 0.8, rtol=1e-4)
def test_constraint_dictionary_2(self):
def fun(x):
return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2
cons = {'type': 'eq',
'fun': lambda x, p1, p2: p1*x[0] - p2*x[1],
'args': (1, 1.1),
'jac': lambda x, p1, p2: np.array([[p1, -p2]])}
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "delta_grad == 0.0", UserWarning)
res = minimize(fun, self.x0, method=self.method,
bounds=self.bnds, constraints=cons)
assert_allclose(res.x, [1.7918552, 1.62895927])
assert_allclose(res.fun, 1.3857466063348418)
def test_constraint_dictionary_3(self):
def fun(x):
return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2
cons = [{'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2},
NonlinearConstraint(lambda x: x[0] - x[1], 0, 0)]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "delta_grad == 0.0", UserWarning)
res = minimize(fun, self.x0, method=self.method,
bounds=self.bnds, constraints=cons)
assert_allclose(res.x, [1.75, 1.75], rtol=1e-4)
assert_allclose(res.fun, 1.125, rtol=1e-4)
| TestOldToNew |
python | tensorflow__tensorflow | tensorflow/python/ops/math_ops_test.py | {
"start": 10292,
"end": 10834
} | class ____(test_util.TensorFlowTestCase):
def testRounding(self):
x = np.arange(-5.0, 5.0, .25)
for dtype in [np.float32, np.double, np.int32]:
x_np = np.array(x, dtype=dtype)
with test_util.device(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y_tf = math_ops.round(x_tf)
y_tf_np = self.evaluate(y_tf)
y_np = np.round(x_np)
self.assertAllClose(y_tf_np, y_np, atol=1e-2)
@test_util.with_eager_op_as_function
@test_util.run_all_in_graph_and_eager_modes
| RoundTest |
python | getsentry__sentry | src/sentry/core/endpoints/project_key_stats.py | {
"start": 886,
"end": 4299
} | class ____(ProjectEndpoint, StatsMixin):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.TELEMETRY_EXPERIENCE
enforce_rate_limit = True
rate_limits = RateLimitConfig(
limit_overrides={
"GET": {
RateLimitCategory.IP: RateLimit(limit=20, window=1),
RateLimitCategory.USER: RateLimit(limit=20, window=1),
RateLimitCategory.ORGANIZATION: RateLimit(limit=20, window=1),
}
}
)
def get(self, request: Request, project, key_id) -> Response:
try:
key = ProjectKey.objects.for_request(request).get(
project=project, public_key=key_id, roles=F("roles").bitor(ProjectKey.roles.store)
)
except ProjectKey.DoesNotExist:
raise ResourceDoesNotExist
try:
stats_params = self._parse_args(request)
except Exception:
raise ParseError(detail="Invalid request data")
try:
outcomes_query = QueryDefinition(
fields=["sum(quantity)"],
start=stats_params["start"].isoformat(),
end=stats_params["end"].isoformat(),
organization_id=project.organization_id,
outcome=[
Outcome.ACCEPTED.api_name(),
Outcome.FILTERED.api_name(),
Outcome.RATE_LIMITED.api_name(),
],
group_by=["outcome"],
category=["error"],
key_id=key.id,
interval=request.GET.get("resolution", "1d"),
)
results = massage_outcomes_result(
outcomes_query,
[],
run_outcomes_query_timeseries(
outcomes_query, tenant_ids={"organization_id": project.organization_id}
),
)
except Exception:
raise ParseError(detail="Invalid request data")
# Initialize the response results.
response = []
for time_string in results["intervals"]:
ts = parse_timestamp(time_string)
assert ts is not None
response.append(
{
"ts": int(ts.timestamp()),
"total": 0,
"dropped": 0,
"accepted": 0,
"filtered": 0,
}
)
# We rely on groups and intervals being index aligned
for group_result in results["groups"]:
key = None
grouping = group_result["by"]["outcome"]
if grouping == Outcome.RATE_LIMITED.api_name():
key = "dropped"
elif grouping == Outcome.FILTERED.api_name():
key = "filtered"
elif grouping == Outcome.ACCEPTED.api_name():
key = "accepted"
else:
capture_exception(
ValueError(f"Unexpected outcome result in project key stats {grouping}")
)
if key:
# We rely on series being index aligned with intervals.
for i, value in enumerate(group_result["series"]["sum(quantity)"]):
response[i][key] += value
response[i]["total"] += value
return Response(response)
| ProjectKeyStatsEndpoint |
python | neetcode-gh__leetcode | python/0215-kth-largest-element-in-an-array.py | {
"start": 707,
"end": 1312
} | class ____:
def findKthLargest(self, nums: List[int], k: int) -> int:
pivot = random.choice(nums)
left = [num for num in nums if num > pivot]
mid = [num for num in nums if num == pivot]
right = [num for num in nums if num < pivot]
length_left = len(left)
length_right = len(right)
length_mid = len(mid)
if k <= length_left:
return self.findKthLargest(left, k)
elif k > length_left + length_mid:
return self.findKthLargest(right, k - length_mid - length_left)
else:
return mid[0]
| Solution2 |
python | dask__dask | dask/tests/test_task_spec.py | {
"start": 31861,
"end": 32353
} | class ____(Task):
__slots__ = ("custom_kwarg_only",)
def __init__(self, key, func, /, *args, custom_kwarg_only, **kwargs):
self.custom_kwarg_only = custom_kwarg_only
super().__init__(key, func, *args, **kwargs)
def test_substitute_subclasses():
t = MySubclass("key", func, "a", TaskRef("b"), custom_kwarg_only="foo")
t2 = t.substitute({"b": "c"})
assert t2.custom_kwarg_only == "foo"
assert t2({"a": "a", "c": "b"}) == t({"a": "a", "b": "b"})
| MySubclass |
python | ray-project__ray | python/ray/data/tests/test_partitioning.py | {
"start": 1952,
"end": 7308
} | class ____:
"""Callable that generates directory path strings for path-based partition formats.
Path-based partition formats embed all partition keys and values directly in
their dataset file paths.
Two path partition formats are currently supported - `HIVE` and `DIRECTORY`.
For `HIVE` Partitioning, all partition directories will be generated using a
`{key1}={value1}/{key2}={value2}` naming convention under the base directory.
An accompanying ordered list of partition key field names must also be
provided, where the order and length of all partition values must match the
order and length of field names
For `DIRECTORY` Partitioning, all directories will be generated from partition
values using a `{value1}/{value2}` naming convention under the base directory.
"""
@staticmethod
def of(
style: PartitionStyle = PartitionStyle.HIVE,
base_dir: Optional[str] = None,
field_names: Optional[List[str]] = None,
filesystem: Optional["pyarrow.fs.FileSystem"] = None,
) -> "PathPartitionEncoder":
"""Creates a new partition path encoder.
Args:
style: The partition style - may be either HIVE or DIRECTORY.
base_dir: "/"-delimited base directory that all partition paths will be
generated under (exclusive).
field_names: The partition key field names (i.e. column names for tabular
datasets). Required for HIVE partition paths, optional for DIRECTORY
partition paths. When non-empty, the order and length of partition key
field names must match the order and length of partition values.
filesystem: Filesystem that will be used for partition path file I/O.
Returns:
The new partition path encoder.
"""
scheme = Partitioning(style, base_dir, field_names, None, filesystem)
return PathPartitionEncoder(scheme)
def __init__(self, partitioning: Partitioning):
"""Creates a new partition path encoder.
Args:
partitioning: The path-based partition scheme. All partition paths
will be generated under this scheme's base directory. Field names are
required for HIVE partition paths, optional for DIRECTORY partition
paths. When non-empty, the order and length of partition key field
names must match the order and length of partition values.
"""
style = partitioning.style
field_names = partitioning.field_names
if style == PartitionStyle.HIVE and not field_names:
raise ValueError(
"Hive partition path generation requires a corresponding list of "
"partition key field names. Please retry your request with one "
"or more field names specified."
)
generators = {
PartitionStyle.HIVE: self._as_hive_partition_dirs,
PartitionStyle.DIRECTORY: self._as_directory_partition_dirs,
}
self._encoder_fn: Callable[[List[str]], List[str]] = generators.get(style)
if self._encoder_fn is None:
raise ValueError(
f"Unsupported partition style: {style}. "
f"Supported styles: {generators.keys()}"
)
self._scheme = partitioning
def __call__(self, partition_values: List[str]) -> str:
"""Returns the partition directory path for the given partition value strings.
All files for this partition should be written to this directory. If a base
directory is set, then the partition directory path returned will be rooted in
this base directory.
Args:
partition_values: The partition value strings to include in the partition
path. For HIVE partition paths, the order and length of partition
values must match the order and length of partition key field names.
Returns:
Partition directory path for the given partition values.
"""
partition_dirs = self._as_partition_dirs(partition_values)
return posixpath.join(self._scheme.normalized_base_dir, *partition_dirs)
@property
def scheme(self) -> Partitioning:
"""Returns the partitioning for this encoder."""
return self._scheme
def _as_hive_partition_dirs(self, values: List[str]) -> List[str]:
"""Creates HIVE directory names for the given values."""
field_names = self._scheme.field_names
return [f"{field_names[i]}={val}" for i, val in enumerate(values)]
def _as_directory_partition_dirs(self, values: List[str]) -> List[str]:
"""Creates DIRECTORY partition directory names for the given values."""
return values
def _as_partition_dirs(self, values: List[str]) -> List[str]:
"""Creates a list of partition directory names for the given values."""
field_names = self._scheme.field_names
if field_names:
assert len(values) == len(field_names), (
f"Expected {len(field_names)} partition value(s) but found "
f"{len(values)}: {values}."
)
return self._encoder_fn(values)
@pytest.mark.parametrize("block_type", [pd.DataFrame, pa.Table])
| PathPartitionEncoder |
python | wandb__wandb | wandb/automations/automations.py | {
"start": 550,
"end": 1553
} | class ____(TriggerFields, frozen=False):
"""A local instance of a saved W&B automation that supports editing."""
id: GQLId
created_at: Annotated[datetime, Field(repr=False, frozen=True, alias="createdAt")]
"""The date and time when this automation was created."""
updated_at: Annotated[
Optional[datetime], Field(repr=False, frozen=True, alias="updatedAt")
] = None
"""The date and time when this automation was last updated, if applicable."""
name: str
"""The name of this automation."""
description: Optional[str]
"""An optional description of this automation."""
enabled: bool
"""Whether this automation is enabled. Only enabled automations will trigger."""
event: SavedEvent
"""The event that will trigger this automation."""
scope: AutomationScope
"""The scope in which the triggering event must occur."""
action: SavedAction
"""The action that will execute when this automation is triggered."""
| Automation |
python | huggingface__transformers | src/transformers/models/roc_bert/modeling_roc_bert.py | {
"start": 27770,
"end": 34662
} | class ____(RoCBertPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
r"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.gradient_checkpointing = False
self.embeddings = RoCBertEmbeddings(config)
self.encoder = RoCBertEncoder(config)
self.pooler = RoCBertPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
# Copied from transformers.models.bert.modeling_bert.BertModel.get_input_embeddings
def get_input_embeddings(self):
return self.embeddings.word_embeddings
# Copied from transformers.models.bert.modeling_bert.BertModel.set_input_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def get_pronunciation_embeddings(self):
return self.embeddings.pronunciation_embed
def set_pronunciation_embeddings(self, value):
self.embeddings.pronunciation_embed = value
def get_shape_embeddings(self):
return self.embeddings.shape_embed
def set_shape_embeddings(self, value):
self.embeddings.shape_embed = value
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
input_shape_ids: Optional[torch.Tensor] = None,
input_pronunciation_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
r"""
input_shape_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the shape vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input_shape_ids)
input_pronunciation_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the pronunciation vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input_pronunciation_ids)
"""
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if use_cache and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if input_ids is not None:
device = input_ids.device
input_shape = input_ids.shape
else:
device = inputs_embeds.device
input_shape = inputs_embeds.shape[:-1]
seq_length = input_shape[1]
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=device)
embedding_output = self.embeddings(
input_ids=input_ids,
input_shape_ids=input_shape_ids,
input_pronunciation_ids=input_pronunciation_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
attention_mask, encoder_attention_mask = self._create_attention_masks(
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
embedding_output=embedding_output,
encoder_hidden_states=encoder_hidden_states,
cache_position=cache_position,
past_key_values=past_key_values,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_ids=position_ids,
**kwargs,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
)
# Copied from transformers.models.bert.modeling_bert.BertModel._create_attention_masks
def _create_attention_masks(
self,
attention_mask,
encoder_attention_mask,
embedding_output,
encoder_hidden_states,
cache_position,
past_key_values,
):
if self.config.is_decoder:
attention_mask = create_causal_mask(
config=self.config,
input_embeds=embedding_output,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
)
else:
attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=embedding_output,
attention_mask=attention_mask,
)
if encoder_attention_mask is not None:
encoder_attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=embedding_output,
attention_mask=encoder_attention_mask,
encoder_hidden_states=encoder_hidden_states,
)
return attention_mask, encoder_attention_mask
@auto_docstring(
custom_intro="""
RoCBert Model with contrastive loss and masked_lm_loss during the pretraining.
"""
)
| RoCBertModel |
python | huggingface__transformers | tests/models/t5/test_modeling_t5.py | {
"start": 36116,
"end": 39222
} | class ____(unittest.TestCase):
def test_fp16_fp32_conversion(self):
r"""
A test to check whether the argument `keep_in_fp32_modules` correctly does its job
"""
orig_import = __import__
accelerate_mock = unittest.mock.Mock()
# mock import of accelerate
def import_accelerate_mock(name, *args, **kwargs):
if name == "accelerate":
if accelerate_available:
return accelerate_mock
else:
raise ImportError
return orig_import(name, *args, **kwargs)
# Load without using `accelerate`
with unittest.mock.patch("builtins.__import__", side_effect=import_accelerate_mock):
accelerate_available = False
model = T5ForConditionalGeneration.from_pretrained("google-t5/t5-small", dtype=torch.float16)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.float16)
# Load without in bf16
model = T5ForConditionalGeneration.from_pretrained("google-t5/t5-small", dtype=torch.bfloat16)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.bfloat16)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.bfloat16)
# Load using `accelerate` in bf16
model = T5ForConditionalGeneration.from_pretrained(
"google-t5/t5-small", dtype=torch.bfloat16, device_map="auto"
)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.bfloat16)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.bfloat16)
# Load using `accelerate` in bf16
model = T5ForConditionalGeneration.from_pretrained(
"google-t5/t5-small",
dtype=torch.bfloat16,
)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.bfloat16)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.bfloat16)
# Load without using `accelerate`
model = T5ForConditionalGeneration.from_pretrained(
"google-t5/t5-small",
dtype=torch.float16,
)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.float16)
# Load using `accelerate`
model = T5ForConditionalGeneration.from_pretrained(
"google-t5/t5-small", dtype=torch.float16, device_map="auto"
)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.float16)
@require_torch
@require_sentencepiece
@require_tokenizers
| T5ModelFp16Tests |
python | getsentry__sentry | src/sentry/sentry_apps/api/bases/sentryapps.py | {
"start": 2263,
"end": 3230
} | class ____(SentryPermission):
scope_map = {
"GET": PARANOID_GET,
"POST": ("org:write", "org:admin"),
}
def has_object_permission(self, request: Request, view, context: RpcUserOrganizationContext):
if not hasattr(request, "user") or not request.user:
return False
self.determine_access(request, context)
if superuser_has_permission(request):
return True
# User must be a part of the Org they're trying to create the app in.
if context.organization.status != OrganizationStatus.ACTIVE or not context.member:
raise SentryAppError(
message="User must be a part of the Org they're trying to create the app in",
status_code=401,
)
assert request.method, "method must be present in request to get permissions"
return ensure_scoped_permission(request, self.scope_map.get(request.method))
| SentryAppsPermission |
python | bokeh__bokeh | src/bokeh/core/property/dataspec.py | {
"start": 19248,
"end": 19724
} | class ____(NumberSpec):
""" A |DataSpec| property that accepts non-negative numeric fixed values
for size values or strings that refer to columns in a
:class:`~bokeh.models.sources.ColumnDataSource`.
"""
def prepare_value(self, cls, name, value):
try:
if value < 0:
raise ValueError("Screen sizes must be positive")
except TypeError:
pass
return super().prepare_value(cls, name, value)
| SizeSpec |
python | ipython__ipython | IPython/core/formatters.py | {
"start": 26863,
"end": 27458
} | class ____(BaseFormatter):
"""An SVG formatter.
To define the callables that compute the SVG representation of your
objects, define a :meth:`_repr_svg_` method or use the :meth:`for_type`
or :meth:`for_type_by_name` methods to register functions that handle
this.
The return value of this formatter should be valid SVG enclosed in
```<svg>``` tags, that could be injected into an existing DOM. It should
*not* include the ```<html>`` or ```<body>`` tags.
"""
format_type = Unicode('image/svg+xml')
print_method = ObjectName('_repr_svg_')
| SVGFormatter |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataplex.py | {
"start": 29634,
"end": 31211
} | class ____:
@mock.patch(ENTRY_GROUP_STR)
@mock.patch(HOOK_STR)
def test_execute(self, hook_mock, entry_group_mock):
op = DataplexCatalogListEntryGroupsOperator(
project_id=PROJECT_ID,
location=REGION,
task_id="list_task",
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
hook_mock.return_value.list_entry_groups.return_value = ListEntryGroupsPager(
response=(
ListEntryGroupsResponse(
entry_groups=[
{
"name": "aaa",
"description": "Test Entry Group 1",
"display_name": "Entry Group One",
}
]
)
),
method=mock.MagicMock(),
request=ListEntryGroupsRequest(parent=""),
)
entry_group_mock.return_value.to_dict.return_value = None
op.execute(context=mock.MagicMock())
hook_mock.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
hook_mock.return_value.list_entry_groups.assert_called_once_with(
project_id=PROJECT_ID,
location=REGION,
page_size=None,
page_token=None,
filter_by=None,
order_by=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestDataplexCatalogListEntryGroupsOperator |
python | getsentry__sentry | src/sentry/apidocs/extensions.py | {
"start": 2772,
"end": 3661
} | class ____(OpenApiSerializerFieldExtension):
"""
This extension restricts Sentry's use of the JSONField to mimic a DictField.
It comes from a change in drf-spectacular because rest_framework's JSONField actually accepts
primative, strings, numbers and arrays. drf-spectacular patched this, but in Sentry, we want to
ensure those fields are always given the correct type of 'object'.
issue: https://github.com/tfranzel/drf-spectacular/issues/1095
suggested patch: https://github.com/tfranzel/drf-spectacular/issues/1242#issuecomment-2123492057
"""
target_class = "rest_framework.fields.JSONField"
def map_serializer_field(self, auto_schema, direction):
return build_basic_type(OpenApiTypes.OBJECT)
# TODO: extension to do default error codes on responses.
# https://github.com/tfranzel/drf-spectacular/issues/334
| RestrictedJsonFieldExtension |
python | google__jax | jax/_src/pallas/mosaic/core.py | {
"start": 6985,
"end": 7081
} | class ____(pallas_core.AbstractSemaphoreTy):
type = dma_semaphore
name = "dma_sem"
| DMASemaphore |
python | gevent__gevent | src/gevent/tests/test__socket_dns.py | {
"start": 22184,
"end": 22689
} | class ____(TestCase):
switch_expected = False
if RESOLVER_DNSPYTHON:
# dnspython raises errors for broadcasthost/255.255.255.255, but the system
# can resolve it.
@unittest.skip('ares raises errors for broadcasthost/255.255.255.255')
def test__broadcast__gethostbyaddr(self):
return
test__broadcast__gethostbyname = test__broadcast__gethostbyaddr
add(TestBroadcast, '<broadcast>')
from gevent.resolver._hostsfile import HostsFile
| TestBroadcast |
python | tensorflow__tensorflow | tensorflow/python/ops/ragged/ragged_tensor_supported_values_test.py | {
"start": 1867,
"end": 2394
} | class ____(extension_type.BatchableExtensionType):
value: tensor.Tensor
@property
def shape(self):
return self.value.shape
@property
def dtype(self):
return self.value.dtype
def __getitem__(self, idx):
return WrappedTensor(self.value.__getitem__(idx))
def set_shape(self, shape):
return self.value.set_shape(shape)
class Spec(type_spec.TypeSpec):
@property
def shape(self):
return self.value.shape
@property
def dtype(self):
return self.value.dtype
| WrappedTensor |
python | numpy__numpy | numpy/_core/tests/test_numerictypes.py | {
"start": 6550,
"end": 6888
} | class ____(CreateValues):
"""Check the creation of heterogeneous arrays (nested, multiple rows)"""
_descr = Ndescr
multiple_rows = 1
_buffer = NbufferT
############################################################
# Reading tests
############################################################
| TestCreateValuesNestedMultiple |
python | weaviate__weaviate-python-client | weaviate/groups/sync.py | {
"start": 163,
"end": 253
} | class ____(_GroupsOIDCExecutor[ConnectionSync]):
pass
@executor.wrap("sync")
| _GroupsOIDC |
python | PyCQA__pylint | tests/functional/n/not_async_context_manager.py | {
"start": 618,
"end": 727
} | class ____:
def __aenter__(self):
pass
def __aexit__(self, *args):
pass
| GoodAsyncManager |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/guides/components/shell-script-component/with-config-schema.py | {
"start": 106,
"end": 360
} | class ____(dg.Component, dg.Resolvable):
"""Models a shell script as a Dagster asset."""
script_path: str
asset_specs: Sequence[dg.ResolvedAssetSpec]
def build_defs(self, context: dg.ComponentLoadContext) -> dg.Definitions: ...
| ShellCommand |
python | pyca__cryptography | tests/hazmat/primitives/test_aead.py | {
"start": 36292,
"end": 44123
} | class ____:
@pytest.mark.skipif(
sys.platform not in {"linux", "darwin"} or sys.maxsize < 2**31,
reason="mmap and 64-bit platform required",
)
def test_data_too_large(self):
key = AESSIV.generate_key(256)
aessiv = AESSIV(key)
large_data = large_mmap()
with pytest.raises(OverflowError):
aessiv.encrypt(large_data, None)
with pytest.raises(OverflowError):
aessiv.encrypt(b"irrelevant", [large_data])
with pytest.raises(OverflowError):
aessiv.decrypt(b"very very irrelevant", [large_data])
def test_empty(self):
key = AESSIV.generate_key(256)
aessiv = AESSIV(key)
if rust_openssl.CRYPTOGRAPHY_OPENSSL_350_OR_GREATER:
assert (
AESSIV(
b"+'\xe4)\xfbl\x02g\x8eX\x9c\xccD7\xc5\xad\xfbD\xb31\xabm!\xea2\x17'\xe6\xec\x03\xd3T"
).encrypt(b"", [b""])
== b"\xb2\xb25N7$\xdc\xda\xa8^\xcf\x02\x9bI\xa9\x0c"
)
else:
with pytest.raises(ValueError):
aessiv.encrypt(b"", None)
with pytest.raises(ValueError):
buf = bytearray(16)
aessiv.encrypt_into(b"", None, buf)
with pytest.raises(InvalidTag):
aessiv.decrypt(b"", None)
def test_vectors(self, backend, subtests):
vectors = load_vectors_from_file(
os.path.join("ciphers", "AES", "SIV", "openssl.txt"),
load_nist_vectors,
)
for vector in vectors:
with subtests.test():
key = binascii.unhexlify(vector["key"])
aad1 = vector.get("aad", None)
aad2 = vector.get("aad2", None)
aad3 = vector.get("aad3", None)
aad = [
binascii.unhexlify(a)
for a in (aad1, aad2, aad3)
if a is not None
]
ct = binascii.unhexlify(vector["ciphertext"])
tag = binascii.unhexlify(vector["tag"])
pt = binascii.unhexlify(vector.get("plaintext", b""))
aessiv = AESSIV(key)
computed_ct = aessiv.encrypt(pt, aad)
assert computed_ct[:16] == tag
assert computed_ct[16:] == ct
computed_pt = aessiv.decrypt(computed_ct, aad)
assert computed_pt == pt
def test_vectors_invalid(self, backend, subtests):
vectors = load_vectors_from_file(
os.path.join("ciphers", "AES", "SIV", "openssl.txt"),
load_nist_vectors,
)
for vector in vectors:
with subtests.test():
key = binascii.unhexlify(vector["key"])
aad1 = vector.get("aad", None)
aad2 = vector.get("aad2", None)
aad3 = vector.get("aad3", None)
aad = [
binascii.unhexlify(a)
for a in (aad1, aad2, aad3)
if a is not None
]
ct = binascii.unhexlify(vector["ciphertext"])
aessiv = AESSIV(key)
with pytest.raises(InvalidTag):
badkey = AESSIV(AESSIV.generate_key(256))
badkey.decrypt(ct, aad)
with pytest.raises(InvalidTag):
aessiv.decrypt(ct, [*aad, b""])
with pytest.raises(InvalidTag):
aessiv.decrypt(ct, [b"nonsense"])
with pytest.raises(InvalidTag):
aessiv.decrypt(b"nonsense", aad)
@pytest.mark.parametrize(
("data", "associated_data"),
[
[object(), [b""]],
[b"data" * 5, [object()]],
[b"data" * 5, b""],
],
)
def test_params_not_bytes(self, data, associated_data, backend):
key = AESSIV.generate_key(256)
aessiv = AESSIV(key)
with pytest.raises(TypeError):
aessiv.encrypt(data, associated_data)
with pytest.raises(TypeError):
aessiv.decrypt(data, associated_data)
def test_bad_key(self, backend):
with pytest.raises(TypeError):
AESSIV(object()) # type:ignore[arg-type]
with pytest.raises(ValueError):
AESSIV(b"0" * 31)
def test_bad_generate_key(self, backend):
with pytest.raises(TypeError):
AESSIV.generate_key(object()) # type:ignore[arg-type]
with pytest.raises(ValueError):
AESSIV.generate_key(128)
def test_data_too_short(self, backend):
key = AESSIV.generate_key(256)
aessiv = AESSIV(key)
with pytest.raises(InvalidTag):
aessiv.decrypt(b"tooshort", None)
with pytest.raises(InvalidTag):
buf = bytearray(16)
aessiv.decrypt_into(b"tooshort", None, buf)
def test_associated_data_none_equal_to_empty_list(self, backend):
key = AESSIV.generate_key(256)
aessiv = AESSIV(key)
ct1 = aessiv.encrypt(b"some_data", None)
ct2 = aessiv.encrypt(b"some_data", [])
assert ct1 == ct2
pt1 = aessiv.decrypt(ct1, None)
pt2 = aessiv.decrypt(ct2, [])
assert pt1 == pt2
def test_buffer_protocol(self, backend):
key = AESSIV.generate_key(256)
aessiv = AESSIV(key)
pt = b"encrypt me"
ad = [b"additional"]
ct = aessiv.encrypt(pt, ad)
computed_pt = aessiv.decrypt(ct, ad)
assert computed_pt == pt
aessiv = AESSIV(bytearray(key))
ct2 = aessiv.encrypt(pt, ad)
assert ct2 == ct
computed_pt2 = aessiv.decrypt(ct2, ad)
assert computed_pt2 == pt
def test_encrypt_into(self, backend):
key = AESSIV.generate_key(256)
aessiv = AESSIV(key)
pt = b"encrypt me"
ad = [b"additional"]
buf = bytearray(len(pt) + 16)
n = aessiv.encrypt_into(pt, ad, buf)
assert n == len(pt) + 16
ct = aessiv.encrypt(pt, ad)
assert buf == ct
@pytest.mark.parametrize(
("ptlen", "buflen"), [(10, 25), (10, 27), (15, 30), (20, 37)]
)
def test_encrypt_into_buffer_incorrect_size(self, ptlen, buflen, backend):
key = AESSIV.generate_key(256)
aessiv = AESSIV(key)
pt = b"x" * ptlen
buf = bytearray(buflen)
with pytest.raises(ValueError, match="buffer must be"):
aessiv.encrypt_into(pt, None, buf)
def test_decrypt_into(self, backend):
key = AESSIV.generate_key(256)
aessiv = AESSIV(key)
pt = b"decrypt me"
ad = [b"additional"]
ct = aessiv.encrypt(pt, ad)
buf = bytearray(len(pt))
n = aessiv.decrypt_into(ct, ad, buf)
assert n == len(pt)
assert buf == pt
@pytest.mark.parametrize(
("ctlen", "buflen"), [(26, 9), (26, 11), (31, 14), (36, 21)]
)
def test_decrypt_into_buffer_incorrect_size(self, ctlen, buflen, backend):
key = AESSIV.generate_key(256)
aessiv = AESSIV(key)
ct = b"x" * ctlen
buf = bytearray(buflen)
with pytest.raises(ValueError, match="buffer must be"):
aessiv.decrypt_into(ct, None, buf)
def test_decrypt_into_invalid_tag(self, backend):
key = AESSIV.generate_key(256)
aessiv = AESSIV(key)
pt = b"some data"
ad = [b"additional"]
ct = aessiv.encrypt(pt, ad)
# Corrupt the ciphertext
corrupted_ct = bytearray(ct)
corrupted_ct[0] ^= 1
buf = bytearray(len(pt))
with pytest.raises(InvalidTag):
aessiv.decrypt_into(bytes(corrupted_ct), ad, buf)
@pytest.mark.skipif(
not _aead_supported(AESGCMSIV),
reason="Does not support AESGCMSIV",
)
| TestAESSIV |
python | matplotlib__matplotlib | tools/subset.py | {
"start": 12916,
"end": 15334
} | class ____:
def __init__(self, data):
_, numTables, _, _, _ = struct.unpack('>IHHHH', data[:12])
self.tables = {}
for i in range(numTables):
tag, _, offset, length = struct.unpack(
'>4sIII', data[12 + 16 * i: 28 + 16 * i])
self.tables[tag] = data[offset: offset + length]
def hhea(self):
r = {}
d = self.tables['hhea']
r['Ascender'], r['Descender'], r['LineGap'] = struct.unpack(
'>hhh', d[4:10])
return r
def os2(self):
r = {}
d = self.tables['OS/2']
r['fsSelection'], = struct.unpack('>H', d[62:64])
r['sTypoAscender'], r['sTypoDescender'], r['sTypoLineGap'] = \
struct.unpack('>hhh', d[68:74])
r['usWinAscender'], r['usWinDescender'] = struct.unpack(
'>HH', d[74:78])
return r
def set_os2(pe, name, val):
print(f'SetOS2Value("{name}", {val:d})', file=pe)
def set_os2_vert(pe, name, val):
set_os2(pe, name + 'IsOffset', 0)
set_os2(pe, name, val)
# Extract vertical metrics data directly out of font file, and emit
# script code to set the values in the generated font. This is a (rather
# ugly) workaround for the issue described in:
# https://sourceforge.net/p/fontforge/mailman/fontforge-users/thread/20100906085718.GB1907@khaled-laptop/
def extract_vert_to_script(font_in, pe):
with open(font_in, 'rb') as in_file:
data = in_file.read()
sfnt = Sfnt(data)
hhea = sfnt.hhea()
os2 = sfnt.os2()
set_os2_vert(pe, "WinAscent", os2['usWinAscender'])
set_os2_vert(pe, "WinDescent", os2['usWinDescender'])
set_os2_vert(pe, "TypoAscent", os2['sTypoAscender'])
set_os2_vert(pe, "TypoDescent", os2['sTypoDescender'])
set_os2_vert(pe, "HHeadAscent", hhea['Ascender'])
set_os2_vert(pe, "HHeadDescent", hhea['Descender'])
def main(argv):
optlist, args = getopt.gnu_getopt(argv, '', [
'string=', 'strip_names', 'opentype-features', 'simplify', 'new',
'script', 'nmr', 'roundtrip', 'subset=', 'namelist', 'null', 'nd',
'move-display'])
font_in, font_out = args
opts = dict(optlist)
if '--string' in opts:
subset = map(ord, opts['--string'])
else:
subset = getsubset(opts.get('--subset', 'latin'), font_in)
subset_font(font_in, font_out, subset, opts)
if __name__ == '__main__':
main(sys.argv[1:])
| Sfnt |
python | apache__thrift | test/py/TestFrozen.py | {
"start": 4883,
"end": 5500
} | class ____(TestFrozenBase):
def protocol(self, trans):
return TCompactProtocol.TCompactProtocolAcceleratedFactory(fallback=False).getProtocol(trans)
def suite():
suite = unittest.TestSuite()
loader = unittest.TestLoader()
suite.addTest(loader.loadTestsFromTestCase(TestFrozen))
suite.addTest(loader.loadTestsFromTestCase(TestFrozenAcceleratedBinary))
suite.addTest(loader.loadTestsFromTestCase(TestFrozenAcceleratedCompact))
return suite
if __name__ == "__main__":
unittest.main(defaultTest="suite", testRunner=unittest.TextTestRunner(verbosity=2))
| TestFrozenAcceleratedCompact |
python | wandb__wandb | wandb/automations/events.py | {
"start": 11988,
"end": 13161
} | class ____(_BaseRunEventInput):
"""A run state changes.
Examples:
Define an event that triggers for any run in project "my-project" when
its state changes to "finished" (i.e. succeeded) or "failed":
```python
from wandb import Api
from wandb.automations import OnRunState
api = Api()
project = api.project(name="my-project")
event = OnRunState(
scope=project,
filter=RunEvent.state.in_(["finished", "failed"]),
)
```
"""
event_type: Literal[EventType.RUN_STATE] = EventType.RUN_STATE
filter: JsonEncoded[RunStateFilter]
"""Run state condition(s) that must be satisfied for this event to trigger."""
# for type annotations
InputEvent = Annotated[
Union[
OnLinkArtifact,
OnAddArtifactAlias,
OnCreateArtifact,
OnRunMetric,
OnRunState,
],
Field(discriminator="event_type"),
]
# for runtime type checks
InputEventTypes: tuple[type, ...] = get_args(InputEvent.__origin__) # type: ignore[attr-defined]
# ----------------------------------------------------------------------------
| OnRunState |
python | allegroai__clearml | clearml/backend_api/services/v2_20/queues.py | {
"start": 58671,
"end": 60296
} | class ____(Response):
"""
Response of queues.get_next_task endpoint.
:param entry: Entry information
:type entry: Entry
"""
_service = "queues"
_action = "get_next_task"
_version = "2.20"
_schema = {
"definitions": {
"entry": {
"properties": {
"added": {
"description": "Time this entry was added to the queue",
"format": "date-time",
"type": ["string", "null"],
},
"task": {
"description": "Queued task ID",
"type": ["string", "null"],
},
},
"type": "object",
}
},
"properties": {
"entry": {
"description": "Entry information",
"oneOf": [{"$ref": "#/definitions/entry"}, {"type": "null"}],
}
},
"type": "object",
}
def __init__(self, entry: Any = None, **kwargs: Any) -> None:
super(GetNextTaskResponse, self).__init__(**kwargs)
self.entry = entry
@schema_property("entry")
def entry(self) -> Any:
return self._property_entry
@entry.setter
def entry(self, value: Any) -> None:
if value is None:
self._property_entry = None
return
if isinstance(value, dict):
value = Entry.from_dict(value)
else:
self.assert_isinstance(value, "entry", Entry)
self._property_entry = value
| GetNextTaskResponse |
python | kamyu104__LeetCode-Solutions | Python/longest-substring-of-one-repeating-character.py | {
"start": 976,
"end": 2456
} | class ____(object):
def longestRepeating(self, s, queryCharacters, queryIndices):
"""
:type s: str
:type queryCharacters: str
:type queryIndices: List[int]
:rtype: List[int]
"""
LEFT, RIGHT, LEFT_LEN, RIGHT_LEN, LEN, MAX_LEN, SIZE = xrange(7)
def build(i):
return update(s[i])
def update(y):
result = [0]*SIZE
result[LEFT] = result[RIGHT] = y
result[LEN] = result[LEFT_LEN] = result[RIGHT_LEN] = result[MAX_LEN] = 1
return result
def query(x, y):
return x if y is None else \
[x[LEFT],
y[RIGHT],
x[LEFT_LEN]+(y[LEFT_LEN] if x[LEFT_LEN] == x[LEN] and x[RIGHT] == y[LEFT] else 0),
y[RIGHT_LEN]+(x[RIGHT_LEN] if y[RIGHT_LEN] == y[LEN] and y[LEFT] == x[RIGHT] else 0),
x[LEN]+y[LEN],
max(x[MAX_LEN], y[MAX_LEN], x[RIGHT_LEN]+y[LEFT_LEN] if x[RIGHT] == y[LEFT] else 0)]
result = []
st = SegmentTree(len(s), build_fn=build, query_fn=query, update_fn=update)
for c, i in itertools.izip(queryCharacters, queryIndices):
st.update(i, c)
result.append(st.tree[1][MAX_LEN])
return result
# Time: O(nlogn)
# Space: O(n)
import itertools
# Template:
# https://github.com/kamyu104/FacebookHackerCup-2021/blob/main/Round%203/auth_ore_ization.py
| Solution |
python | pytorch__pytorch | tools/stats/utilization_stats_lib.py | {
"start": 1390,
"end": 1861
} | class ____(DataClassJsonMixin): # type: ignore[misc, no-any-unimported]
level: str
timestamp: int
data: RecordData | None = None
cmd_names: list[str] | None = None
error: str | None = None
log_duration: str | None = None
logs: list[str] | None = None
# the db schema related to this is:
# https://github.com/pytorch/test-infra/blob/main/clickhouse_db_schema/oss_ci_utilization/oss_ci_utilization_metadata_schema.sql
@dataclass
| UtilizationRecord |
python | spack__spack | lib/spack/spack/builder.py | {
"start": 9224,
"end": 13164
} | class ____(BuilderMeta):
"""Metaclass to adapt old-style packages to the new architecture based on builders
for the installation phase.
This class does the necessary mangling to function argument so that a call to a
builder object can delegate to a package object.
"""
@staticmethod
def phase_method_adapter(phase_name):
def _adapter(self, pkg, spec, prefix):
phase_fn = getattr(self.pkg_with_dispatcher, phase_name)
return phase_fn(spec, prefix)
return _adapter
@staticmethod
def legacy_long_method_adapter(method_name):
def _adapter(self, spec, prefix):
bind_method = getattr(self.pkg_with_dispatcher, method_name)
return bind_method(spec, prefix)
return _adapter
@staticmethod
def legacy_method_adapter(method_name):
def _adapter(self):
bind_method = getattr(self.pkg_with_dispatcher, method_name)
return bind_method()
return _adapter
@staticmethod
def legacy_attribute_adapter(attribute_name):
def _adapter(self):
return getattr(self.pkg_with_dispatcher, attribute_name)
return property(_adapter)
@staticmethod
def combine_callbacks(pipeline_attribute_name):
"""This function combines callbacks from old-style packages with callbacks that might
be registered for the default builder.
It works by:
1. Extracting the callbacks from the old-style package
2. Transforming those callbacks by adding an adapter that receives a builder as argument
and calls the wrapped function with ``builder.pkg``
3. Combining the list of transformed callbacks with those that might be present in the
default builder
"""
def _adapter(self):
def unwrap_pkg(fn):
@functools.wraps(fn)
def _wrapped(builder):
return fn(builder.pkg_with_dispatcher)
return _wrapped
# Concatenate the current list with the one from package
callbacks_from_package = getattr(self.pkg, pipeline_attribute_name, [])
callbacks_from_package = [(key, unwrap_pkg(x)) for key, x in callbacks_from_package]
callbacks_from_builder = getattr(super(type(self), self), pipeline_attribute_name, [])
return callbacks_from_package + callbacks_from_builder
return property(_adapter)
def __new__(mcs, name, bases, attr_dict):
# Add ways to intercept methods and attribute calls and dispatch
# them first to a package object
default_builder_cls = bases[0]
for phase_name in default_builder_cls.phases:
attr_dict[phase_name] = _PackageAdapterMeta.phase_method_adapter(phase_name)
for method_name in package_methods(default_builder_cls):
attr_dict[method_name] = _PackageAdapterMeta.legacy_method_adapter(method_name)
# These exist e.g. for Python, see discussion in https://github.com/spack/spack/pull/32068
for method_name in package_long_methods(default_builder_cls):
attr_dict[method_name] = _PackageAdapterMeta.legacy_long_method_adapter(method_name)
for attribute_name in package_attributes(default_builder_cls):
attr_dict[attribute_name] = _PackageAdapterMeta.legacy_attribute_adapter(
attribute_name
)
combine_callbacks = _PackageAdapterMeta.combine_callbacks
attr_dict[spack.phase_callbacks._RUN_BEFORE.attribute_name] = combine_callbacks(
spack.phase_callbacks._RUN_BEFORE.attribute_name
)
attr_dict[spack.phase_callbacks._RUN_AFTER.attribute_name] = combine_callbacks(
spack.phase_callbacks._RUN_AFTER.attribute_name
)
return super(_PackageAdapterMeta, mcs).__new__(mcs, name, bases, attr_dict)
| _PackageAdapterMeta |
python | python-excel__xlwt | xlwt/Formatting.py | {
"start": 7900,
"end": 8310
} | class ____(object):
# patterns 0x00 - 0x12
NO_PATTERN = 0x00
SOLID_PATTERN = 0x01
def __init__(self):
self.pattern = self.NO_PATTERN
self.pattern_fore_colour = 0x40
self.pattern_back_colour = 0x41
def _search_key(self):
return (
self.pattern,
self.pattern_fore_colour,
self.pattern_back_colour,
)
| Pattern |
python | huggingface__transformers | src/transformers/utils/attention_visualizer.py | {
"start": 5324,
"end": 10027
} | class ____:
def __init__(self, model_name: str):
config = AutoConfig.from_pretrained(model_name)
self.image_token = "<img>"
if hasattr(config.get_text_config(), "sliding_window"):
self.sliding_window = getattr(config.get_text_config(), "sliding_window", None)
try:
mapped_cls = _get_model_class(config, MODEL_MAPPING)
except Exception:
mapped_cls = _get_model_class(config, MODEL_FOR_PRETRAINING_MAPPING)
if mapped_cls is None:
raise ValueError(f"Model name {model_name} is not supported for attention visualization")
self.mapped_cls = mapped_cls
class _ModelWrapper(mapped_cls, nn.Module):
def __init__(self, config, model_name):
nn.Module.__init__(self)
self.dummy_module = nn.Linear(1, 1)
self.config = config
self.model = _ModelWrapper(config, model_name)
self.model.to(config.dtype)
self.repo_id = model_name
self.config = config
def __call__(self, input_sentence: str, suffix=""):
self.visualize_attention_mask(input_sentence, suffix=suffix)
def visualize_attention_mask(self, input_sentence: str, suffix=""):
model = self.model
kwargs = {}
image_seq_length = None
if self.config.model_type in PROCESSOR_MAPPING_NAMES:
img = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg?download=true"
img = Image.open(io.BytesIO(httpx.get(img, follow_redirects=True).content))
image_seq_length = 5
processor = AutoProcessor.from_pretrained(self.repo_id, image_seq_length=image_seq_length)
if hasattr(processor, "image_token"):
image_token = processor.image_token
else:
image_token = processor.tokenizer.convert_ids_to_tokens([processor.image_token_id])[0]
if image_token:
input_sentence = input_sentence.replace("<img>", image_token)
inputs = processor(images=img, text=input_sentence, suffix=suffix, return_tensors="pt")
self.image_token = processor.tokenizer.convert_ids_to_tokens([processor.image_token_id])[0]
attention_mask = inputs["attention_mask"]
if "token_type_ids" in inputs: # TODO inspect signature of update causal mask
kwargs["token_type_ids"] = inputs["token_type_ids"]
tokens = processor.tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
elif self.config.model_type in TOKENIZER_MAPPING_NAMES:
tokenizer = AutoTokenizer.from_pretrained(self.repo_id)
tokens = tokenizer.tokenize(input_sentence)
attention_mask = tokenizer(input_sentence, return_tensors="pt")["attention_mask"]
else:
raise ValueError(f"Model type {model.config.model_type} does not support attention visualization")
model.config._attn_implementation = "eager"
model.train()
batch_size, seq_length = attention_mask.shape
input_embeds = torch.zeros((batch_size, seq_length, model.config.hidden_size), dtype=self.model.dtype)
cache_position = torch.arange(seq_length)
causal_mask = create_causal_mask(
config=model.config,
input_embeds=input_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=None,
)
if causal_mask is not None:
attention_mask = ~causal_mask.bool()
else:
attention_mask = attention_mask.unsqueeze(1).unsqueeze(1).expand(batch_size, 1, seq_length, seq_length)
top_bottom_border = "##" * (
len(f"Attention visualization for {self.config.model_type} | {self.mapped_cls}") + 4
) # Box width adjusted to text length
side_border = "##"
print(f"\n{top_bottom_border}")
print(
"##"
+ f" Attention visualization for \033[1m{self.config.model_type}:{self.repo_id}\033[0m {self.mapped_cls.__name__}".center(
len(top_bottom_border)
)
+ " "
+ side_border,
)
print(f"{top_bottom_border}")
f_string = generate_attention_matrix_from_mask(
tokens,
attention_mask,
img_token=self.image_token,
sliding_window=getattr(self.config, "sliding_window", None),
token_type_ids=kwargs.get("token_type_ids"),
image_seq_length=image_seq_length,
)
print(f_string)
print(f"{top_bottom_border}")
| AttentionMaskVisualizer |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/sensors/test_emr_base.py | {
"start": 1425,
"end": 2237
} | class ____(EmrBaseSensor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.target_states = [TARGET_STATE]
self.failed_states = [FAILED_STATE]
self.response = {} # will be set in tests
def get_emr_response(self, context: Context):
return self.response
@staticmethod
def state_from_response(response):
return response["SomeKey"]["State"]
@staticmethod
def failure_message_from_response(response):
change_reason = response["SomeKey"].get("StateChangeReason")
if change_reason:
return (
f"for code: {change_reason.get('Code', EMPTY_CODE)} "
f"with message {change_reason.get('Message', 'Unknown')}"
)
return None
| EmrBaseSensorSubclass |
python | pypa__hatch | src/hatch/config/model.py | {
"start": 1144,
"end": 7765
} | class ____(LazilyParsedConfig):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._field_mode = FIELD_TO_PARSE
self._field_project = FIELD_TO_PARSE
self._field_shell = FIELD_TO_PARSE
self._field_dirs = FIELD_TO_PARSE
self._field_projects = FIELD_TO_PARSE
self._field_publish = FIELD_TO_PARSE
self._field_template = FIELD_TO_PARSE
self._field_terminal = FIELD_TO_PARSE
@property
def mode(self):
if self._field_mode is FIELD_TO_PARSE:
if "mode" in self.raw_data:
mode = self.raw_data["mode"]
if not isinstance(mode, str):
self.raise_error("must be a string")
valid_modes = ("aware", "local", "project")
if mode not in valid_modes:
self.raise_error(f"must be one of: {', '.join(valid_modes)}")
self._field_mode = mode
else:
self._field_mode = self.raw_data["mode"] = "local"
return self._field_mode
@mode.setter
def mode(self, value):
self.raw_data["mode"] = value
self._field_mode = FIELD_TO_PARSE
@property
def project(self):
if self._field_project is FIELD_TO_PARSE:
if "project" in self.raw_data:
project = self.raw_data["project"]
if not isinstance(project, str):
self.raise_error("must be a string")
self._field_project = project
else:
self._field_project = self.raw_data["project"] = ""
return self._field_project
@project.setter
def project(self, value):
self.raw_data["project"] = value
self._field_project = FIELD_TO_PARSE
@property
def shell(self):
if self._field_shell is FIELD_TO_PARSE:
if "shell" in self.raw_data:
shell = self.raw_data["shell"]
if isinstance(shell, str):
self._field_shell = ShellConfig({"name": shell}, ("shell",))
elif isinstance(shell, dict):
self._field_shell = ShellConfig(shell, ("shell",))
else:
self.raise_error("must be a string or table")
else:
self.raw_data["shell"] = ""
self._field_shell = ShellConfig({"name": ""}, ("shell",))
return self._field_shell
@shell.setter
def shell(self, value):
self.raw_data["shell"] = value
self._field_shell = FIELD_TO_PARSE
@property
def dirs(self):
if self._field_dirs is FIELD_TO_PARSE:
if "dirs" in self.raw_data:
dirs = self.raw_data["dirs"]
if not isinstance(dirs, dict):
self.raise_error("must be a table")
self._field_dirs = DirsConfig(dirs, ("dirs",))
else:
dirs = {}
self.raw_data["dirs"] = dirs
self._field_dirs = DirsConfig(dirs, ("dirs",))
return self._field_dirs
@dirs.setter
def dirs(self, value):
self.raw_data["dirs"] = value
self._field_dirs = FIELD_TO_PARSE
@property
def projects(self):
if self._field_projects is FIELD_TO_PARSE:
if "projects" in self.raw_data:
projects = self.raw_data["projects"]
if not isinstance(projects, dict):
self.raise_error("must be a table")
project_data = {}
for name, data in projects.items():
if isinstance(data, str):
project_data[name] = ProjectConfig({"location": data}, ("projects", name))
elif isinstance(data, dict):
project_data[name] = ProjectConfig(data, ("projects", name))
else:
self.raise_error("must be a string or table", extra_steps=(name,))
self._field_projects = project_data
else:
self._field_projects = self.raw_data["projects"] = {}
return self._field_projects
@projects.setter
def projects(self, value):
self.raw_data["projects"] = value
self._field_projects = FIELD_TO_PARSE
@property
def publish(self):
if self._field_publish is FIELD_TO_PARSE:
if "publish" in self.raw_data:
publish = self.raw_data["publish"]
if not isinstance(publish, dict):
self.raise_error("must be a table")
for name, data in publish.items():
if not isinstance(data, dict):
self.raise_error("must be a table", extra_steps=(name,))
self._field_publish = publish
else:
self._field_publish = self.raw_data["publish"] = {"index": {"repo": "main"}}
return self._field_publish
@publish.setter
def publish(self, value):
self.raw_data["publish"] = value
self._field_publish = FIELD_TO_PARSE
@property
def template(self):
if self._field_template is FIELD_TO_PARSE:
if "template" in self.raw_data:
template = self.raw_data["template"]
if not isinstance(template, dict):
self.raise_error("must be a table")
self._field_template = TemplateConfig(template, ("template",))
else:
template = {}
self.raw_data["template"] = template
self._field_template = TemplateConfig(template, ("template",))
return self._field_template
@template.setter
def template(self, value):
self.raw_data["template"] = value
self._field_template = FIELD_TO_PARSE
@property
def terminal(self):
if self._field_terminal is FIELD_TO_PARSE:
if "terminal" in self.raw_data:
terminal = self.raw_data["terminal"]
if not isinstance(terminal, dict):
self.raise_error("must be a table")
self._field_terminal = TerminalConfig(terminal, ("terminal",))
else:
terminal = {}
self.raw_data["terminal"] = terminal
self._field_terminal = TerminalConfig(terminal, ("terminal",))
return self._field_terminal
@terminal.setter
def terminal(self, value):
self.raw_data["terminal"] = value
self._field_terminal = FIELD_TO_PARSE
| RootConfig |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDict2.py | {
"start": 170,
"end": 1357
} | class ____(Movie, total=True):
based_on: str
def get_movie_name(movie: Movie):
return movie.get("name")
name2 = get_movie_name({"name": "ET", "year": 1982})
movie1: Movie = {"name": "Blade Runner", "year": 1982}
movie2: Movie = {
"name": "Blade Runner",
# This should generate an error because
# the type is incorrect.
"year": "1982",
}
movie3: Movie = {"name": "Blade Runner"}
movie4: Movie = {
# This should generate an error because
# the key name is not supported.
"name2": "Blade Runner"
}
movie5: Movie = Movie(movie3)
movie6: Movie = Movie(movie3, year=2030, name="New movie")
book1: BookBasedMovie = {"name": "Moonraker", "year": 1979, "based_on": "Moonraker"}
book2: BookBasedMovie = {"year": 1979, "based_on": "Moonraker"}
book3: BookBasedMovie = {"based_on": "Moonraker"}
book4: BookBasedMovie = {
# This should generate an error because 'author' isn't
# a defined field.
"author": "Ian Fleming",
"based_on": "Moonraker",
}
book5: BookBasedMovie = {
"name": "Moonraker",
"year": 1979,
# This should generate an error because 'based_on' is
# a required field, and it's not provided.
}
| BookBasedMovie |
python | miyuchina__mistletoe | test/test_repr.py | {
"start": 84,
"end": 5736
} | class ____(unittest.TestCase):
def _check_repr_matches(self, token, expected_match):
expected_match = "<mistletoe.{} at 0x".format(expected_match)
output = repr(token)[:len(expected_match)]
self.assertEqual(output, expected_match)
# Block tokens
def test_document(self):
doc = Document("# Foo")
self._check_repr_matches(doc, "block_token.Document with 1 child line_number=1")
def test_heading(self):
doc = Document("# Foo")
self._check_repr_matches(doc.children[0], "block_token.Heading with 1 child line_number=1 level=1")
self._check_repr_matches(doc.children[0].children[0], "span_token.RawText content='Foo'")
def test_subheading(self):
doc = Document("# Foo\n## Bar")
self._check_repr_matches(doc.children[1], "block_token.Heading with 1 child line_number=2 level=2")
self._check_repr_matches(doc.children[1].children[0], "span_token.RawText content='Bar'")
def test_quote(self):
doc = Document("> Foo")
self._check_repr_matches(doc.children[0], "block_token.Quote with 1 child line_number=1")
def test_paragraph(self):
doc = Document("Foo")
self._check_repr_matches(doc.children[0], "block_token.Paragraph with 1 child line_number=1")
def test_blockcode(self):
doc = Document("Foo\n\n\tBar\n\nBaz")
self._check_repr_matches(doc.children[1], "block_token.BlockCode with 1 child line_number=3 language=''")
def test_codefence(self):
doc = Document("""```python\nprint("Hello, World!"\n```""")
self._check_repr_matches(doc.children[0], "block_token.CodeFence with 1 child line_number=1 language='python'")
def test_unordered_list(self):
doc = Document("* Foo\n* Bar\n* Baz")
self._check_repr_matches(doc.children[0], "block_token.List with 3 children line_number=1 loose=False start=None")
self._check_repr_matches(doc.children[0].children[0], "block_token.ListItem with 1 child line_number=1 leader='*' indentation=0 prepend=2 loose=False")
def test_ordered_list(self):
doc = Document("1. Foo\n2. Bar\n3. Baz")
self._check_repr_matches(doc.children[0], "block_token.List with 3 children line_number=1 loose=False start=1")
self._check_repr_matches(doc.children[0].children[0], "block_token.ListItem with 1 child line_number=1 leader='1.' indentation=0 prepend=3 loose=False")
def test_table(self):
doc = Document("| Foo | Bar | Baz |\n|:--- |:---:| ---:|\n| Foo | Bar | Baz |\n")
self._check_repr_matches(doc.children[0], "block_token.Table with 1 child line_number=1 column_align=[None, 0, 1]")
self._check_repr_matches(doc.children[0].children[0], "block_token.TableRow with 3 children line_number=3 row_align=[None, 0, 1]")
self._check_repr_matches(doc.children[0].children[0].children[0], "block_token.TableCell with 1 child line_number=3 align=None")
def test_thematicbreak(self):
doc = Document("Foo\n\n---\n\nBar\n")
self._check_repr_matches(doc.children[1], "block_token.ThematicBreak line_number=3")
# No test for ``Footnote``
def test_htmlblock(self):
try:
block_token.add_token(block_token.HtmlBlock)
doc = Document("<pre>\nFoo\n</pre>\n")
finally:
block_token.reset_tokens()
self._check_repr_matches(doc.children[0], "block_token.HtmlBlock with 1 child line_number=1")
self._check_repr_matches(doc.children[0].children[0], "span_token.RawText content='<pre>\\nFoo\\n</pre>'")
# Span tokens
def test_strong(self):
doc = Document("**foo**\n")
self._check_repr_matches(doc.children[0].children[0], "span_token.Strong with 1 child")
def test_emphasis(self):
doc = Document("*foo*\n")
self._check_repr_matches(doc.children[0].children[0], "span_token.Emphasis with 1 child")
def test_inlinecode(self):
doc = Document("`foo`\n")
self._check_repr_matches(doc.children[0].children[0], "span_token.InlineCode with 1 child")
def test_strikethrough(self):
doc = Document("~~foo~~\n")
self._check_repr_matches(doc.children[0].children[0], "span_token.Strikethrough with 1 child")
def test_image(self):
doc = Document("""\n""")
self._check_repr_matches(doc.children[0].children[0], "span_token.Image with 1 child src='http://www.example.org/' title='bar'")
def test_link(self):
doc = Document("[Foo](http://www.example.org/)\n")
self._check_repr_matches(doc.children[0].children[0], "span_token.Link with 1 child target='http://www.example.org/' title=''")
def test_autolink(self):
doc = Document("Foo <http://www.example.org/>\n")
self._check_repr_matches(doc.children[0].children[1], "span_token.AutoLink with 1 child target='http://www.example.org/' mailto=False")
def test_escapesequence(self):
doc = Document("\\*\n")
self._check_repr_matches(doc.children[0].children[0], "span_token.EscapeSequence with 1 child")
def test_soft_linebreak(self):
doc = Document("Foo\nBar\n")
self._check_repr_matches(doc.children[0].children[1], "span_token.LineBreak content='' soft=True")
def test_hard_linebreak(self):
doc = Document("Foo\\\nBar\n")
self._check_repr_matches(doc.children[0].children[1], "span_token.LineBreak content='\\\\' soft=False")
def test_rawtext(self):
doc = Document("Foo\n")
self._check_repr_matches(doc.children[0].children[0], "span_token.RawText content='Foo'")
| TestRepr |
python | wntrblm__nox | nox/manifest.py | {
"start": 16153,
"end": 18015
} | class ____(Mapping[str, bool]):
"""Eval locals using keywords.
When looking up a local variable the variable name is compared against
the set of keywords. If the local variable name matches any *substring* of
any keyword, then the name lookup returns True. Otherwise, the name lookup
returns False.
"""
def __init__(self, keywords: Iterable[str]) -> None:
self._keywords = frozenset(keywords)
def __getitem__(self, variable_name: str) -> bool:
return any(variable_name in keyword for keyword in self._keywords)
def __iter__(self) -> Iterator[str]:
return iter(self._keywords)
def __len__(self) -> int:
return len(self._keywords)
def keyword_match(expression: str, keywords: Iterable[str]) -> Any:
"""See if an expression matches the given set of keywords."""
# TODO: see if we can use ast.literal_eval here.
locals = KeywordLocals(set(keywords))
return eval(expression, {}, locals) # noqa: S307
def _null_session_func_(session: Session) -> None:
"""A no-op session for parametrized sessions with no available params."""
session.skip("This session had no parameters available.")
def _normalized_session_match(session_name: str, session: SessionRunner) -> bool:
"""Checks if session_name matches session."""
if session_name == session.name or session_name in session.signatures:
return True
for name in session.signatures:
equal_rep = _normalize_arg(session_name) == _normalize_arg(name)
if equal_rep:
return True
# Exhausted
return False
def _normalize_arg(arg: str) -> str:
"""Normalize arg for comparison."""
try:
return str(ast.dump(ast.parse(arg)))
except (TypeError, SyntaxError):
return arg
_null_session_func = Func(_null_session_func_, python=False)
| KeywordLocals |
python | tornadoweb__tornado | tornado/test/auth_test.py | {
"start": 7164,
"end": 7603
} | class ____(TwitterClientHandler):
@gen.coroutine
def get(self):
if self.get_argument("oauth_token", None):
user = yield self.get_authenticated_user()
self.finish(user)
else:
# New style: with @gen.coroutine the result must be yielded
# or else the request will be auto-finished too soon.
yield self.authorize_redirect()
| TwitterClientLoginGenCoroutineHandler |
python | catalyst-team__catalyst | catalyst/contrib/losses/regression.py | {
"start": 3867,
"end": 4457
} | class ____(nn.Module):
"""RSquareLoss"""
def forward(self, outputs: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
"""Compute the loss.
Args:
outputs (torch.Tensor): model outputs
targets (torch.Tensor): targets
Returns:
torch.Tensor: computed loss
"""
var_y = torch.var(targets, unbiased=False)
return 1.0 - F.mse_loss(outputs, targets, reduction="mean") / var_y
__all__ = [
"HuberLossV0",
"CategoricalRegressionLoss",
"QuantileRegressionLoss",
"RSquareLoss",
]
| RSquareLoss |
python | getsentry__sentry | src/sentry/integrations/github_enterprise/actions/create_ticket.py | {
"start": 194,
"end": 846
} | class ____(TicketEventAction):
id = "sentry.integrations.github_enterprise.notify_action.GitHubEnterpriseCreateTicketAction"
label = "Create a GitHub Enterprise issue in {integration} with these "
ticket_type = "a GitHub Enterprise issue"
# TODO(schew2381): Add link to docs once GitHub issue sync is available
link = None
provider = IntegrationProviderSlug.GITHUB_ENTERPRISE.value
def generate_footer(self, rule_url: str) -> str:
return "\nThis issue was automatically created by Sentry via [{}]({})".format(
self.rule.label,
absolute_uri(rule_url),
)
| GitHubEnterpriseCreateTicketAction |
python | google__jax | tests/distributed_initialize_test.py | {
"start": 847,
"end": 1432
} | class ____(jtu.JaxTestCase):
@jtu.skip_under_pytest(
"""Side effects from jax.distributed.initialize conflict with other tests
in the same process. pytest runs multiple tests in the same process."""
)
def test_is_distributed_initialized(self):
port = portpicker.pick_unused_port() # type: ignore
self.assertFalse(jax.distributed.is_initialized())
jax.distributed.initialize(f"localhost:{port}", 1, 0)
self.assertTrue(jax.distributed.is_initialized())
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| DistributedInitializeTest |
python | networkx__networkx | networkx/algorithms/centrality/tests/test_betweenness_centrality.py | {
"start": 28150,
"end": 33669
} | class ____:
def test_K5(self):
"""Edge betweenness centrality: K5"""
G = nx.complete_graph(5)
b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False)
b_answer = dict.fromkeys(G.edges(), 1)
for n in sorted(G.edges()):
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
def test_C4(self):
"""Edge betweenness centrality: C4"""
G = nx.cycle_graph(4)
b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False)
b_answer = {(0, 1): 2, (0, 3): 2, (1, 2): 2, (2, 3): 2}
for n in sorted(G.edges()):
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
def test_P4(self):
"""Edge betweenness centrality: P4"""
G = nx.path_graph(4)
b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False)
b_answer = {(0, 1): 3, (1, 2): 4, (2, 3): 3}
for n in sorted(G.edges()):
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
def test_balanced_tree(self):
"""Edge betweenness centrality: balanced tree"""
G = nx.balanced_tree(r=2, h=2)
b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False)
b_answer = {(0, 1): 12, (0, 2): 12, (1, 3): 6, (1, 4): 6, (2, 5): 6, (2, 6): 6}
for n in sorted(G.edges()):
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
def test_weighted_graph(self):
"""Edge betweenness centrality: weighted"""
eList = [
(0, 1, 5),
(0, 2, 4),
(0, 3, 3),
(0, 4, 2),
(1, 2, 4),
(1, 3, 1),
(1, 4, 3),
(2, 4, 5),
(3, 4, 4),
]
G = nx.Graph()
G.add_weighted_edges_from(eList)
b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False)
b_answer = {
(0, 1): 0.0,
(0, 2): 1.0,
(0, 3): 2.0,
(0, 4): 1.0,
(1, 2): 2.0,
(1, 3): 3.5,
(1, 4): 1.5,
(2, 4): 1.0,
(3, 4): 0.5,
}
for n in sorted(G.edges()):
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
def test_normalized_weighted_graph(self):
"""Edge betweenness centrality: normalized weighted"""
eList = [
(0, 1, 5),
(0, 2, 4),
(0, 3, 3),
(0, 4, 2),
(1, 2, 4),
(1, 3, 1),
(1, 4, 3),
(2, 4, 5),
(3, 4, 4),
]
G = nx.Graph()
G.add_weighted_edges_from(eList)
b = nx.edge_betweenness_centrality(G, weight="weight", normalized=True)
b_answer = {
(0, 1): 0.0,
(0, 2): 1.0,
(0, 3): 2.0,
(0, 4): 1.0,
(1, 2): 2.0,
(1, 3): 3.5,
(1, 4): 1.5,
(2, 4): 1.0,
(3, 4): 0.5,
}
norm = len(G) * (len(G) - 1) / 2
for n in sorted(G.edges()):
assert b[n] == pytest.approx(b_answer[n] / norm, abs=1e-7)
def test_weighted_multigraph(self):
"""Edge betweenness centrality: weighted multigraph"""
eList = [
(0, 1, 5),
(0, 1, 4),
(0, 2, 4),
(0, 3, 3),
(0, 3, 3),
(0, 4, 2),
(1, 2, 4),
(1, 3, 1),
(1, 3, 2),
(1, 4, 3),
(1, 4, 4),
(2, 4, 5),
(3, 4, 4),
(3, 4, 4),
]
G = nx.MultiGraph()
G.add_weighted_edges_from(eList)
b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False)
b_answer = {
(0, 1, 0): 0.0,
(0, 1, 1): 0.5,
(0, 2, 0): 1.0,
(0, 3, 0): 0.75,
(0, 3, 1): 0.75,
(0, 4, 0): 1.0,
(1, 2, 0): 2.0,
(1, 3, 0): 3.0,
(1, 3, 1): 0.0,
(1, 4, 0): 1.5,
(1, 4, 1): 0.0,
(2, 4, 0): 1.0,
(3, 4, 0): 0.25,
(3, 4, 1): 0.25,
}
for n in sorted(G.edges(keys=True)):
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
def test_normalized_weighted_multigraph(self):
"""Edge betweenness centrality: normalized weighted multigraph"""
eList = [
(0, 1, 5),
(0, 1, 4),
(0, 2, 4),
(0, 3, 3),
(0, 3, 3),
(0, 4, 2),
(1, 2, 4),
(1, 3, 1),
(1, 3, 2),
(1, 4, 3),
(1, 4, 4),
(2, 4, 5),
(3, 4, 4),
(3, 4, 4),
]
G = nx.MultiGraph()
G.add_weighted_edges_from(eList)
b = nx.edge_betweenness_centrality(G, weight="weight", normalized=True)
b_answer = {
(0, 1, 0): 0.0,
(0, 1, 1): 0.5,
(0, 2, 0): 1.0,
(0, 3, 0): 0.75,
(0, 3, 1): 0.75,
(0, 4, 0): 1.0,
(1, 2, 0): 2.0,
(1, 3, 0): 3.0,
(1, 3, 1): 0.0,
(1, 4, 0): 1.5,
(1, 4, 1): 0.0,
(2, 4, 0): 1.0,
(3, 4, 0): 0.25,
(3, 4, 1): 0.25,
}
norm = len(G) * (len(G) - 1) / 2
for n in sorted(G.edges(keys=True)):
assert b[n] == pytest.approx(b_answer[n] / norm, abs=1e-7)
| TestWeightedEdgeBetweennessCentrality |
python | sqlalchemy__sqlalchemy | test/orm/declarative/test_mixin.py | {
"start": 1790,
"end": 2276
} | class ____(
testing.AssertsCompiledSQL,
fixtures.TestBase,
testing.AssertsExecutionResults,
):
def setup_test(self):
global Base, mapper_registry
mapper_registry = registry(metadata=MetaData())
class Base(DeclarativeBase):
registry = mapper_registry
def teardown_test(self):
close_all_sessions()
clear_mappers()
with testing.db.begin() as conn:
Base.metadata.drop_all(conn)
| DeclarativeTestBase |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/elements.py | {
"start": 27071,
"end": 27884
} | class ____(
roles.DMLColumnRole,
roles.DDLConstraintColumnRole,
roles.ColumnsClauseRole,
CompilerElement,
):
"""A compiler-only column element used for ad-hoc string compilations.
.. versionadded:: 2.0
"""
__slots__ = ()
_propagate_attrs = util.EMPTY_DICT
_is_collection_aggregate = False
_is_implicitly_boolean = False
def _with_binary_element_type(self, type_):
raise NotImplementedError()
def _gen_cache_key(self, anon_map, bindparams):
raise NotImplementedError()
@property
def _from_objects(self) -> List[FromClause]:
raise NotImplementedError()
# SQLCoreOperations should be suiting the ExpressionElementRole
# and ColumnsClauseRole. however the MRO issues become too elaborate
# at the moment.
| CompilerColumnElement |
python | readthedocs__readthedocs.org | readthedocs/gold/tests/test_signals.py | {
"start": 257,
"end": 1318
} | class ____(PaymentMixin, TestCase):
def setUp(self):
super().setUp()
self.user = fixture.get(User)
@requests_mock.Mocker(kw="mock_request")
def test_delete_subscription(self, mock_request):
subscription = fixture.get(GoldUser, user=self.user, stripe_id="cus_123")
self.assertIsNotNone(subscription)
mock_request.get(
"https://api.stripe.com/v1/customers/cus_123",
json={"id": "cus_123", "object": "customer"},
)
mock_request.delete(
"https://api.stripe.com/v1/customers/cus_123",
json={"deleted": True, "customer": "cus_123"},
)
subscription.delete()
assert mock_request.request_history[0]._request.method == "GET"
assert mock_request.request_history[0]._request.url == "https://api.stripe.com/v1/customers/cus_123"
assert mock_request.request_history[1]._request.method == "DELETE"
assert mock_request.request_history[1]._request.url == "https://api.stripe.com/v1/customers/cus_123"
| GoldSignalTests |
python | streamlit__streamlit | lib/tests/streamlit/toast_test.py | {
"start": 925,
"end": 3154
} | class ____(DeltaGeneratorTestCase):
def test_just_text(self):
"""Test that it can be called with just text."""
st.toast("toast text")
c = self.get_delta_from_queue().new_element.toast
assert c.body == "toast text"
assert c.icon == ""
assert c.duration == 4.0
def test_no_text(self):
"""Test that an error is raised if no text is provided."""
with pytest.raises(StreamlitAPIException) as e:
st.toast("")
assert str(e.value) == "Toast body cannot be blank - please provide a message."
def test_valid_icon(self):
"""Test that it can be called passing a valid emoji as icon."""
st.toast("toast text", icon="🦄")
c = self.get_delta_from_queue().new_element.toast
assert c.body == "toast text"
assert c.icon == "🦄"
assert c.duration == 4.0
def test_invalid_icon(self):
"""Test that an error is raised if an invalid icon is provided."""
with pytest.raises(StreamlitAPIException) as e:
st.toast("toast text", icon="invalid")
assert str(e.value) == (
'The value "invalid" is not a valid emoji. Shortcodes '
"are not allowed, please use a single character instead."
)
@parameterized.expand([("short", 4), ("long", 10), ("infinite", 0), (10, 10)])
def test_duration_variants(
self: ToastTest,
duration: Literal["short", "long", "infinite"] | int,
expected_duration: float,
) -> None:
"""Test all supported duration values, including default and None."""
st.toast("toast text", duration=duration)
c = self.get_delta_from_queue().new_element.toast
assert c.body == "toast text"
assert c.HasField("duration")
assert c.duration == expected_duration
def test_invalid_duration(self):
"""Test that an error is raised if an invalid duration is provided."""
with pytest.raises(StreamlitValueError) as e:
st.toast("toast text", duration="invalid")
assert (
str(e.value)
== "Invalid `duration` value. Supported values: short, long, infinite, a positive integer."
)
| ToastTest |
python | ray-project__ray | python/ray/_private/runtime_env/agent/runtime_env_agent.py | {
"start": 1859,
"end": 2201
} | class ____:
# Whether or not the env was installed correctly.
success: bool
# If success is True, will be a serialized RuntimeEnvContext
# If success is False, will be an error message.
result: str
# The time to create a runtime env in ms.
creation_time_ms: int
# e.g., "working_dir"
UriType = str
| CreatedEnvResult |
python | django-haystack__django-haystack | haystack/backends/__init__.py | {
"start": 35636,
"end": 36622
} | class ____:
backend = BaseSearchBackend
query = BaseSearchQuery
unified_index = UnifiedIndex
def __init__(self, using=None):
if using is None:
using = DEFAULT_ALIAS
self.using = using
self.options = settings.HAYSTACK_CONNECTIONS.get(self.using, {})
self.queries = []
self._index = None
self._backend = None
def get_backend(self):
if self._backend is None:
self._backend = self.backend(self.using, **self.options)
return self._backend
def reset_sessions(self):
"""Reset any transient connections, file handles, etc."""
self._backend = None
def get_query(self):
return self.query(using=self.using)
def reset_queries(self):
del self.queries[:]
def get_unified_index(self):
if self._index is None:
self._index = self.unified_index(self.options.get("EXCLUDED_INDEXES", []))
return self._index
| BaseEngine |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_display_units06.py | {
"start": 315,
"end": 1205
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_display_units06.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [93548544, 93550464]
data = [
[10000000, 20000000, 30000000, 20000000, 10000000],
]
worksheet.write_column(0, 0, data[0])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.set_y_axis({"display_units": "millions", "display_units_visible": 0})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | doocs__leetcode | solution/1000-1099/1048.Longest String Chain/Solution2.py | {
"start": 0,
"end": 397
} | class ____:
def longestStrChain(self, words: List[str]) -> int:
words.sort(key=lambda x: len(x))
res = 0
mp = {}
for word in words:
x = 1
for i in range(len(word)):
pre = word[:i] + word[i + 1 :]
x = max(x, mp.get(pre, 0) + 1)
mp[word] = x
res = max(res, x)
return res
| Solution |
python | catalyst-team__catalyst | catalyst/loggers/comet.py | {
"start": 284,
"end": 6858
} | class ____(ILogger):
"""Comet logger for parameters, metrics, images and other artifacts
(videos, audio, model checkpoints, etc.).
You will need a Comet API Key to log your Catalyst runs to Comet.
You can sign up for a free account here: https://www.comet.ml/signup
Check out our Quickstart Guide to learn more:
https://www.comet.ml/docs/quick-start/.
Args:
workspace: Workspace to log the experiment.
project_name: Project to log the experiment.
experiment_id: Experiment ID of a previously logged Experiment.
Used to continue logging to an existing experiment (resume experiment).
comet_mode: Specifies whether to run an Online Experiment
or Offline Experiment
tags: A list of tags to add to the Experiment.
experiment_kwargs: Used to pass additional arguments to
the Experiment object
log_batch_metrics: boolean flag to log batch metrics
(default: SETTINGS.log_batch_metrics or False).
log_epoch_metrics: boolean flag to log epoch metrics
(default: SETTINGS.log_epoch_metrics or True).
Python API examples:
.. code-block:: python
from catalyst import dl
runner = dl.SupervisedRunner()
runner.train(
...
loggers={
"comet": dl.CometLogger(
project_name="my-comet-project"
)
}
)
.. code-block:: python
from catalyst import dl
class CustomRunner(dl.IRunner):
# ...
def get_loggers(self):
return {
"console": dl.ConsoleLogger(),
"comet": dl.CometLogger(
project_name="my-comet-project"
)
}
# ...
runner = CustomRunner().run()
"""
def __init__(
self,
workspace: Optional[str] = None,
project_name: Optional[str] = None,
experiment_id: Optional[str] = None,
comet_mode: str = "online",
tags: List[str] = None,
logging_frequency: int = 1,
log_batch_metrics: bool = SETTINGS.log_batch_metrics,
log_epoch_metrics: bool = SETTINGS.log_epoch_metrics,
**experiment_kwargs: Dict,
) -> None:
super().__init__(
log_batch_metrics=log_batch_metrics, log_epoch_metrics=log_epoch_metrics
)
self.comet_mode = comet_mode
self.workspace = workspace
self.project_name = project_name
self.experiment_id = experiment_id
self.experiment_kwargs = experiment_kwargs
self.comet_mode = comet_mode
self.logging_frequency = logging_frequency
self.experiment = self._get_experiment(self.comet_mode, self.experiment_id)
self.experiment.log_other("Created from", "Catalyst")
if tags is not None:
self.experiment.add_tags(tags)
@property
def logger(self):
"""Internal logger/experiment/etc. from the monitoring system."""
return self.experiment
def _get_experiment(self, mode, experiment_id=None):
if mode == "offline":
if experiment_id is not None:
return comet_ml.ExistingOfflineExperiment(
previous_experiment=experiment_id,
workspace=self.workspace,
project_name=self.project_name,
**self.experiment_kwargs,
)
return comet_ml.OfflineExperiment(
workspace=self.workspace,
project_name=self.project_name,
**self.experiment_kwargs,
)
else:
if experiment_id is not None:
return comet_ml.ExistingExperiment(
previous_experiment=experiment_id,
workspace=self.workspace,
project_name=self.project_name,
**self.experiment_kwargs,
)
return comet_ml.Experiment(
workspace=self.workspace,
project_name=self.project_name,
**self.experiment_kwargs,
)
def log_artifact(
self,
tag: str,
runner: "IRunner",
artifact: object = None,
path_to_artifact: str = None,
scope: str = None,
) -> None:
"""Logs artifact (any arbitrary file or object) to the logger."""
metadata_parameters = {"loader_key": runner.loader_key, "scope": scope}
passed_metadata_parameters = {
k: v for k, v in metadata_parameters.items() if v is not None
}
if path_to_artifact:
self.experiment.log_asset(
path_to_artifact,
file_name=tag,
step=runner.batch_step,
metadata=passed_metadata_parameters,
)
else:
self.experiment.log_asset_data(
pickle.dumps(artifact),
file_name=tag,
step=runner.batch_step,
epoch=runner.epoch_step,
metadata=passed_metadata_parameters,
)
def log_image(
self,
tag: str,
image: np.ndarray,
runner: "IRunner",
scope: str = None,
) -> None:
"""Logs image to the logger."""
image_name = f"{scope}_{tag}" if scope is not None else tag
self.experiment.log_image(image, name=image_name, step=runner.batch_step)
def log_hparams(self, hparams: Dict, runner: "IRunner" = None) -> None:
"""Logs hyperparameters to the logger."""
self.experiment.log_parameters(hparams)
def log_metrics(
self,
metrics: Dict[str, float],
scope: str,
runner: "IRunner",
) -> None:
"""Logs the metrics to the logger."""
if (scope == "batch" and not self.log_batch_metrics) or (
scope in ["loader", "epoch"] and not self.log_epoch_metrics
):
return
if runner.batch_step % self.logging_frequency == 0:
self.experiment.log_metrics(
metrics,
step=runner.batch_step,
epoch=runner.epoch_step,
prefix=f"{runner.loader_key}_{scope}",
)
def flush_log(self) -> None:
"""Flushes the loggers."""
pass
def close_log(self) -> None:
"""Closes the logger."""
self.experiment.end()
__all__ = ["CometLogger"]
| CometLogger |
python | pandas-dev__pandas | asv_bench/benchmarks/frame_methods.py | {
"start": 18790,
"end": 19086
} | class ____:
params = [True, False]
param_names = ["ascending"]
def setup(self, ascending):
self.df = DataFrame(np.random.randn(1000000, 2), columns=list("AB"))
def time_frame_sort_values(self, ascending):
self.df.sort_values(by="A", ascending=ascending)
| SortValues |
python | readthedocs__readthedocs.org | readthedocs/api/v3/filters.py | {
"start": 1963,
"end": 2423
} | class ____(filters.FilterSet):
name = filters.CharFilter(field_name="name", lookup_expr="icontains")
full_name = filters.CharFilter(field_name="full_name", lookup_expr="icontains")
organization = filters.CharFilter(field_name="organization__slug")
class Meta:
model = RemoteRepository
fields = [
"name",
"full_name",
"vcs_provider",
"organization",
]
| RemoteRepositoryFilter |
python | pandas-dev__pandas | pandas/core/reshape/reshape.py | {
"start": 1645,
"end": 40256
} | class ____:
"""
Helper class to unstack data / pivot with multi-level index
Parameters
----------
index : MultiIndex
level : int or str, default last level
Level to "unstack". Accepts a name for the level.
fill_value : scalar, optional
Default value to fill in missing values if subgroups do not have the
same set of labels. By default, missing values will be replaced with
the default fill value for that data type, NaN for float, NaT for
datetimelike, etc. For integer types, by default data will converted to
float and missing values will be set to NaN.
constructor : object
Pandas ``DataFrame`` or subclass used to create unstacked
response. If None, DataFrame will be used.
Examples
--------
>>> index = pd.MultiIndex.from_tuples(
... [("one", "a"), ("one", "b"), ("two", "a"), ("two", "b")]
... )
>>> s = pd.Series(np.arange(1, 5, dtype=np.int64), index=index)
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
Returns
-------
unstacked : DataFrame
"""
def __init__(
self, index: MultiIndex, level: Level, constructor, sort: bool = True
) -> None:
self.constructor = constructor
self.sort = sort
self.index = index.remove_unused_levels()
self.level = self.index._get_level_number(level)
# `nan` values have code `-1`, when sorting, we lift to assign them
# at index 0
self.has_nan = -1 in self.index.codes[self.level]
should_lift = self.has_nan and self.sort
self.lift = 1 if should_lift else 0
# Note: the "pop" below alters these in-place.
self.new_index_levels = list(self.index.levels)
self.new_index_names = list(self.index.names)
self.removed_name = self.new_index_names.pop(self.level)
self.removed_level = self.new_index_levels.pop(self.level)
self.removed_level_full = index.levels[self.level]
self.unique_nan_index: int = -1
if not self.sort:
unique_codes: np.ndarray = unique(self.index.codes[self.level])
if self.has_nan:
# drop nan codes, because they are not represented in level
nan_mask = unique_codes == -1
unique_codes = unique_codes[~nan_mask]
self.unique_nan_index = np.flatnonzero(nan_mask)[0]
self.removed_level = self.removed_level.take(unique_codes)
self.removed_level_full = self.removed_level_full.take(unique_codes)
if get_option("performance_warnings"):
# Bug fix GH 20601
# If the data frame is too big, the number of unique index combination
# will cause int32 overflow on windows environments.
# We want to check and raise a warning before this happens
num_rows = max(index_level.size for index_level in self.new_index_levels)
num_columns = self.removed_level.size
# GH20601: This forces an overflow if the number of cells is too high.
# GH 26314: Previous ValueError raised was too restrictive for many users.
num_cells = num_rows * num_columns
if num_cells > np.iinfo(np.int32).max:
warnings.warn(
f"The following operation may generate {num_cells} cells "
f"in the resulting pandas object.",
PerformanceWarning,
stacklevel=find_stack_level(),
)
self._make_selectors()
@cache_readonly
def _indexer_and_to_sort(
self,
) -> tuple[
npt.NDArray[np.intp],
list[np.ndarray], # each has _some_ signed integer dtype
]:
v = self.level
codes = list(self.index.codes)
if not self.sort:
# Create new codes considering that labels are already sorted
codes = [factorize(code)[0] for code in codes]
levs = list(self.index.levels)
to_sort = codes[:v] + codes[v + 1 :] + [codes[v]]
sizes = tuple(len(x) for x in levs[:v] + levs[v + 1 :] + [levs[v]])
comp_index, obs_ids = get_compressed_ids(to_sort, sizes)
ngroups = len(obs_ids)
indexer = get_group_index_sorter(comp_index, ngroups)
return indexer, to_sort
@cache_readonly
def sorted_labels(self) -> list[np.ndarray]:
indexer, to_sort = self._indexer_and_to_sort
if self.sort:
return [line.take(indexer) for line in to_sort]
return to_sort
def _make_sorted_values(self, values: np.ndarray) -> np.ndarray:
indexer, _ = self._indexer_and_to_sort
sorted_values = algos.take_nd(values, indexer, axis=0)
return sorted_values
def _make_selectors(self) -> None:
new_levels = self.new_index_levels
# make the mask
remaining_labels = self.sorted_labels[:-1]
level_sizes = tuple(len(x) for x in new_levels)
comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes)
ngroups = len(obs_ids)
comp_index = ensure_platform_int(comp_index)
stride = self.index.levshape[self.level] + self.has_nan
self.full_shape = ngroups, stride
selector = self.sorted_labels[-1] + stride * comp_index + self.lift
mask = np.zeros(np.prod(self.full_shape), dtype=bool)
mask.put(selector, True)
if mask.sum() < len(self.index):
raise ValueError("Index contains duplicate entries, cannot reshape")
self.group_index = comp_index
self.mask = mask
if self.sort:
self.compressor = comp_index.searchsorted(np.arange(ngroups))
else:
self.compressor = np.sort(np.unique(comp_index, return_index=True)[1])
@cache_readonly
def mask_all(self) -> bool:
return bool(self.mask.all())
@cache_readonly
def arange_result(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.bool_]]:
# We cache this for reuse in ExtensionBlock._unstack
dummy_arr = np.arange(len(self.index), dtype=np.intp)
new_values, mask = self.get_new_values(dummy_arr, fill_value=-1)
return new_values, mask.any(0)
# TODO: in all tests we have mask.any(0).all(); can we rely on that?
def get_result(self, obj, value_columns, fill_value) -> DataFrame:
values = obj._values
if values.ndim == 1:
values = values[:, np.newaxis]
if value_columns is None and values.shape[1] != 1: # pragma: no cover
raise ValueError("must pass column labels for multi-column data")
new_values, _ = self.get_new_values(values, fill_value)
columns = self.get_new_columns(value_columns)
index = self.new_index
result = self.constructor(
new_values, index=index, columns=columns, dtype=new_values.dtype, copy=False
)
if isinstance(values, np.ndarray):
base, new_base = values.base, new_values.base
elif isinstance(values, NDArrayBackedExtensionArray):
base, new_base = values._ndarray.base, new_values._ndarray.base
else:
base, new_base = 1, 2 # type: ignore[assignment]
if base is new_base:
# We can only get here if one of the dimensions is size 1
result._mgr.add_references(obj._mgr)
return result
def get_new_values(self, values, fill_value=None):
if values.ndim == 1:
values = values[:, np.newaxis]
sorted_values = self._make_sorted_values(values)
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
result_shape = (length, result_width)
mask = self.mask
mask_all = self.mask_all
# we can simply reshape if we don't have a mask
if mask_all and len(values):
# TODO: Under what circumstances can we rely on sorted_values
# matching values? When that holds, we can slice instead
# of take (in particular for EAs)
new_values = (
sorted_values.reshape(length, width, stride)
.swapaxes(1, 2)
.reshape(result_shape)
)
new_mask = np.ones(result_shape, dtype=bool)
return new_values, new_mask
dtype = values.dtype
if isinstance(dtype, ExtensionDtype):
# GH#41875
# We are assuming that fill_value can be held by this dtype,
# unlike the non-EA case that promotes.
cls = dtype.construct_array_type()
new_values = cls._empty(result_shape, dtype=dtype)
if not mask_all:
new_values[:] = fill_value
else:
if not mask_all:
old_dtype = dtype
dtype, fill_value = maybe_promote(dtype, fill_value)
if old_dtype != dtype:
if old_dtype.kind not in "iub":
warnings.warn(
# GH#12189, GH#53868
"Using a fill_value that cannot be held in the existing "
"dtype is deprecated and will raise in a future version.",
Pandas4Warning,
stacklevel=find_stack_level(),
)
elif not isna(fill_value):
warnings.warn(
# GH#12189, GH#53868
"Using a fill_value that cannot be held in the existing "
"dtype is deprecated and will raise in a future version.",
Pandas4Warning,
stacklevel=find_stack_level(),
)
new_values = np.empty(result_shape, dtype=dtype)
if not mask_all:
new_values.fill(fill_value)
name = dtype.name
new_mask = np.zeros(result_shape, dtype=bool)
# we need to convert to a basic dtype
# and possibly coerce an input to our output dtype
# e.g. ints -> floats
if needs_i8_conversion(values.dtype):
sorted_values = sorted_values.view("i8")
new_values = new_values.view("i8")
else:
sorted_values = sorted_values.astype(name, copy=False)
# fill in our values & mask
libreshape.unstack(
sorted_values,
mask.view("u1"),
stride,
length,
width,
new_values,
new_mask.view("u1"),
)
# reconstruct dtype if needed
if needs_i8_conversion(values.dtype):
# view as datetime64 so we can wrap in DatetimeArray and use
# DTA's view method
new_values = new_values.view("M8[ns]")
new_values = ensure_wrapped_if_datetimelike(new_values)
new_values = new_values.view(values.dtype)
return new_values, new_mask
def get_new_columns(self, value_columns: Index | None):
if value_columns is None:
if not self.has_nan:
return self.removed_level._rename(name=self.removed_name)
lev = self.removed_level.insert(0, item=self.removed_level._na_value)
return lev.rename(self.removed_name)
stride = len(self.removed_level) + self.has_nan
width = len(value_columns)
propagator = np.repeat(np.arange(width), stride)
new_levels: FrozenList | list[Index]
if isinstance(value_columns, MultiIndex):
new_levels = value_columns.levels + (self.removed_level_full,) # pyright: ignore[reportOperatorIssue]
new_names = value_columns.names + (self.removed_name,)
new_codes = [lab.take(propagator) for lab in value_columns.codes]
else:
new_levels = [
value_columns,
self.removed_level_full,
]
new_names = [value_columns.name, self.removed_name]
new_codes = [propagator]
repeater = self._repeater
# The entire level is then just a repetition of the single chunk:
new_codes.append(np.tile(repeater, width))
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
@cache_readonly
def _repeater(self) -> np.ndarray:
# The two indices differ only if the unstacked level had unused items:
if len(self.removed_level_full) != len(self.removed_level):
# In this case, we remap the new codes to the original level:
repeater = self.removed_level_full.get_indexer(self.removed_level)
if self.has_nan:
# insert nan index at first position
repeater = np.insert(repeater, 0, -1)
else:
# Otherwise, we just use each level item exactly once:
stride = len(self.removed_level) + self.has_nan
repeater = np.arange(stride) - self.lift
if self.has_nan and not self.sort:
assert self.unique_nan_index > -1, (
"`unique_nan_index` not properly initialized"
)
# assign -1 where should be nan according to the unique values.
repeater[self.unique_nan_index] = -1
# compensate for the removed index level
repeater[self.unique_nan_index + 1 :] -= 1
return repeater
@cache_readonly
def new_index(self) -> MultiIndex | Index:
# Does not depend on values or value_columns
if self.sort:
labels = self.sorted_labels[:-1]
else:
v = self.level
codes = list(self.index.codes)
labels = codes[:v] + codes[v + 1 :]
result_codes = [lab.take(self.compressor) for lab in labels]
# construct the new index
if len(self.new_index_levels) == 1:
level, level_codes = self.new_index_levels[0], result_codes[0]
if (level_codes == -1).any():
level = level.insert(len(level), level._na_value)
return level.take(level_codes).rename(self.new_index_names[0])
return MultiIndex(
levels=self.new_index_levels,
codes=result_codes,
names=self.new_index_names,
verify_integrity=False,
)
def _unstack_multiple(
data: Series | DataFrame, clocs, fill_value=None, sort: bool = True
):
if len(clocs) == 0:
return data
# NOTE: This doesn't deal with hierarchical columns yet
index = data.index
index = cast(MultiIndex, index) # caller is responsible for checking
# GH 19966 Make sure if MultiIndexed index has tuple name, they will be
# recognised as a whole
if clocs in index.names:
clocs = [clocs]
clocs = [index._get_level_number(i) for i in clocs]
rlocs = [i for i in range(index.nlevels) if i not in clocs]
clevels = [index.levels[i] for i in clocs]
ccodes = [index.codes[i] for i in clocs]
cnames = [index.names[i] for i in clocs]
rlevels = [index.levels[i] for i in rlocs]
rcodes = [index.codes[i] for i in rlocs]
rnames = [index.names[i] for i in rlocs]
shape = tuple(len(x) for x in clevels)
group_index = get_group_index(ccodes, shape, sort=False, xnull=False)
comp_ids, obs_ids = compress_group_index(group_index, sort=False)
recons_codes = decons_obs_group_ids(comp_ids, obs_ids, shape, ccodes, xnull=False)
if not rlocs:
# Everything is in clocs, so the dummy df has a regular index
dummy_index = Index(obs_ids, name="__placeholder__")
else:
dummy_index = MultiIndex(
levels=rlevels + [obs_ids],
codes=rcodes + [comp_ids],
names=rnames + ["__placeholder__"],
verify_integrity=False,
)
if isinstance(data, Series):
dummy = data.copy(deep=False)
dummy.index = dummy_index
unstacked = dummy.unstack("__placeholder__", fill_value=fill_value, sort=sort)
new_levels = clevels
new_names = cnames
new_codes = recons_codes
else:
if isinstance(data.columns, MultiIndex):
result = data
while clocs:
val = clocs.pop(0)
# error: Incompatible types in assignment (expression has type
# "DataFrame | Series", variable has type "DataFrame")
result = result.unstack( # type: ignore[assignment]
val, fill_value=fill_value, sort=sort
)
clocs = [v if v < val else v - 1 for v in clocs]
return result
# GH#42579 deep=False to avoid consolidating
dummy_df = data.copy(deep=False)
dummy_df.index = dummy_index
# error: Incompatible types in assignment (expression has type "DataFrame |
# Series", variable has type "DataFrame")
unstacked = dummy_df.unstack( # type: ignore[assignment]
"__placeholder__", fill_value=fill_value, sort=sort
)
if isinstance(unstacked, Series):
unstcols = unstacked.index
else:
unstcols = unstacked.columns
assert isinstance(unstcols, MultiIndex) # for mypy
new_levels = [unstcols.levels[0]] + clevels
new_names = [data.columns.name] + cnames
new_codes = [unstcols.codes[0]]
new_codes.extend(rec.take(unstcols.codes[-1]) for rec in recons_codes)
new_columns = MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
if isinstance(unstacked, Series):
unstacked.index = new_columns
else:
unstacked.columns = new_columns
return unstacked
@overload
def unstack(obj: Series, level, fill_value=..., sort: bool = ...) -> DataFrame: ...
@overload
def unstack(
obj: Series | DataFrame, level, fill_value=..., sort: bool = ...
) -> Series | DataFrame: ...
def unstack(
obj: Series | DataFrame, level, fill_value=None, sort: bool = True
) -> Series | DataFrame:
if isinstance(level, (tuple, list)):
if len(level) != 1:
# _unstack_multiple only handles MultiIndexes,
# and isn't needed for a single level
return _unstack_multiple(obj, level, fill_value=fill_value, sort=sort)
else:
level = level[0]
if not is_integer(level) and not level == "__placeholder__":
# check if level is valid in case of regular index
obj.index._get_level_number(level)
if isinstance(obj, DataFrame):
if isinstance(obj.index, MultiIndex):
return _unstack_frame(obj, level, fill_value=fill_value, sort=sort)
else:
return obj.T.stack()
elif not isinstance(obj.index, MultiIndex):
# GH 36113
# Give nicer error messages when unstack a Series whose
# Index is not a MultiIndex.
raise ValueError(
f"index must be a MultiIndex to unstack, {type(obj.index)} was passed"
)
else:
if is_1d_only_ea_dtype(obj.dtype):
return _unstack_extension_series(obj, level, fill_value, sort=sort)
unstacker = _Unstacker(
obj.index, level=level, constructor=obj._constructor_expanddim, sort=sort
)
return unstacker.get_result(obj, value_columns=None, fill_value=fill_value)
def _unstack_frame(
obj: DataFrame, level, fill_value=None, sort: bool = True
) -> DataFrame:
assert isinstance(obj.index, MultiIndex) # checked by caller
unstacker = _Unstacker(
obj.index, level=level, constructor=obj._constructor, sort=sort
)
if not obj._can_fast_transpose:
mgr = obj._mgr.unstack(unstacker, fill_value=fill_value)
return obj._constructor_from_mgr(mgr, axes=mgr.axes)
else:
return unstacker.get_result(
obj, value_columns=obj.columns, fill_value=fill_value
)
def _unstack_extension_series(
series: Series, level, fill_value, sort: bool
) -> DataFrame:
"""
Unstack an ExtensionArray-backed Series.
The ExtensionDtype is preserved.
Parameters
----------
series : Series
A Series with an ExtensionArray for values
level : Any
The level name or number.
fill_value : Any
The user-level (not physical storage) fill value to use for
missing values introduced by the reshape. Passed to
``series.values.take``.
sort : bool
Whether to sort the resulting MuliIndex levels
Returns
-------
DataFrame
Each column of the DataFrame will have the same dtype as
the input Series.
"""
# Defer to the logic in ExtensionBlock._unstack
df = series.to_frame()
result = df.unstack(level=level, fill_value=fill_value, sort=sort)
# equiv: result.droplevel(level=0, axis=1)
# but this avoids an extra copy
result.columns = result.columns._drop_level_numbers([0])
# error: Incompatible return value type (got "DataFrame | Series", expected
# "DataFrame")
return result # type: ignore[return-value]
def stack(
frame: DataFrame, level=-1, dropna: bool = True, sort: bool = True
) -> Series | DataFrame:
"""
Convert DataFrame to Series with multi-level Index. Columns become the
second level of the resulting hierarchical index
Returns
-------
stacked : Series or DataFrame
"""
def stack_factorize(index):
if index.is_unique:
return index, np.arange(len(index))
codes, categories = factorize_from_iterable(index)
return categories, codes
N, K = frame.shape
# Will also convert negative level numbers and check if out of bounds.
level_num = frame.columns._get_level_number(level)
if isinstance(frame.columns, MultiIndex):
return _stack_multi_columns(
frame, level_num=level_num, dropna=dropna, sort=sort
)
elif isinstance(frame.index, MultiIndex):
new_levels = list(frame.index.levels)
new_codes = [lab.repeat(K) for lab in frame.index.codes]
clev, clab = stack_factorize(frame.columns)
new_levels.append(clev)
new_codes.append(np.tile(clab, N).ravel())
new_names = list(frame.index.names)
new_names.append(frame.columns.name)
new_index = MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
else:
levels, (ilab, clab) = zip(*map(stack_factorize, (frame.index, frame.columns)))
codes = ilab.repeat(K), np.tile(clab, N).ravel()
new_index = MultiIndex(
levels=levels,
codes=codes,
names=[frame.index.name, frame.columns.name],
verify_integrity=False,
)
new_values: ArrayLike
if not frame.empty and frame._is_homogeneous_type:
# For homogeneous EAs, frame._values will coerce to object. So
# we concatenate instead.
dtypes = list(frame.dtypes._values)
dtype = dtypes[0]
if isinstance(dtype, ExtensionDtype):
arr = dtype.construct_array_type()
new_values = arr._concat_same_type(
[col._values for _, col in frame.items()]
)
new_values = _reorder_for_extension_array_stack(new_values, N, K)
else:
# homogeneous, non-EA
new_values = frame._values.ravel()
else:
# non-homogeneous
new_values = frame._values.ravel()
if dropna:
mask = notna(new_values)
new_values = new_values[mask]
new_index = new_index[mask]
return frame._constructor_sliced(new_values, index=new_index)
def stack_multiple(frame: DataFrame, level, dropna: bool = True, sort: bool = True):
# If all passed levels match up to column names, no
# ambiguity about what to do
if all(lev in frame.columns.names for lev in level):
result = frame
for lev in level:
# error: Incompatible types in assignment (expression has type
# "Series | DataFrame", variable has type "DataFrame")
result = stack(result, lev, dropna=dropna, sort=sort) # type: ignore[assignment]
# Otherwise, level numbers may change as each successive level is stacked
elif all(isinstance(lev, int) for lev in level):
# As each stack is done, the level numbers decrease, so we need
# to account for that when level is a sequence of ints
result = frame
# _get_level_number() checks level numbers are in range and converts
# negative numbers to positive
level = [frame.columns._get_level_number(lev) for lev in level]
while level:
lev = level.pop(0)
# error: Incompatible types in assignment (expression has type
# "Series | DataFrame", variable has type "DataFrame")
result = stack(result, lev, dropna=dropna, sort=sort) # type: ignore[assignment]
# Decrement all level numbers greater than current, as these
# have now shifted down by one
level = [v if v <= lev else v - 1 for v in level]
else:
raise ValueError(
"level should contain all level names or all level "
"numbers, not a mixture of the two."
)
return result
def _stack_multi_column_index(columns: MultiIndex) -> MultiIndex | Index:
"""Creates a MultiIndex from the first N-1 levels of this MultiIndex."""
if len(columns.levels) <= 2:
return columns.levels[0]._rename(name=columns.names[0])
levs = (
[lev[c] if c >= 0 else None for c in codes]
for lev, codes in zip(columns.levels[:-1], columns.codes[:-1], strict=True)
)
# Remove duplicate tuples in the MultiIndex.
tuples = zip(*levs)
unique_tuples = (key for key, _ in itertools.groupby(tuples))
new_levs = zip(*unique_tuples)
# The dtype of each level must be explicitly set to avoid inferring the wrong type.
# See GH-36991.
return MultiIndex.from_arrays(
[
# Not all indices can accept None values.
Index(new_lev, dtype=lev.dtype) if None not in new_lev else new_lev
for new_lev, lev in zip(new_levs, columns.levels)
],
names=columns.names[:-1],
)
def _stack_multi_columns(
frame: DataFrame, level_num: int = -1, dropna: bool = True, sort: bool = True
) -> DataFrame:
def _convert_level_number(level_num: int, columns: Index):
"""
Logic for converting the level number to something we can safely pass
to swaplevel.
If `level_num` matches a column name return the name from
position `level_num`, otherwise return `level_num`.
"""
if level_num in columns.names:
return columns.names[level_num]
return level_num
this = frame.copy(deep=False)
mi_cols = this.columns # cast(MultiIndex, this.columns)
assert isinstance(mi_cols, MultiIndex) # caller is responsible
# this makes life much simpler
if level_num != mi_cols.nlevels - 1:
# roll levels to put selected level at end
roll_columns = mi_cols
for i in range(level_num, mi_cols.nlevels - 1):
# Need to check if the ints conflict with level names
lev1 = _convert_level_number(i, roll_columns)
lev2 = _convert_level_number(i + 1, roll_columns)
roll_columns = roll_columns.swaplevel(lev1, lev2)
this.columns = mi_cols = roll_columns
if not mi_cols._is_lexsorted() and sort:
# Workaround the edge case where 0 is one of the column names,
# which interferes with trying to sort based on the first
# level
level_to_sort = _convert_level_number(0, mi_cols)
this = this.sort_index(level=level_to_sort, axis=1)
mi_cols = this.columns
mi_cols = cast(MultiIndex, mi_cols)
new_columns = _stack_multi_column_index(mi_cols)
# time to ravel the values
new_data = {}
level_vals = mi_cols.levels[-1]
level_codes = unique(mi_cols.codes[-1])
if sort:
level_codes = np.sort(level_codes)
level_vals_nan = level_vals.insert(len(level_vals), None)
level_vals_used = np.take(level_vals_nan, level_codes)
levsize = len(level_codes)
drop_cols = []
for key in new_columns:
try:
loc = this.columns.get_loc(key)
except KeyError:
drop_cols.append(key)
continue
# can make more efficient?
# we almost always return a slice
# but if unsorted can get a boolean
# indexer
if not isinstance(loc, slice):
slice_len = len(loc)
else:
slice_len = loc.stop - loc.start
if slice_len != levsize:
chunk = this.loc[:, this.columns[loc]]
chunk.columns = level_vals_nan.take(chunk.columns.codes[-1])
value_slice = chunk.reindex(columns=level_vals_used).values
else:
subset = this.iloc[:, loc]
dtype = find_common_type(subset.dtypes.tolist())
if isinstance(dtype, ExtensionDtype):
# TODO(EA2D): won't need special case, can go through .values
# paths below (might change to ._values)
value_slice = dtype.construct_array_type()._concat_same_type(
[x._values.astype(dtype, copy=False) for _, x in subset.items()]
)
N, K = subset.shape
idx = np.arange(N * K).reshape(K, N).T.reshape(-1)
value_slice = value_slice.take(idx)
else:
value_slice = subset.values
if value_slice.ndim > 1:
# i.e. not extension
value_slice = value_slice.ravel()
new_data[key] = value_slice
if len(drop_cols) > 0:
new_columns = new_columns.difference(drop_cols)
N = len(this)
if isinstance(this.index, MultiIndex):
new_levels = list(this.index.levels)
new_names = list(this.index.names)
new_codes = [lab.repeat(levsize) for lab in this.index.codes]
else:
old_codes, old_levels = factorize_from_iterable(this.index)
new_levels = [old_levels]
new_codes = [old_codes.repeat(levsize)]
new_names = [this.index.name] # something better?
new_levels.append(level_vals)
new_codes.append(np.tile(level_codes, N))
new_names.append(frame.columns.names[level_num])
new_index = MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
result = frame._constructor(new_data, index=new_index, columns=new_columns)
if frame.columns.nlevels > 1:
desired_columns = frame.columns._drop_level_numbers([level_num]).unique()
if not result.columns.equals(desired_columns):
result = result[desired_columns]
# more efficient way to go about this? can do the whole masking biz but
# will only save a small amount of time...
if dropna:
result = result.dropna(axis=0, how="all")
return result
def _reorder_for_extension_array_stack(
arr: ExtensionArray, n_rows: int, n_columns: int
) -> ExtensionArray:
"""
Re-orders the values when stacking multiple extension-arrays.
The indirect stacking method used for EAs requires a followup
take to get the order correct.
Parameters
----------
arr : ExtensionArray
n_rows, n_columns : int
The number of rows and columns in the original DataFrame.
Returns
-------
taken : ExtensionArray
The original `arr` with elements re-ordered appropriately
Examples
--------
>>> arr = np.array(["a", "b", "c", "d", "e", "f"])
>>> _reorder_for_extension_array_stack(arr, 2, 3)
array(['a', 'c', 'e', 'b', 'd', 'f'], dtype='<U1')
>>> _reorder_for_extension_array_stack(arr, 3, 2)
array(['a', 'd', 'b', 'e', 'c', 'f'], dtype='<U1')
"""
# final take to get the order correct.
# idx is an indexer like
# [c0r0, c1r0, c2r0, ...,
# c0r1, c1r1, c2r1, ...]
idx = np.arange(n_rows * n_columns).reshape(n_columns, n_rows).T.reshape(-1)
return arr.take(idx)
def stack_v3(frame: DataFrame, level: list[int]) -> Series | DataFrame:
if frame.columns.nunique() != len(frame.columns):
raise ValueError("Columns with duplicate values are not supported in stack")
if not len(level):
return frame
set_levels = set(level)
stack_cols = frame.columns._drop_level_numbers(
[k for k in range(frame.columns.nlevels - 1, -1, -1) if k not in set_levels]
)
result: Series | DataFrame
if not isinstance(frame.columns, MultiIndex):
# GH#58817 Fast path when we're stacking the columns of a non-MultiIndex.
# When columns are homogeneous EAs, we pass through object
# dtype but this is still slightly faster than the normal path.
if len(frame.columns) > 0 and frame._is_homogeneous_type:
dtype = frame._mgr.blocks[0].dtype
else:
dtype = None
result = frame._constructor_sliced(
frame._values.reshape(-1, order="F"), dtype=dtype
)
else:
result = stack_reshape(frame, level, set_levels, stack_cols)
# Construct the correct MultiIndex by combining the frame's index and
# stacked columns.
ratio = 0 if frame.empty else len(result) // len(frame)
index_levels: list | FrozenList
if isinstance(frame.index, MultiIndex):
index_levels = frame.index.levels
index_codes = list(np.tile(frame.index.codes, (1, ratio)))
else:
codes, uniques = factorize(frame.index, use_na_sentinel=False)
index_levels = [uniques]
index_codes = list(np.tile(codes, (1, ratio)))
if len(level) > 1:
# Arrange columns in the order we want to take them, e.g. level=[2, 0, 1]
sorter = np.argsort(level)
assert isinstance(stack_cols, MultiIndex)
ordered_stack_cols = stack_cols._reorder_ilevels(sorter)
else:
ordered_stack_cols = stack_cols
ordered_stack_cols_unique = ordered_stack_cols.unique()
if isinstance(ordered_stack_cols, MultiIndex):
column_levels = ordered_stack_cols.levels
column_codes = ordered_stack_cols.drop_duplicates().codes
else:
column_levels = [ordered_stack_cols_unique]
column_codes = [factorize(ordered_stack_cols_unique, use_na_sentinel=False)[0]]
# error: Incompatible types in assignment (expression has type "list[ndarray[Any,
# dtype[Any]]]", variable has type "FrozenList")
column_codes = [np.repeat(codes, len(frame)) for codes in column_codes] # type: ignore[assignment]
result.index = MultiIndex(
levels=index_levels + column_levels,
codes=index_codes + column_codes,
names=frame.index.names + list(ordered_stack_cols.names),
verify_integrity=False,
)
# sort result, but faster than calling sort_index since we know the order we need
len_df = len(frame)
n_uniques = len(ordered_stack_cols_unique)
indexer = np.arange(n_uniques)
idxs = np.tile(len_df * indexer, len_df) + np.repeat(np.arange(len_df), n_uniques)
result = result.take(idxs)
# Reshape/rename if needed and dropna
if result.ndim == 2 and frame.columns.nlevels == len(level):
if len(result.columns) == 0:
result = Series(index=result.index)
else:
result = result.iloc[:, 0]
if result.ndim == 1:
result.name = None
return result
def stack_reshape(
frame: DataFrame, level: list[int], set_levels: set[int], stack_cols: Index
) -> Series | DataFrame:
"""Reshape the data of a frame for stack.
This function takes care of most of the work that stack needs to do. Caller
will sort the result once the appropriate index is set.
Parameters
----------
frame: DataFrame
DataFrame that is to be stacked.
level: list of ints.
Levels of the columns to stack.
set_levels: set of ints.
Same as level, but as a set.
stack_cols: Index.
Columns of the result when the DataFrame is stacked.
Returns
-------
The data of behind the stacked DataFrame.
"""
# non-MultIndex takes a fast path.
assert isinstance(frame.columns, MultiIndex)
# If we need to drop `level` from columns, it needs to be in descending order
drop_levnums = sorted(level, reverse=True)
# Grab data for each unique index to be stacked
buf = []
for idx in stack_cols.unique():
if len(frame.columns) == 1:
data = frame.copy(deep=False)
else:
# Take the data from frame corresponding to this idx value
if len(level) == 1:
idx = (idx,)
gen = iter(idx)
column_indexer = tuple(
next(gen) if k in set_levels else slice(None)
for k in range(frame.columns.nlevels)
)
data = frame.loc[:, column_indexer]
if len(level) < frame.columns.nlevels:
data.columns = data.columns._drop_level_numbers(drop_levnums)
elif stack_cols.nlevels == 1:
if data.ndim == 1:
data.name = 0
else:
data.columns = default_index(len(data.columns))
buf.append(data)
if len(buf) > 0 and not frame.empty:
result = concat(buf, ignore_index=True)
else:
# input is empty
if len(level) < frame.columns.nlevels:
# concat column order may be different from dropping the levels
new_columns = frame.columns._drop_level_numbers(drop_levnums).unique()
else:
new_columns = [0]
result = DataFrame(columns=new_columns, dtype=frame._values.dtype)
if len(level) < frame.columns.nlevels:
# concat column order may be different from dropping the levels
desired_columns = frame.columns._drop_level_numbers(drop_levnums).unique()
if not result.columns.equals(desired_columns):
result = result[desired_columns]
return result
| _Unstacker |
python | huggingface__transformers | src/transformers/models/d_fine/modeling_d_fine.py | {
"start": 44079,
"end": 46391
} | class ____(nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than
torchvision.models.resnet[18,34,50,101] produce nans.
"""
def __init__(self, n):
super().__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
num_batches_tracked_key = prefix + "num_batches_tracked"
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def forward(self, x):
# move reshapes to the beginning
# to make it user-friendly
weight = self.weight.reshape(1, -1, 1, 1)
bias = self.bias.reshape(1, -1, 1, 1)
running_var = self.running_var.reshape(1, -1, 1, 1)
running_mean = self.running_mean.reshape(1, -1, 1, 1)
epsilon = 1e-5
scale = weight * (running_var + epsilon).rsqrt()
bias = bias - running_mean * scale
return x * scale + bias
def replace_batch_norm(model):
r"""
Recursively replace all `torch.nn.BatchNorm2d` with `DFineFrozenBatchNorm2d`.
Args:
model (torch.nn.Module):
input model
"""
for name, module in model.named_children():
if isinstance(module, nn.BatchNorm2d):
new_module = DFineFrozenBatchNorm2d(module.num_features)
if module.weight.device != torch.device("meta"):
new_module.weight.copy_(module.weight)
new_module.bias.copy_(module.bias)
new_module.running_mean.copy_(module.running_mean)
new_module.running_var.copy_(module.running_var)
model._modules[name] = new_module
if len(list(module.children())) > 0:
replace_batch_norm(module)
| DFineFrozenBatchNorm2d |
python | euske__pdfminer | pdfminer/pdfinterp.py | {
"start": 9324,
"end": 25436
} | class ____:
debug = 0
def __init__(self, rsrcmgr, device):
self.rsrcmgr = rsrcmgr
self.device = device
return
def dup(self):
return self.__class__(self.rsrcmgr, self.device)
# init_resources(resources):
# Prepare the fonts and XObjects listed in the Resource attribute.
def init_resources(self, resources):
self.resources = resources
self.fontmap = {}
self.xobjmap = {}
self.csmap = PREDEFINED_COLORSPACE.copy()
if not resources:
return
def get_colorspace(spec):
if isinstance(spec, list):
name = literal_name(spec[0])
else:
name = literal_name(spec)
if name == 'ICCBased' and isinstance(spec, list) and 2 <= len(spec):
return PDFColorSpace(name, stream_value(spec[1])['N'])
elif name == 'DeviceN' and isinstance(spec, list) and 2 <= len(spec):
return PDFColorSpace(name, len(list_value(spec[1])))
else:
return PREDEFINED_COLORSPACE.get(name)
for (k, v) in dict_value(resources).items():
if self.debug:
logging.debug('Resource: %r: %r' % (k, v))
if k == 'Font':
for (fontid, spec) in dict_value(v).items():
objid = None
if isinstance(spec, PDFObjRef):
objid = spec.objid
spec = dict_value(spec)
self.fontmap[fontid] = self.rsrcmgr.get_font(objid, spec)
elif k == 'ColorSpace':
for (csid, spec) in dict_value(v).items():
self.csmap[csid] = get_colorspace(resolve1(spec))
elif k == 'ProcSet':
self.rsrcmgr.get_procset(list_value(v))
elif k == 'XObject':
for (xobjid, xobjstrm) in dict_value(v).items():
self.xobjmap[xobjid] = xobjstrm
return
# init_state(ctm)
# Initialize the text and graphic states for rendering a page.
def init_state(self, ctm):
# gstack: stack for graphical states.
self.gstack = []
self.ctm = ctm
self.device.set_ctm(self.ctm)
self.textstate = PDFTextState()
self.graphicstate = PDFGraphicState()
self.curpath = []
# argstack: stack for command arguments.
self.argstack = []
# set some global states.
self.scs = self.ncs = None
if self.csmap:
for v in self.csmap.values():
self.scs = self.ncs = v
break
return
def push(self, obj):
self.argstack.append(obj)
return
def pop(self, n):
if n == 0:
return []
x = self.argstack[-n:]
self.argstack = self.argstack[:-n]
return x
def get_current_state(self):
return (self.ctm, self.textstate.copy(), self.graphicstate.copy())
def set_current_state(self, state):
(self.ctm, self.textstate, self.graphicstate) = state
self.device.set_ctm(self.ctm)
return
# gsave
def do_q(self):
self.gstack.append(self.get_current_state())
return
# grestore
def do_Q(self):
if self.gstack:
self.set_current_state(self.gstack.pop())
return
# concat-matrix
def do_cm(self, a1, b1, c1, d1, e1, f1):
self.ctm = mult_matrix((a1, b1, c1, d1, e1, f1), self.ctm)
self.device.set_ctm(self.ctm)
return
# setlinewidth
def do_w(self, linewidth):
self.graphicstate.linewidth = linewidth
return
# setlinecap
def do_J(self, linecap):
self.graphicstate.linecap = linecap
return
# setlinejoin
def do_j(self, linejoin):
self.graphicstate.linejoin = linejoin
return
# setmiterlimit
def do_M(self, miterlimit):
self.graphicstate.miterlimit = miterlimit
return
# setdash
def do_d(self, dash, phase):
self.graphicstate.dash = (dash, phase)
return
# setintent
def do_ri(self, intent):
self.graphicstate.intent = intent
return
# setflatness
def do_i(self, flatness):
self.graphicstate.flatness = flatness
return
# load-gstate
def do_gs(self, name):
#XXX
return
# moveto
def do_m(self, x, y):
self.curpath.append(('m', x, y))
return
# lineto
def do_l(self, x, y):
self.curpath.append(('l', x, y))
return
# curveto
def do_c(self, x1, y1, x2, y2, x3, y3):
self.curpath.append(('c', x1, y1, x2, y2, x3, y3))
return
# urveto
def do_v(self, x2, y2, x3, y3):
self.curpath.append(('v', x2, y2, x3, y3))
return
# rveto
def do_y(self, x1, y1, x3, y3):
self.curpath.append(('y', x1, y1, x3, y3))
return
# closepath
def do_h(self):
self.curpath.append(('h',))
return
# rectangle
def do_re(self, x, y, w, h):
self.curpath.append(('m', x, y))
self.curpath.append(('l', x+w, y))
self.curpath.append(('l', x+w, y+h))
self.curpath.append(('l', x, y+h))
self.curpath.append(('h',))
return
# stroke
def do_S(self):
self.device.paint_path(self.graphicstate, True, False, False, self.curpath)
self.curpath = []
return
# close-and-stroke
def do_s(self):
self.do_h()
self.do_S()
return
# fill
def do_f(self):
self.device.paint_path(self.graphicstate, False, True, False, self.curpath)
self.curpath = []
return
# fill (obsolete)
do_F = do_f
# fill-even-odd
def do_f_a(self):
self.device.paint_path(self.graphicstate, False, True, True, self.curpath)
self.curpath = []
return
# fill-and-stroke
def do_B(self):
self.device.paint_path(self.graphicstate, True, True, False, self.curpath)
self.curpath = []
return
# fill-and-stroke-even-odd
def do_B_a(self):
self.device.paint_path(self.graphicstate, True, True, True, self.curpath)
self.curpath = []
return
# close-fill-and-stroke
def do_b(self):
self.do_h()
self.do_B()
return
# close-fill-and-stroke-even-odd
def do_b_a(self):
self.do_h()
self.do_B_a()
return
# close-only
def do_n(self):
self.curpath = []
return
# clip
def do_W(self):
return
# clip-even-odd
def do_W_a(self):
return
# setcolorspace-stroking
def do_CS(self, name):
try:
self.scs = self.csmap[literal_name(name)]
except KeyError:
if STRICT:
raise PDFInterpreterError('Undefined ColorSpace: %r' % name)
return
# setcolorspace-non-strokine
def do_cs(self, name):
try:
self.ncs = self.csmap[literal_name(name)]
except KeyError:
if STRICT:
raise PDFInterpreterError('Undefined ColorSpace: %r' % name)
return
# setgray-stroking
def do_G(self, gray):
#self.do_CS(LITERAL_DEVICE_GRAY)
return
# setgray-non-stroking
def do_g(self, gray):
#self.do_cs(LITERAL_DEVICE_GRAY)
return
# setrgb-stroking
def do_RG(self, r, g, b):
#self.do_CS(LITERAL_DEVICE_RGB)
return
# setrgb-non-stroking
def do_rg(self, r, g, b):
#self.do_cs(LITERAL_DEVICE_RGB)
return
# setcmyk-stroking
def do_K(self, c, m, y, k):
#self.do_CS(LITERAL_DEVICE_CMYK)
return
# setcmyk-non-stroking
def do_k(self, c, m, y, k):
#self.do_cs(LITERAL_DEVICE_CMYK)
return
# setcolor
def do_SCN(self):
if self.scs:
n = self.scs.ncomponents
else:
if STRICT:
raise PDFInterpreterError('No colorspace specified!')
n = 1
self.pop(n)
return
def do_scn(self):
if self.ncs:
n = self.ncs.ncomponents
else:
if STRICT:
raise PDFInterpreterError('No colorspace specified!')
n = 1
self.pop(n)
return
def do_SC(self):
self.do_SCN()
return
def do_sc(self):
self.do_scn()
return
# sharing-name
def do_sh(self, name):
return
# begin-text
def do_BT(self):
self.textstate.reset()
return
# end-text
def do_ET(self):
return
# begin-compat
def do_BX(self):
return
# end-compat
def do_EX(self):
return
# marked content operators
def do_MP(self, tag):
self.device.do_tag(tag)
return
def do_DP(self, tag, props):
self.device.do_tag(tag, props)
return
def do_BMC(self, tag):
self.device.begin_tag(tag)
return
def do_BDC(self, tag, props):
self.device.begin_tag(tag, props)
return
def do_EMC(self):
self.device.end_tag()
return
# setcharspace
def do_Tc(self, space):
self.textstate.charspace = space
return
# setwordspace
def do_Tw(self, space):
self.textstate.wordspace = space
return
# textscale
def do_Tz(self, scale):
self.textstate.scaling = scale
return
# setleading
def do_TL(self, leading):
self.textstate.leading = -leading
return
# selectfont
def do_Tf(self, fontid, fontsize):
try:
self.textstate.font = self.fontmap[literal_name(fontid)]
except KeyError:
if STRICT:
raise PDFInterpreterError('Undefined Font id: %r' % fontid)
self.textstate.font = self.rsrcmgr.get_font(None, {})
self.textstate.fontsize = fontsize
return
# setrendering
def do_Tr(self, render):
self.textstate.render = render
return
# settextrise
def do_Ts(self, rise):
self.textstate.rise = rise
return
# text-move
def do_Td(self, tx, ty):
(a, b, c, d, e, f) = self.textstate.matrix
self.textstate.matrix = (a, b, c, d, tx*a+ty*c+e, tx*b+ty*d+f)
self.textstate.linematrix = (0, 0)
#print('Td(%r,%r): %r' % (tx, ty, self.textstate), file=sys.stderr)
return
# text-move
def do_TD(self, tx, ty):
(a, b, c, d, e, f) = self.textstate.matrix
self.textstate.matrix = (a, b, c, d, tx*a+ty*c+e, tx*b+ty*d+f)
self.textstate.leading = ty
self.textstate.linematrix = (0, 0)
#print('TD(%r,%r): %r' % (tx, ty, self.textstate), file=sys.stderr)
return
# textmatrix
def do_Tm(self, a, b, c, d, e, f):
self.textstate.matrix = (a, b, c, d, e, f)
self.textstate.linematrix = (0, 0)
return
# nextline
def do_T_a(self):
(a, b, c, d, e, f) = self.textstate.matrix
self.textstate.matrix = (a, b, c, d, self.textstate.leading*c+e, self.textstate.leading*d+f)
self.textstate.linematrix = (0, 0)
return
# show-pos
def do_TJ(self, seq):
#print('TJ(%r): %r' % (seq, self.textstate), file=sys.stderr)
if self.textstate.font is None:
if STRICT:
raise PDFInterpreterError('No font specified!')
return
self.device.render_string(self.textstate, seq)
return
# show
def do_Tj(self, s):
self.do_TJ([s])
return
# quote
def do__q(self, s):
self.do_T_a()
self.do_TJ([s])
return
# doublequote
def do__w(self, aw, ac, s):
self.do_Tw(aw)
self.do_Tc(ac)
self.do_TJ([s])
return
# inline image
def do_BI(self): # never called
return
def do_ID(self): # never called
return
def do_EI(self, obj):
if 'W' in obj and 'H' in obj:
iobjid = str(id(obj))
self.device.begin_figure(iobjid, (0, 0, 1, 1), MATRIX_IDENTITY)
self.device.render_image(iobjid, obj)
self.device.end_figure(iobjid)
return
# invoke an XObject
def do_Do(self, xobjid):
xobjid = literal_name(xobjid)
try:
xobj = stream_value(self.xobjmap[xobjid])
except KeyError:
if STRICT:
raise PDFInterpreterError('Undefined xobject id: %r' % xobjid)
return
if self.debug: logging.info('Processing xobj: %r' % xobj)
subtype = xobj.get('Subtype')
if subtype is LITERAL_FORM and 'BBox' in xobj:
interpreter = self.dup()
bbox = list_value(xobj['BBox'])
matrix = list_value(xobj.get('Matrix', MATRIX_IDENTITY))
# According to PDF reference 1.7 section 4.9.1, XObjects in
# earlier PDFs (prior to v1.2) use the page's Resources entry
# instead of having their own Resources entry.
resources = dict_value(xobj.get('Resources')) or self.resources.copy()
self.device.begin_figure(xobjid, bbox, matrix)
interpreter.render_contents(resources, [xobj], ctm=mult_matrix(matrix, self.ctm))
self.device.end_figure(xobjid)
elif subtype is LITERAL_IMAGE and 'Width' in xobj and 'Height' in xobj:
self.device.begin_figure(xobjid, (0, 0, 1, 1), MATRIX_IDENTITY)
self.device.render_image(xobjid, xobj)
self.device.end_figure(xobjid)
else:
# unsupported xobject type.
pass
return
def process_page(self, page):
if self.debug: logging.info('Processing page: %r' % page)
(x0, y0, x1, y1) = page.mediabox
if page.rotate == 90:
ctm = (0, -1, 1, 0, -y0, x1)
elif page.rotate == 180:
ctm = (-1, 0, 0, -1, x1, y1)
elif page.rotate == 270:
ctm = (0, 1, -1, 0, y1, -x0)
else:
ctm = (1, 0, 0, 1, -x0, -y0)
self.device.begin_page(page, ctm)
self.render_contents(page.resources, page.contents, ctm=ctm)
self.device.end_page(page)
return
# render_contents(resources, streams, ctm)
# Render the content streams.
# This method may be called recursively.
def render_contents(self, resources, streams, ctm=MATRIX_IDENTITY):
if self.debug:
logging.info('render_contents: resources=%r, streams=%r, ctm=%r' %
(resources, streams, ctm))
self.init_resources(resources)
self.init_state(ctm)
self.execute(list_value(streams))
return
def execute(self, streams):
try:
parser = PDFContentParser(streams)
except PSEOF:
# empty page
return
while 1:
try:
(_, obj) = parser.nextobject()
except PSEOF:
break
if isinstance(obj, PSKeyword):
name = keyword_name(obj).decode('ascii')
method = 'do_%s' % name.replace('*', '_a').replace('"', '_w').replace("'", '_q')
if hasattr(self, method):
func = getattr(self, method)
nargs = func.__code__.co_argcount-1
if nargs:
args = self.pop(nargs)
if self.debug:
logging.debug('exec: %s %r' % (name, args))
if len(args) == nargs:
func(*args)
else:
if self.debug:
logging.debug('exec: %s' % name)
func()
else:
if STRICT:
raise PDFInterpreterError('Unknown operator: %r' % name)
else:
self.push(obj)
return
| PDFPageInterpreter |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/array_ops_test.py | {
"start": 44836,
"end": 44979
} | class ____(object):
def __init__(self, tensor):
self.tensor = tensor
def __getitem__(self, x):
return self.tensor[x]
| BenchmarkSlice |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingEnum2.py | {
"start": 205,
"end": 1562
} | class ____(Enum):
VALUE1 = 1
VALUE2 = 2
def assert_never(val: NoReturn): ...
def func1(a: SomeEnum):
if a is SomeEnum.VALUE1:
pass
elif a is SomeEnum.VALUE2:
pass
else:
assert_never(a)
def func2(a: SomeEnum):
if a is SomeEnum.VALUE1:
pass
else:
# This should generate an error because
# a hasn't been narrowed to Never.
assert_never(a)
def func3(a: SomeEnum):
if not a is not SomeEnum.VALUE1:
pass
elif not a is not SomeEnum.VALUE2:
pass
else:
assert_never(a)
def func4(a: SomeEnum):
if not a is not SomeEnum.VALUE1:
pass
else:
# This should generate an error because
# a hasn't been narrowed to Never.
assert_never(a)
def func5(a: Union[str, Literal[False]]) -> str:
if a is False:
return "no"
return a
def func6(a: Union[str, Literal[False]]) -> str:
if a is not False:
return a
return "no"
def func7(a: Union[str, bool]) -> str:
if a is False:
return "False"
elif a is True:
return "True"
return a
def func8(a: object):
if a is SomeEnum.VALUE1 or a is SomeEnum.VALUE2:
reveal_type(a, expected_text="Literal[SomeEnum.VALUE1, SomeEnum.VALUE2]")
else:
reveal_type(a, expected_text="object")
| SomeEnum |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_P.py | {
"start": 17760,
"end": 18687
} | class ____(Benchmark):
r"""
Price 2 objective function.
This class defines the Price 2 [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Price02}}(x) = 1 + \sin^2(x_1) + \sin^2(x_2)
- 0.1e^{(-x_1^2 - x_2^2)}
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0.9` for :math:`x_i = [0, 0]`
.. [1] Price, W. A controlled random search procedure for global
optimisation Computer Journal, 1977, 20, 367-370
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[0.0, 0.0]]
self.fglob = 0.9
def fun(self, x, *args):
self.nfev += 1
return 1.0 + sum(sin(x) ** 2) - 0.1 * exp(-x[0] ** 2.0 - x[1] ** 2.0)
| Price02 |
python | pytorch__pytorch | benchmarks/dynamo/huggingface_llm_models.py | {
"start": 811,
"end": 1842
} | class ____(Benchmark):
SAMPLE_RATE = 16000
DURATION = 30.0 # seconds
@staticmethod
def get_model_and_inputs(model_name, device):
processor = WhisperProcessor.from_pretrained(model_name)
model = WhisperForConditionalGeneration.from_pretrained(model_name).to(device)
model.config.forced_decoder_ids = None
model.generation_config.do_sample = False
model.generation_config.temperature = 0.0
num_samples = int(WhisperBenchmark.DURATION * WhisperBenchmark.SAMPLE_RATE)
audio = torch.randn(num_samples) * 0.1
inputs = dict(
processor(
audio, sampling_rate=WhisperBenchmark.SAMPLE_RATE, return_tensors="pt"
)
)
inputs["input_features"] = inputs["input_features"].to(device)
decoder_start_token = model.config.decoder_start_token_id
inputs["decoder_input_ids"] = torch.tensor(
[[decoder_start_token]], device=device
)
return model, inputs
| WhisperBenchmark |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 397701,
"end": 398135
} | class ____(sgqlc.types.Interface):
"""An object that can be locked."""
__schema__ = github_schema
__field_names__ = ("active_lock_reason", "locked")
active_lock_reason = sgqlc.types.Field(LockReason, graphql_name="activeLockReason")
"""Reason that the conversation was locked."""
locked = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="locked")
"""`true` if the object is locked"""
| Lockable |
python | tensorflow__tensorflow | tensorflow/python/eager/graph_only_ops_test.py | {
"start": 1023,
"end": 1472
} | class ____(test_util.TensorFlowTestCase):
def testGraphPlaceholder(self):
with ops.Graph().as_default():
x_tf = graph_only_ops.graph_placeholder(dtypes.int32, shape=(1,))
y_tf = math_ops.square(x_tf)
with self.cached_session() as sess:
x = np.array([42])
y = sess.run(y_tf, feed_dict={x_tf: np.array([42])})
self.assertAllClose(np.square(x), y)
if __name__ == '__main__':
test.main()
| GraphOnlyOpsTest |
python | numpy__numpy | benchmarks/benchmarks/bench_linalg.py | {
"start": 2536,
"end": 3226
} | class ____(Benchmark):
""" Test overhead of linalg methods for small arrays """
def setup(self):
self.array_3_3 = np.eye(3) + np.arange(9.).reshape((3, 3))
self.array_3 = np.arange(3.)
self.array_5 = np.arange(5.)
self.array_5_5 = np.reshape(np.arange(25.), (5, 5))
def time_norm_small_array(self):
np.linalg.norm(self.array_5)
def time_det_small_array(self):
np.linalg.det(self.array_5_5)
def time_det_3x3(self):
np.linalg.det(self.array_3_3)
def time_solve_3x3(self):
np.linalg.solve(self.array_3_3, self.array_3)
def time_eig_3x3(self):
np.linalg.eig(self.array_3_3)
| LinalgSmallArrays |
python | apache__airflow | task-sdk/tests/task_sdk/bases/test_operator.py | {
"start": 36420,
"end": 40467
} | class ____(int):
def __int__(self):
raise ValueError("Cannot cast to int")
@pytest.mark.parametrize(
("retries", "expected"),
[
pytest.param("foo", "'retries' type must be int, not str", id="string"),
pytest.param(CustomInt(10), "'retries' type must be int, not CustomInt", id="custom int"),
],
)
def test_operator_retries_invalid(dag_maker, retries, expected):
with pytest.raises(TypeError) as ctx:
BaseOperator(task_id="test_illegal_args", retries=retries)
assert str(ctx.value) == expected
@pytest.mark.parametrize(
("retries", "expected"),
[
pytest.param(None, 0, id="None"),
pytest.param("5", 5, id="str"),
pytest.param(1, 1, id="int"),
],
)
def test_operator_retries_conversion(retries, expected):
op = BaseOperator(
task_id="test_illegal_args",
retries=retries,
)
assert op.retries == expected
def test_default_retry_delay():
task1 = BaseOperator(task_id="test_no_explicit_retry_delay")
assert task1.retry_delay == timedelta(seconds=300)
def test_dag_level_retry_delay():
with DAG(dag_id="test_dag_level_retry_delay", default_args={"retry_delay": timedelta(seconds=100)}):
task1 = BaseOperator(task_id="test_no_explicit_retry_delay")
assert task1.retry_delay == timedelta(seconds=100)
@pytest.mark.parametrize(
("task", "context", "expected_exception", "expected_rendering", "expected_log", "not_expected_log"),
[
# Simple success case.
(
MockOperator(task_id="op1", arg1="{{ foo }}"),
dict(foo="footemplated"),
None,
dict(arg1="footemplated"),
None,
"Exception rendering Jinja template",
),
# Jinja syntax error.
(
MockOperator(task_id="op1", arg1="{{ foo"),
dict(),
jinja2.TemplateSyntaxError,
None,
"Exception rendering Jinja template for task 'op1', field 'arg1'. Template: '{{ foo'",
None,
),
# Type error
(
MockOperator(task_id="op1", arg1="{{ foo + 1 }}"),
dict(foo="footemplated"),
TypeError,
None,
"Exception rendering Jinja template for task 'op1', field 'arg1'. Template: '{{ foo + 1 }}'",
None,
),
],
)
def test_render_template_fields_logging(
caplog, task, context, expected_exception, expected_rendering, expected_log, not_expected_log
):
"""Verify if operator attributes are correctly templated."""
# Trigger templating and verify results
def _do_render():
task.render_template_fields(context=context)
if expected_exception:
with (
pytest.raises(expected_exception),
caplog.at_level(logging.ERROR, logger="airflow.sdk.definitions.templater"),
):
_do_render()
else:
_do_render()
for k, v in expected_rendering.items():
assert getattr(task, k) == v
if expected_log:
assert expected_log in caplog.text
if not_expected_log:
assert not_expected_log not in caplog.text
@pytest.mark.enable_redact
def test_render_template_fields_secret_masking(caplog):
"""Test that sensitive values are masked in Jinja template rendering exceptions."""
masker = _secrets_masker()
masker.reset_masker()
masker.sensitive_variables_fields = ["password", "secret", "token"]
mask_secret("mysecretpassword", "password")
task = MockOperator(task_id="op1", arg1="{{ password + 1 }}")
context = {"password": "mysecretpassword"}
with (
pytest.raises(TypeError),
caplog.at_level(logging.ERROR, logger="airflow.sdk.definitions.templater"),
):
task.render_template_fields(context=context)
assert "mysecretpassword" not in caplog.text
assert "Template: '{{ password + 1 }}'" in caplog.text
assert "Exception rendering Jinja template for task 'op1', field 'arg1'" in caplog.text
| CustomInt |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/ops/batching.py | {
"start": 14358,
"end": 16589
} | class ____(dataset_ops.UnaryDataset):
"""A `Dataset` that maps a function over a batch of elements."""
def __init__(self, input_dataset, map_func, batch_size, num_parallel_calls,
drop_remainder, use_legacy_function=False):
self._input_dataset = input_dataset
self._map_func = structured_function.StructuredFunctionWrapper(
map_func,
"tf.data.experimental.map_and_batch()",
dataset=input_dataset,
use_legacy_function=use_legacy_function)
self._batch_size_t = ops.convert_to_tensor(
batch_size, dtype=dtypes.int64, name="batch_size")
self._num_parallel_calls_t = ops.convert_to_tensor(
num_parallel_calls, dtype=dtypes.int64, name="num_parallel_calls")
self._drop_remainder_t = ops.convert_to_tensor(
drop_remainder, dtype=dtypes.bool, name="drop_remainder")
constant_drop_remainder = tensor_util.constant_value(self._drop_remainder_t)
# pylint: disable=protected-access
if constant_drop_remainder:
# NOTE(mrry): `constant_drop_remainder` may be `None` (unknown statically)
# or `False` (explicitly retaining the remainder).
# pylint: disable=g-long-lambda
self._element_spec = nest.map_structure(
lambda component_spec: component_spec._batch(
tensor_util.constant_value(self._batch_size_t)),
self._map_func.output_structure)
else:
self._element_spec = nest.map_structure(
lambda component_spec: component_spec._batch(None),
self._map_func.output_structure)
# pylint: enable=protected-access
variant_tensor = ged_ops.map_and_batch_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs,
f=self._map_func.function,
batch_size=self._batch_size_t,
num_parallel_calls=self._num_parallel_calls_t,
drop_remainder=self._drop_remainder_t,
preserve_cardinality=True,
**self._flat_structure)
super(_MapAndBatchDataset, self).__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._map_func]
@property
def element_spec(self):
return self._element_spec
| _MapAndBatchDataset |
python | apache__airflow | scripts/in_container/update_quarantined_test_status.py | {
"start": 1268,
"end": 7914
} | class ____(NamedTuple):
test_id: str
name: str
url: str
states: list[bool]
comment: str
test_results = []
user = ""
repo = ""
issue_id = 0
num_runs = 10
url_pattern = re.compile(r"\[([^]]*)]\(([^)]*)\)")
status_map: dict[str, bool] = {
":heavy_check_mark:": True,
":x:": False,
}
reverse_status_map: dict[bool, str] = {val: key for key, val in status_map.items()}
def get_url(result: TestResult) -> str:
return (
f"[{result.name}](https://github.com/{user}/{repo}/blob/"
f"main/{result.file}?test_id={result.test_id}#L{result.line})"
)
def parse_state_history(history_string: str) -> list[bool]:
history_array = history_string.split(" ")
status_array: list[bool] = []
for value in history_array:
if value:
status_array.append(status_map[value])
return status_array
def parse_test_history(line: str) -> TestHistory | None:
values = line.split("|")
match_url = url_pattern.match(values[1].strip())
if match_url:
name = match_url.group(1)
url = match_url.group(0)
http_url = match_url.group(2)
parsed_url = urlsplit(http_url)
the_id = parsed_url[3].split("=")[1]
comment = values[5] if len(values) >= 6 else ""
try:
states = parse_state_history(values[3])
except Exception:
states = []
return TestHistory(
test_id=the_id,
name=name,
states=states,
url=url,
comment=comment,
)
return None
def parse_body(body: str) -> dict[str, TestHistory]:
parse = False
test_history_map: dict[str, TestHistory] = {}
for line in body.splitlines(keepends=False):
if line.startswith("|-"):
parse = True
elif parse:
if not line.startswith("|"):
break
try:
status = parse_test_history(line)
except Exception:
continue
else:
if status:
test_history_map[status.test_id] = status
return test_history_map
def update_test_history(history: TestHistory, last_status: bool):
print(f"Adding status to test history: {history}, {last_status}")
return TestHistory(
test_id=history.test_id,
name=history.name,
url=history.url,
states=([last_status] + history.states)[0:num_runs],
comment=history.comment,
)
def create_test_history(result: TestResult) -> TestHistory:
print(f"Creating test history {result}")
return TestHistory(
test_id=result.test_id, name=result.name, url=get_url(result), states=[result.result], comment=""
)
def get_history_status(history: TestHistory):
if len(history.states) < num_runs:
if all(history.states):
return "So far, so good"
return "Flaky"
if all(history.states):
return "Stable"
if all(history.states[0 : num_runs - 1]):
return "Just one more"
if all(history.states[0 : num_runs // 2]):
return "Almost there"
return "Flaky"
def get_table(history_map: dict[str, TestHistory]) -> str:
headers = ["Test", "Last run", f"Last {num_runs} runs", "Status", "Comment"]
the_table: list[list[str]] = []
for _, history in sorted(history_map.items()):
the_table.append(
[
history.url,
"Succeeded" if history.states[0] else "Failed",
" ".join(reverse_status_map[state] for state in history.states),
get_history_status(history),
history.comment,
]
)
return tabulate(the_table, headers, tablefmt="github")
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Provide XML JUNIT FILE as first argument")
sys.exit(1)
with open(sys.argv[1]) as f:
text = f.read()
y = BeautifulSoup(text, "html.parser")
testsuites = y.testsuites
if testsuites is None:
print("No testsuites found in the XML file")
sys.exit(1)
testsuite = testsuites.testsuite
if testsuite is None:
print(f"No testsuite found in the XML file in {testsuites}")
sys.exit(1)
res = testsuite.findAll("testcase") # type: ignore[call-arg]
for test in res:
print("Parsing: " + test["classname"] + "::" + test["name"])
if test.contents and test.contents[0].name == "skipped":
print(f"skipping {test['name']}")
else:
test_results.append(
TestResult(
test_id=test["classname"] + "::" + test["name"],
file=test["file"],
line=test["line"],
name=test["name"],
classname=test["classname"],
result=not test.contents,
)
)
token = os.environ.get("GITHUB_TOKEN")
print(f"Token: {token}")
github_repository = os.environ.get("GITHUB_REPOSITORY")
if not github_repository:
raise RuntimeError("GitHub Repository must be defined!")
user, repo = github_repository.split("/")
print(f"User: {user}, Repo: {repo}")
issue_id = int(os.environ.get("ISSUE_ID", str(0)))
num_runs = int(os.environ.get("NUM_RUNS", str(10)))
if issue_id == 0:
raise RuntimeError("You need to define ISSUE_ID as environment variable")
gh = login(token=token)
quarantined_issue = gh.issue(user, repo, issue_id)
print("-----")
print(quarantined_issue.body)
print("-----")
parsed_test_map = parse_body(quarantined_issue.body)
new_test_map: dict[str, TestHistory] = {}
for test_result in test_results:
previous_results = parsed_test_map.get(test_result.test_id)
if previous_results:
updated_results = update_test_history(previous_results, test_result.result)
new_test_map[previous_results.test_id] = updated_results
else:
new_history = create_test_history(test_result)
new_test_map[new_history.test_id] = new_history
table = get_table(new_test_map)
print()
print("Result:")
print()
print(table)
print()
with Path(__file__).resolve().with_name("quarantine_issue_header.md").open() as f:
header = jinja2.Template(f.read(), autoescape=True, undefined=StrictUndefined).render(
DATE_UTC_NOW=datetime.now(tz=timezone.utc).isoformat("T", timespec="seconds")
)
quarantined_issue.edit(
title=None, body=f"{header}\n\n{table}", state="open" if test_results else "closed"
)
| TestHistory |
python | huggingface__transformers | examples/quantization/custom_quantization_int8_example.py | {
"start": 5026,
"end": 9460
} | class ____(HfQuantizer):
"""
Implementation of INT8 symmetric quantization.
"""
requires_calibration = False
requires_parameters_quantization = True
def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
super().__init__(quantization_config, **kwargs)
self.quantization_config = quantization_config
def _process_model_before_weight_loading(self, model, **kwargs):
"""
Replace model's linear layers with quantized versions before loading weights.
"""
self.modules_to_not_convert = self.quantization_config.modules_to_not_convert
model = replace_with_int8_symmetric_linear(
model,
modules_to_not_convert=self.modules_to_not_convert,
quantization_config=self.quantization_config,
pre_quantized=self.pre_quantized,
)
def param_needs_quantization(self, model, param_name: str, **kwargs) -> bool:
module, tensor_name = get_module_from_name(model, param_name)
if isinstance(module, Int8SymmetricLinear):
if self.pre_quantized or tensor_name == "bias":
return False
else:
return True
return False
def create_quantized_param(
self,
model,
param_value: "torch.Tensor",
param_name: str,
target_device: "torch.device",
**kwargs,
):
# Sanity check
module, tensor_name = get_module_from_name(model, param_name)
if isinstance(module, Int8SymmetricLinear):
if self.pre_quantized or tensor_name == "bias":
if tensor_name == "weight" and param_value.dtype != torch.int8:
raise ValueError("Expect quantized weights but got an unquantized weight")
else:
if tensor_name == "weight_scale":
raise ValueError("Expect unquantized weights but got a quantized weight_scale")
abs_max_per_row = torch.max(torch.abs(param_value), dim=1, keepdim=True)[0].clamp(min=1e-5)
weight_scale = abs_max_per_row / 127.0
weight_quantized = torch.round(param_value / weight_scale).clamp(-128, 127).to(torch.int8)
module, tensor_name = get_module_from_name(model, param_name)
module._buffers[tensor_name] = weight_quantized.to(target_device)
module._buffers["weight_scale"] = weight_scale.to(target_device)
def update_missing_keys(self, model, missing_keys: list[str], prefix: str) -> list[str]:
not_missing_keys = []
for name, module in model.named_modules():
if isinstance(module, Int8SymmetricLinear):
for missing in missing_keys:
if (
(name in missing or name in f"{prefix}.{missing}")
and not missing.endswith(".weight")
and not missing.endswith(".bias")
):
not_missing_keys.append(missing)
return [k for k in missing_keys if k not in not_missing_keys]
def _process_model_after_weight_loading(self, model, **kwargs):
"""
Post-processing after weights are loaded.
"""
return True
def is_serializable(self, safe_serialization=None):
return True
@property
def is_trainable(self) -> bool:
return False
# Example usage
if __name__ == "__main__":
model_int8 = AutoModelForCausalLM.from_pretrained(
"meta-llama/Llama-3.2-1B", quantization_config=Int8SymmetricConfig(), dtype=torch.float, device_map="cpu"
)
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B")
input_text = "once there is"
inputs = tokenizer(input_text, return_tensors="pt").to("cpu")
output = model_int8.generate(
**inputs,
max_length=100,
num_return_sequences=1,
no_repeat_ngram_size=2,
)
generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
print(generated_text)
# Save and upload to HUB
output_model_dir = "Llama-3.2-1B-INT8-CUSTOM"
model_int8.save_pretrained(output_model_dir)
tokenizer.save_pretrained(output_model_dir)
api = HfApi()
repo_id = "medmekk/Llama-3.2-1B-INT8-CUSTOM"
api.create_repo(repo_id, private=False)
api.upload_folder(folder_path=output_model_dir, repo_id=repo_id, repo_type="model")
| Int8SymmetricQuantizer |
python | explosion__spaCy | spacy/ty.py | {
"start": 325,
"end": 708
} | class ____(Protocol):
model: Any
is_trainable: bool
def update(
self,
examples: Iterable["Example"],
*,
drop: float = 0.0,
sgd: Optional[Optimizer] = None,
losses: Optional[Dict[str, float]] = None
) -> Dict[str, float]: ...
def finish_update(self, sgd: Optimizer) -> None: ...
@runtime_checkable
| TrainableComponent |
python | pikepdf__pikepdf | src/pikepdf/objects.py | {
"start": 7111,
"end": 8508
} | class ____(Object, metaclass=_ObjectMeta):
"""Construct a PDF Dictionary object."""
object_type = ObjectType.dictionary
def __new__(cls, d: Mapping | None = None, **kwargs) -> Dictionary:
"""Construct a PDF Dictionary.
Works from either a Python ``dict`` or keyword arguments.
These two examples are equivalent:
.. code-block:: python
pikepdf.Dictionary({'/NameOne': 1, '/NameTwo': 'Two'})
pikepdf.Dictionary(NameOne=1, NameTwo='Two')
In either case, the keys must be strings, and the strings
correspond to the desired Names in the PDF Dictionary. The values
must all be convertible to `pikepdf.Object`.
"""
if kwargs and d is not None:
raise ValueError('Cannot use both a mapping object and keyword args')
if kwargs:
# Add leading slash
# Allows Dictionary(MediaBox=(0,0,1,1), Type=Name('/Page')...
return _core._new_dictionary({('/' + k): v for k, v in kwargs.items()})
if isinstance(d, Dictionary):
# Already a dictionary
return d.__copy__()
if not d:
d = {}
if d and any(key == '/' or not key.startswith('/') for key in d.keys()):
raise KeyError("Dictionary created from strings must begin with '/'")
return _core._new_dictionary(d)
| Dictionary |
python | pydantic__pydantic | pydantic/_internal/_namespace_utils.py | {
"start": 987,
"end": 2281
} | class ____(NamedTuple):
"""A tuple of globals and locals to be used during annotations evaluation.
This datastructure is defined as a named tuple so that it can easily be unpacked:
```python {lint="skip" test="skip"}
def eval_type(typ: type[Any], ns: NamespacesTuple) -> None:
return eval(typ, *ns)
```
"""
globals: GlobalsNamespace
"""The namespace to be used as the `globals` argument during annotations evaluation."""
locals: MappingNamespace
"""The namespace to be used as the `locals` argument during annotations evaluation."""
def get_module_ns_of(obj: Any) -> dict[str, Any]:
"""Get the namespace of the module where the object is defined.
Caution: this function does not return a copy of the module namespace, so the result
should not be mutated. The burden of enforcing this is on the caller.
"""
module_name = getattr(obj, '__module__', None)
if module_name:
try:
return sys.modules[module_name].__dict__
except KeyError:
# happens occasionally, see https://github.com/pydantic/pydantic/issues/2363
return {}
return {}
# Note that this class is almost identical to `collections.ChainMap`, but need to enforce
# immutable mappings here:
| NamespacesTuple |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/test_utils.py | {
"start": 19232,
"end": 19537
} | class ____(ThreadPoolExecutor):
"""Utility class for testing threadpool executor logic which executes functions in a single
thread, for easier unit testing.
"""
def __init__(self):
super().__init__(max_workers=1, thread_name_prefix="single_threaded_worker")
| SingleThreadPoolExecutor |
python | dask__dask | dask/bag/core.py | {
"start": 8170,
"end": 9862
} | class ____:
"""String processing functions
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])
>>> list(b.str.lower())
['alice smith', 'bob jones', 'charlie smith']
>>> list(b.str.match('*Smith'))
['Alice Smith', 'Charlie Smith']
>>> list(b.str.split(' '))
[['Alice', 'Smith'], ['Bob', 'Jones'], ['Charlie', 'Smith']]
"""
def __init__(self, bag):
self._bag = bag
def __dir__(self):
return sorted(set(dir(type(self)) + dir(str)))
def _strmap(self, key, *args, **kwargs):
return self._bag.map(operator.methodcaller(key, *args, **kwargs))
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
if key in dir(str):
func = getattr(str, key)
return robust_wraps(func)(partial(self._strmap, key))
else:
raise
def match(self, pattern):
"""Filter strings by those that match a pattern.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])
>>> list(b.str.match('*Smith'))
['Alice Smith', 'Charlie Smith']
See Also
--------
fnmatch.fnmatch
"""
from fnmatch import fnmatch
return self._bag.filter(partial(fnmatch, pat=pattern))
def robust_wraps(wrapper):
"""A weak version of wraps that only copies doc."""
def _(wrapped):
wrapped.__doc__ = wrapper.__doc__
return wrapped
return _
| StringAccessor |
python | kamyu104__LeetCode-Solutions | Python/rearrange-string-k-distance-apart.py | {
"start": 67,
"end": 973
} | class ____(object):
def rearrangeString(self, s, k):
"""
:type s: str
:type k: int
:rtype: str
"""
if not k:
return s
cnts = collections.Counter(s)
bucket_cnt = max(cnts.itervalues())
if not ((bucket_cnt-1)*k+sum(x == bucket_cnt for x in cnts.itervalues()) <= len(s)):
return ""
result = [0]*len(s)
i = (len(s)-1)%k
for c in itertools.chain((c for c, v in cnts.iteritems() if v == bucket_cnt), (c for c, v in cnts.iteritems() if v != bucket_cnt)):
for _ in xrange(cnts[c]):
result[i] = c
i += k
if i >= len(result):
i = (i-1)%k
return "".join(result)
# Time: O(n)
# Space: O(n)
import collections
import itertools
# reference: https://codeforces.com/blog/entry/110184 1774B - Coloring
| Solution |
python | kamyu104__LeetCode-Solutions | Python/minimum-deletions-to-make-array-beautiful.py | {
"start": 38,
"end": 337
} | class ____(object):
def minDeletion(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = 0
for i in xrange(len(nums)-1):
result += int(i%2 == result%2 and nums[i] == nums[i+1])
return result+(len(nums)-result)%2
| Solution |
python | getsentry__sentry | src/sentry/monitors/serializers.py | {
"start": 3005,
"end": 4598
} | class ____(Serializer):
def get_attrs(
self, item_list: Sequence[Any], user: Any, **kwargs: Any
) -> MutableMapping[Any, Any]:
env_ids = [
monitor_env.environment_id for monitor_env in item_list if monitor_env.environment_id
]
environments = {env.id: env for env in Environment.objects.filter(id__in=env_ids)}
active_incidents = list(
MonitorIncident.objects.filter(
monitor_environment__in=item_list,
resolving_checkin=None,
)
)
serialized_incidents = {
incident.monitor_environment_id: serialized_incident
for incident, serialized_incident in zip(
active_incidents, serialize(active_incidents, user)
)
}
return {
monitor_env: {
"environment": environments[monitor_env.environment_id],
"active_incident": serialized_incidents.get(monitor_env.id),
}
for monitor_env in item_list
}
def serialize(self, obj, attrs, user, **kwargs) -> MonitorEnvironmentSerializerResponse:
return {
"name": attrs["environment"].name,
"status": obj.get_status_display(),
"isMuted": obj.is_muted,
"dateCreated": obj.monitor.date_added,
"lastCheckIn": obj.last_checkin,
"nextCheckIn": obj.next_checkin,
"nextCheckInLatest": obj.next_checkin_latest,
"activeIncident": attrs["active_incident"],
}
| MonitorEnvironmentSerializer |
python | pydata__xarray | xarray/core/coordinates.py | {
"start": 33407,
"end": 37204
} | class ____(Coordinates):
"""
Dictionary like container for coordinates of a DataTree node (variables + indexes).
This collection can be passed directly to the :py:class:`~xarray.Dataset`
and :py:class:`~xarray.DataArray` constructors via their `coords` argument.
This will add both the coordinates variables and their index.
"""
# TODO: This only needs to be a separate class from `DatasetCoordinates` because DataTree nodes store their variables differently
# internally than how Datasets do, see https://github.com/pydata/xarray/issues/9203.
_data: DataTree # type: ignore[assignment] # complaining that DataTree is not a subclass of DataWithCoords - this can be fixed by refactoring, see #9203
__slots__ = ("_data",)
def __init__(self, datatree: DataTree):
self._data = datatree
@property
def _names(self) -> set[Hashable]:
return set(self._data._coord_variables)
@property
def dims(self) -> Frozen[Hashable, int]:
# deliberately display all dims, not just those on coordinate variables - see https://github.com/pydata/xarray/issues/9466
return Frozen(self._data.dims)
@property
def dtypes(self) -> Frozen[Hashable, np.dtype]:
"""Mapping from coordinate names to dtypes.
Cannot be modified directly, but is updated when adding new variables.
See Also
--------
Dataset.dtypes
"""
return Frozen({n: v.dtype for n, v in self._data._coord_variables.items()})
@property
def variables(self) -> Mapping[Hashable, Variable]:
return Frozen(self._data._coord_variables)
def __getitem__(self, key: Hashable) -> DataArray:
if key not in self._data._coord_variables:
raise KeyError(key)
return self._data.dataset[key]
def to_dataset(self) -> Dataset:
"""Convert these coordinates into a new Dataset"""
return self._data.dataset._copy_listed(self._names)
def _update_coords(
self, coords: dict[Hashable, Variable], indexes: dict[Hashable, Index]
) -> None:
from xarray.core.datatree import check_alignment
# create updated node (`.to_dataset` makes a copy so this doesn't modify in-place)
node_ds = self._data.to_dataset(inherit=False)
node_ds.coords._update_coords(coords, indexes)
# check consistency *before* modifying anything in-place
# TODO can we clean up the signature of check_alignment to make this less awkward?
if self._data.parent is not None:
parent_ds = self._data.parent._to_dataset_view(
inherit=True, rebuild_dims=False
)
else:
parent_ds = None
check_alignment(self._data.path, node_ds, parent_ds, self._data.children)
# assign updated attributes
coord_variables = dict(node_ds.coords.variables)
self._data._node_coord_variables = coord_variables
self._data._node_dims = node_ds._dims
self._data._node_indexes = node_ds._indexes
def _drop_coords(self, coord_names):
# should drop indexed coordinates only
for name in coord_names:
del self._data._node_coord_variables[name]
del self._data._node_indexes[name]
def __delitem__(self, key: Hashable) -> None:
if key in self:
del self._data[key] # type: ignore[arg-type] # see https://github.com/pydata/xarray/issues/8836
else:
raise KeyError(key)
def _ipython_key_completions_(self):
"""Provide method for the key-autocompletions in IPython."""
return [
key
for key in self._data._ipython_key_completions_()
if key in self._data._coord_variables
]
| DataTreeCoordinates |
python | PrefectHQ__prefect | src/prefect/server/schemas/actions.py | {
"start": 1596,
"end": 2188
} | class ____(ActionBaseModel):
"""Data used by the Prefect REST API to create a flow."""
name: Name = Field(
default=..., description="The name of the flow", examples=["my-flow"]
)
tags: List[str] = Field(
default_factory=list,
description="A list of flow tags",
examples=[["tag-1", "tag-2"]],
)
labels: Union[KeyValueLabels, None] = Field(
default_factory=dict,
description="A dictionary of key-value labels. Values can be strings, numbers, or booleans.",
examples=[{"key": "value1", "key2": 42}],
)
| FlowCreate |
python | xlwings__xlwings | xlwings/pro/_xlremote.py | {
"start": 11022,
"end": 14484
} | class ____(base_classes.Sheet):
def __init__(self, api, sheets, index):
self._api = api
self._index = index
self.sheets = sheets
def append_json_action(self, **kwargs):
self.book.append_json_action(
**{
**kwargs,
**{
"sheet_position": self.index - 1,
},
}
)
@property
def api(self):
return self._api
@property
def name(self):
return self.api["name"]
@name.setter
def name(self, value):
self.append_json_action(
func="setSheetName",
args=value,
)
self.api["name"] = value
@property
def index(self):
return self._index
@property
def book(self):
return self.sheets.book
def range(self, arg1, arg2=None):
return Range(sheet=self, arg1=arg1, arg2=arg2)
@property
def cells(self):
return Range(
sheet=self,
arg1=(1, 1),
arg2=(MAX_ROWS, MAX_COLUMNS),
)
@property
def names(self):
api = [
name
for name in self.book.api["names"]
if name["scope_sheet_index"] is not None
and name["scope_sheet_index"] + 1 == self.index
and not name["book_scope"]
]
return Names(parent=self, api=api)
def activate(self):
ix = self.index - 1
self.book.api["book"]["active_sheet_index"] = ix
self.append_json_action(func="activateSheet", args=ix)
@property
def pictures(self):
return Pictures(self)
@property
def tables(self):
return Tables(parent=self)
def delete(self):
del self.book.api["sheets"][self.index - 1]
self.append_json_action(func="sheetDelete")
def clear(self):
self.append_json_action(func="sheetClear")
def clear_contents(self):
self.append_json_action(func="sheetClearContents")
def clear_formats(self):
self.append_json_action(func="sheetClearFormats")
@property
def freeze_panes(self):
return FreezePanes(self)
@lru_cache(None)
def get_range_api(api_values, arg1, arg2=None):
# Keeping this outside of the Range class allows us to cache it across multiple
# instances of the same range
if arg2:
values = [
row[arg1[1] - 1 : arg2[1]] for row in api_values[arg1[0] - 1 : arg2[0]]
]
if not values:
# Completely outside the used range
return [(None,) * (arg2[1] + 1 - arg1[1])] * (arg2[0] + 1 - arg1[0])
else:
# Partly outside the used range
nrows = arg2[0] + 1 - arg1[0]
ncols = arg2[1] + 1 - arg1[1]
nrows_actual = len(values)
ncols_actual = len(values[0])
delta_rows = nrows - nrows_actual
delta_cols = ncols - ncols_actual
if delta_rows != 0:
values.extend([(None,) * ncols_actual] * delta_rows)
if delta_cols != 0:
v = []
for row in values:
v.append(row + (None,) * delta_cols)
values = v
return values
else:
try:
values = [(api_values[arg1[0] - 1][arg1[1] - 1],)]
return values
except IndexError:
# Outside the used range
return [(None,)]
| Sheet |
python | getsentry__sentry | src/sentry/taskworker/retry.py | {
"start": 735,
"end": 1616
} | class ____(Enum):
Deadletter = 1
Discard = 2
def to_proto(self) -> OnAttemptsExceeded.ValueType:
if self == LastAction.Deadletter:
return ON_ATTEMPTS_EXCEEDED_DEADLETTER
if self == LastAction.Discard:
return ON_ATTEMPTS_EXCEEDED_DISCARD
raise ValueError(f"Unknown LastAction: {self}")
def retry_task(exc: Exception | None = None, raise_on_no_retries: bool = True) -> None:
"""
Helper for triggering retry errors.
If all retries have been consumed, this will raise a
sentry.taskworker.retry.NoRetriesRemaining
"""
current = current_task()
if current and not current.retries_remaining:
metrics.incr("taskworker.retry.no_retries_remaining")
if raise_on_no_retries:
raise NoRetriesRemainingError()
else:
return
raise RetryTaskError()
| LastAction |
python | huggingface__transformers | tests/test_tokenization_mistral_common.py | {
"start": 127985,
"end": 215414
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.repo_id = "hf-internal-testing/namespace-mistralai-repo_name-Mistral-Small-3.1-24B-Instruct-2503"
# determine if we already have this downloaded
cls.local_files_only = len(list_local_hf_repo_files(cls.repo_id, revision=None)) > 0
cls.tokenizer: MistralCommonBackend = AutoTokenizer.from_pretrained(
cls.repo_id,
tokenizer_type="mistral",
local_files_only=cls.local_files_only,
# This is a hack as `list_local_hf_repo_files` from `mistral_common` has a bug
# TODO: Discuss with `mistral-common` maintainers: after a fix being done there, remove this `revision` hack
revision=None,
)
cls.ref_tokenizer: MistralTokenizer = MistralTokenizer.from_hf_hub(
cls.repo_id, local_files_only=cls.local_files_only
)
# Define SPM tokenizer to test the private methods that handle SPM and Tekken differencies.
cls.spm_repo_id = "mistralai/Mistral-7B-v0.3"
# cls.tokenizer_audio: MistralCommonBackend = AutoTokenizer.from_pretrained(
# "hf-internal-testing/namesspace-mistralai-repo_name-Voxtral-Mini-3B-2507"
# )
repo_id = "mistralai/Voxtral-Mini-3B-2507"
local_files_only = len(list_local_hf_repo_files(repo_id, revision=None)) > 0
cls.tokenizer_audio: MistralCommonBackend = AutoTokenizer.from_pretrained(
repo_id,
local_files_only=local_files_only,
revision=None,
)
cls.ref_tokenizer_audio: MistralCommonBackend = MistralTokenizer.from_hf_hub(
repo_id, local_files_only=local_files_only
)
cls.fixture_conversations = [
[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hi!"},
],
[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hi!"},
{"role": "assistant", "content": "Hello! How can I help you?"},
{"role": "user", "content": "What is the temperature in Paris?"},
],
]
cls.tokenized_fixture_conversations = [
cls.ref_tokenizer.encode_chat_completion(ChatCompletionRequest.from_openai(conversation))
for conversation in cls.fixture_conversations
]
cls.ref_special_ids = {t["rank"] for t in cls.ref_tokenizer.instruct_tokenizer.tokenizer._all_special_tokens}
@classmethod
def tearDownClass(cls):
del cls.tokenizer
del cls.ref_tokenizer
del cls.tokenizer_audio
del cls.ref_tokenizer_audio
del cls.fixture_conversations
del cls.tokenized_fixture_conversations
del cls.ref_special_ids
gc.collect()
# Copy paste of `MistralCommonBackend._tekken_piece_to_id`
def _ref_piece_to_id(self, piece: str) -> int:
tekken_tokenizer = self.ref_tokenizer.instruct_tokenizer.tokenizer
piece_bytes = piece.encode("utf-8")
shift = tekken_tokenizer.num_special_tokens
try:
return shift + tekken_tokenizer._tekken_token2id_nospecial[piece_bytes]
except KeyError:
piece_str = piece_bytes.decode("utf-8")
if piece_str in tekken_tokenizer._special_tokens_reverse_vocab:
return tekken_tokenizer._special_tokens_reverse_vocab[piece_str]
return tekken_tokenizer.unk_id
def _get_spm_tokenizer(self) -> MistralCommonBackend:
local_files_only = len(list_local_hf_repo_files(self.spm_repo_id, revision=None)) > 0
return AutoTokenizer.from_pretrained(
self.spm_repo_id,
local_files_only=local_files_only,
revision=None,
tokenizer_type="mistral",
)
def test_spm_vs_tekken_is_control_token(self):
spm_tokenizer = self._get_spm_tokenizer()
self.assertTrue(spm_tokenizer._is_control_token(1))
self.assertTrue(spm_tokenizer._is_control_token(768))
self.assertFalse(spm_tokenizer._is_control_token(2000))
self.assertTrue(self.tokenizer._is_control_token(0))
self.assertTrue(self.tokenizer._is_control_token(768))
self.assertTrue(self.tokenizer._is_control_token(999))
self.assertFalse(self.tokenizer._is_control_token(1000))
def test_spm_vs_tekken_piece_to_id(self):
spm_tokenizer = self._get_spm_tokenizer()
self.assertEqual(spm_tokenizer._piece_to_id("<s>", False), 1)
self.assertEqual(spm_tokenizer._piece_to_id("h", False), 29484)
self.assertEqual(self.tokenizer._piece_to_id("<s>", False), 1)
self.assertEqual(self._ref_piece_to_id("<s>"), 1)
self.assertEqual(self.tokenizer._piece_to_id("\u0000", False), 1000)
self.assertEqual(self._ref_piece_to_id("\u0000"), 1000)
self.assertEqual(self.tokenizer._piece_to_id(" String", False), 3000)
self.assertEqual(self._ref_piece_to_id(" String"), 3000)
self.assertEqual(self.tokenizer._piece_to_id("后汉书", False), 131071)
self.assertEqual(self._ref_piece_to_id("后汉书"), 131071)
def test_vocab_size(self):
self.assertEqual(self.tokenizer.vocab_size, self.ref_tokenizer.instruct_tokenizer.tokenizer.n_words)
def test_save_pretrained(self):
with tempfile.TemporaryDirectory() as tmp_dir:
self.tokenizer.save_pretrained(tmp_dir)
loaded_tokenizer = MistralCommonBackend.from_pretrained(tmp_dir)
self.assertIsNotNone(loaded_tokenizer)
self.assertEqual(self.tokenizer.get_vocab(), loaded_tokenizer.get_vocab())
self.assertEqual(
self.tokenizer.tokenizer.instruct_tokenizer.tokenizer.version,
loaded_tokenizer.tokenizer.instruct_tokenizer.tokenizer.version,
)
with self.assertRaises(
ValueError, msg="Kwargs [unk_args] are not supported by `MistralCommonBackend.save_pretrained`."
):
with tempfile.TemporaryDirectory() as tmp_dir:
self.tokenizer.save_pretrained(tmp_dir, unk_args="")
def test_encode(self):
string = "Hello, world!"
# Test 1:
# encode with add_special_tokens
expected_with_special = self.ref_tokenizer.instruct_tokenizer.tokenizer.encode(string, bos=True, eos=True)
tokens_with_special = self.tokenizer.encode(string, add_special_tokens=True)
self.assertEqual(tokens_with_special, expected_with_special)
# Test 2:
# encode without add_special_tokens
expected_without_special = self.ref_tokenizer.instruct_tokenizer.tokenizer.encode(string, bos=False, eos=False)
tokens_without_special = self.tokenizer.encode(string, add_special_tokens=False)
self.assertEqual(tokens_without_special, expected_without_special)
# Test 3:
# encode with return_tensors
tokens_with_return_tensors = self.tokenizer.encode(string, add_special_tokens=False, return_tensors="pt")
self.assertIsInstance(tokens_with_return_tensors, torch.Tensor)
self.assertEqual(tokens_with_return_tensors.tolist()[0], expected_without_special)
# Test 4:
# encode with max_length
tokens_with_max_length = self.tokenizer.encode(string, add_special_tokens=False, max_length=3)
self.assertEqual(tokens_with_max_length, expected_without_special[:3])
# Test 5:
# encode with padding
tokens_with_padding = self.tokenizer.encode(
string, add_special_tokens=False, padding=True, pad_to_multiple_of=6
)
expected_padding = [self.ref_tokenizer.instruct_tokenizer.tokenizer.pad_id] * (
6 - len(expected_without_special) % 6
) + expected_without_special
self.assertEqual(tokens_with_padding, expected_padding)
for padding in [
False,
True,
"longest",
"max_length",
"do_not_pad",
PaddingStrategy.LONGEST,
PaddingStrategy.MAX_LENGTH,
PaddingStrategy.DO_NOT_PAD,
]:
tokens_with_padding = self.tokenizer.encode(string, add_special_tokens=False, padding=padding)
self.assertEqual(tokens_with_padding, expected_without_special)
# For truncation, we use a longer string
string_long = (
"Hello world! It is a beautiful day today. The sun is shining brightly and the birds are singing."
)
expected_long = self.ref_tokenizer.instruct_tokenizer.tokenizer.encode(string_long, bos=False, eos=False)
# Test 6:
# encode with truncation
tokens_with_truncation = self.tokenizer.encode(
string_long, add_special_tokens=False, truncation=True, max_length=12
)
self.assertEqual(tokens_with_truncation, expected_long[:12])
# Test 7:
# encode with padding and truncation
tokens_with_padding_and_truncation = self.tokenizer.encode(
string_long, add_special_tokens=False, padding=True, pad_to_multiple_of=12, truncation=True, max_length=36
)
expected_long_padding = [self.ref_tokenizer.instruct_tokenizer.tokenizer.pad_id] * (
12 - len(expected_long) % 12
) + expected_long
self.assertEqual(tokens_with_padding_and_truncation, expected_long_padding)
# Test encode with unsupported kwargs
with self.assertRaises(
ValueError, msg="Kwargs [unk_args] are not supported by `MistralCommonBackend.encode`."
):
self.tokenizer.encode("Hello, world!", add_special_tokens=True, unk_args="")
def test_decode(self):
string = "Hello, world!"
string_with_space = "Hello, world !"
tokens_ids = self.ref_tokenizer.instruct_tokenizer.tokenizer.encode(string, bos=True, eos=True)
tokens_ids_with_space = self.ref_tokenizer.instruct_tokenizer.tokenizer.encode(
string_with_space, bos=True, eos=True
)
# Test 1:
# decode with and without skip_special_tokens
self.assertEqual(self.tokenizer.decode(tokens_ids, skip_special_tokens=True), string)
self.assertEqual(self.tokenizer.decode(tokens_ids, skip_special_tokens=False), "<s>" + string + "</s>")
self.assertEqual(self.tokenizer.decode(tokens_ids_with_space, skip_special_tokens=True), string_with_space)
# Test 2:
# decode with clean_up_tokenization_spaces
self.assertEqual(
self.tokenizer.decode(tokens_ids_with_space, skip_special_tokens=True, clean_up_tokenization_spaces=True),
"Hello, world!",
)
# Test 3:
# decode with unsupported kwargs
with self.assertRaises(
ValueError, msg="Kwargs [unk_args] are not supported by `MistralCommonBackend.decode`."
):
self.tokenizer.decode(tokens_ids, skip_special_tokens=False, unk_args="")
def test_decode_transcription_mode(self):
# in the specific case of Voxtral, the added f"lang:xx" (always a two char language code since it follows ISO 639-1 alpha-2 format)
# is not considered as a special token by mistral-common and is encoded/ decoded as normal text.
# we made the explicit choice of skipping "lang:xx" it to ease users life, see `[~MistralCommonBackend.decode]`
expected_string = "lang:en[TRANSCRIBE]"
openai_transcription_request = {
"model": None,
"language": "en",
"file": io.BytesIO(base64.b64decode(AUDIO_BASE_64)),
}
transcription_request = TranscriptionRequest.from_openai(openai_transcription_request)
tokenized_transcription_request = self.ref_tokenizer_audio.encode_transcription(transcription_request)
# without skip_special_tokens
self.assertEqual(
self.tokenizer_audio.decode(tokenized_transcription_request.tokens, skip_special_tokens=False)[
-len(expected_string) :
],
expected_string,
)
# with skip_special_tokens
self.assertEqual(self.tokenizer.decode(tokenized_transcription_request.tokens, skip_special_tokens=True), "")
def test_batch_decode(self):
string = "Hello, world!"
string_with_space = "Hello, world !"
batch_tokens_ids = [
self.ref_tokenizer.instruct_tokenizer.tokenizer.encode(string, bos=True, eos=True),
self.ref_tokenizer.instruct_tokenizer.tokenizer.encode(string_with_space, bos=True, eos=True),
]
# Test 1:
# batch_decode with and without skip_special_tokens
self.assertEqual(
self.tokenizer.batch_decode(batch_tokens_ids, skip_special_tokens=True),
[string, string_with_space],
)
self.assertEqual(
self.tokenizer.batch_decode(batch_tokens_ids, skip_special_tokens=False),
["<s>" + string + "</s>", "<s>" + string_with_space + "</s>"],
)
self.assertEqual(
self.tokenizer.batch_decode(batch_tokens_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True),
["Hello, world!", "Hello, world!"],
)
# Test 2:
# batch_decode with unsupported kwargs
with self.assertRaises(
ValueError, msg="Kwargs [unk_args] are not supported by `MistralCommonBackend.batch_decode`."
):
self.tokenizer.batch_decode(batch_tokens_ids, skip_special_tokens=False, unk_args="")
def test_convert_ids_to_tokens(self):
# Test 1:
# with skip_special_tokens=False
ids = self.ref_tokenizer.instruct_tokenizer.tokenizer.encode("Hello world!", bos=True, eos=True)
expected_tokens = [self.ref_tokenizer.instruct_tokenizer.tokenizer.id_to_piece(id) for id in ids]
tokens = self.tokenizer.convert_ids_to_tokens(ids, skip_special_tokens=False)
self.assertEqual(tokens, expected_tokens)
token = self.tokenizer.convert_ids_to_tokens(ids[0], skip_special_tokens=False)
self.assertEqual(token, expected_tokens[0])
# Test 2:
# with skip_special_tokens=True
expected_tokens = expected_tokens[1:-1]
tokens = self.tokenizer.convert_ids_to_tokens(ids, skip_special_tokens=True)
self.assertEqual(tokens, expected_tokens)
with self.assertRaises(ValueError):
self.tokenizer.convert_ids_to_tokens(ids[0], skip_special_tokens=True)
token = self.tokenizer.convert_ids_to_tokens(ids[1], skip_special_tokens=True)
self.assertEqual(token, expected_tokens[0])
def test_convert_tokens_to_ids(self):
tokens = ["Hello", "world", "!"]
expected_ids = [self._ref_piece_to_id(token) for token in tokens]
# Test 1:
# list of tokens
ids = self.tokenizer.convert_tokens_to_ids(tokens)
self.assertEqual(ids, expected_ids)
# Test 2:
# single token
id = self.tokenizer.convert_tokens_to_ids(tokens[0])
self.assertEqual(id, expected_ids[0])
self.assertEqual(id, self.tokenizer.convert_tokens_to_ids(tokens[0]))
def test_tokenize(self):
string = "Hello world!"
expected_tokens = [
self.ref_tokenizer.instruct_tokenizer.tokenizer.id_to_piece(id)
for id in self.ref_tokenizer.instruct_tokenizer.tokenizer.encode(string, bos=False, eos=False)
]
tokens = self.tokenizer.tokenize(string)
self.assertEqual(tokens, expected_tokens)
with self.assertRaises(
ValueError, msg="Kwargs [add_special_tokens] are not supported by `MistralCommonBackend.tokenize`."
):
self.tokenizer.tokenize(string, add_special_tokens=True)
def test_get_special_tokens_mask(self):
# Test 1:
# with skip_special_tokens=False
ids = self.ref_tokenizer.instruct_tokenizer.tokenizer.encode("Hello world!", bos=True, eos=True)
expected_mask = [1 if id in self.ref_special_ids else 0 for id in ids]
mask = self.tokenizer.get_special_tokens_mask(ids)
self.assertEqual(mask, expected_mask)
# Test 2:
# already_has_special_tokens=True should raise an error
with self.assertRaises(ValueError):
self.tokenizer.get_special_tokens_mask(ids, already_has_special_tokens=True)
# Test 3:
# token_ids_1 not None should raise an error
with self.assertRaises(ValueError):
self.tokenizer.get_special_tokens_mask(ids, token_ids_1=ids)
def test_pad_batch_encoding_input(self):
# Test 1:
# padding and default values
def get_batch_encoding():
return self.tokenizer("Hello world!", return_special_tokens_mask=True)
batch_encoding = get_batch_encoding()
for padding in [
False,
True,
"longest",
"max_length",
"do_not_pad",
PaddingStrategy.LONGEST,
PaddingStrategy.MAX_LENGTH,
PaddingStrategy.DO_NOT_PAD,
]:
padded_batch_encoding = self.tokenizer.pad(get_batch_encoding(), padding=padding)
self.assertEqual(padded_batch_encoding, batch_encoding)
# Test 2:
# padding_strategy="max_length" or PaddingStrategy.MAX_LENGTH and max_length
for padding in ["max_length", PaddingStrategy.MAX_LENGTH]:
padded_batch_encoding = self.tokenizer.pad(get_batch_encoding(), padding=padding, max_length=12)
self.assertEqual(
padded_batch_encoding["input_ids"],
[self.ref_tokenizer.instruct_tokenizer.tokenizer.pad_id] * (12 - len(batch_encoding["input_ids"]))
+ batch_encoding["input_ids"],
)
self.assertEqual(
padded_batch_encoding["attention_mask"],
[0] * (12 - len(batch_encoding["input_ids"])) + batch_encoding["attention_mask"],
)
self.assertEqual(
padded_batch_encoding["special_tokens_mask"],
[1] * (12 - len(batch_encoding["input_ids"])) + batch_encoding["special_tokens_mask"],
)
# Test 3:
# padding_strategy=True or "longest" or PaddingStrategy.LONGEST or "max_length" or PaddingStrategy.MAX_LENGTH and pad_to_multiple_of 16
for padding in [True, "longest", PaddingStrategy.LONGEST]:
padded_batch_encoding = self.tokenizer.pad(get_batch_encoding(), padding=padding, pad_to_multiple_of=16)
self.assertEqual(
padded_batch_encoding["input_ids"],
[self.ref_tokenizer.instruct_tokenizer.tokenizer.pad_id] * (16 - len(batch_encoding["input_ids"]))
+ batch_encoding["input_ids"],
)
self.assertEqual(
padded_batch_encoding["attention_mask"],
[0] * (16 - len(batch_encoding["input_ids"])) + batch_encoding["attention_mask"],
)
self.assertEqual(
padded_batch_encoding["special_tokens_mask"],
[1] * (16 - len(batch_encoding["input_ids"])) + batch_encoding["special_tokens_mask"],
)
# Test 4:
# padding_side="right"
right_tokenizer = MistralCommonBackend.from_pretrained(
self.repo_id,
local_files_only=self.local_files_only,
padding_side="right",
revision=None,
)
right_paddings = [
right_tokenizer.pad(get_batch_encoding(), padding="max_length", max_length=12),
self.tokenizer.pad(get_batch_encoding(), padding="max_length", max_length=12, padding_side="right"),
]
for padded_batch_encoding in right_paddings:
self.assertEqual(
padded_batch_encoding["input_ids"],
batch_encoding["input_ids"]
+ [self.ref_tokenizer.instruct_tokenizer.tokenizer.pad_id] * (12 - len(batch_encoding["input_ids"])),
)
self.assertEqual(
padded_batch_encoding["attention_mask"],
batch_encoding["attention_mask"] + [0] * (12 - len(batch_encoding["input_ids"])),
)
self.assertEqual(
padded_batch_encoding["special_tokens_mask"],
batch_encoding["special_tokens_mask"] + [1] * (12 - len(batch_encoding["input_ids"])),
)
# Test 5:
# return_attention_mask=False
padded_batch_encoding = self.tokenizer.pad(
get_batch_encoding(), padding="max_length", max_length=12, return_attention_mask=False
)
self.assertEqual(
padded_batch_encoding["input_ids"],
[self.ref_tokenizer.instruct_tokenizer.tokenizer.pad_id] * (12 - len(batch_encoding["input_ids"]))
+ batch_encoding["input_ids"],
)
self.assertEqual(padded_batch_encoding["attention_mask"], batch_encoding["attention_mask"])
self.assertEqual(
padded_batch_encoding["special_tokens_mask"],
[1] * (12 - len(batch_encoding["input_ids"])) + batch_encoding["special_tokens_mask"],
)
# Test 6:
# return_tensors="pt" or "np"
for return_tensors in ["pt", "np"]:
padded_batch_encoding = self.tokenizer.pad(
get_batch_encoding(), padding="max_length", max_length=12, return_tensors=return_tensors
)
self.assertEqual(padded_batch_encoding["input_ids"].shape, torch.Size((12,)))
self.assertEqual(padded_batch_encoding["attention_mask"].shape, torch.Size((12,)))
self.assertEqual(padded_batch_encoding["special_tokens_mask"].shape, torch.Size((12,)))
def test_list_batch_encoding_input(self):
def get_batch_encoding():
return self.tokenizer(["Hello world!", "Hello world! Longer sentence."], return_special_tokens_mask=True)
# Test 1:
# padding=True or "longest" or PaddingStrategy.LONGEST
batch_encoding = get_batch_encoding()
for padding in [
True,
"longest",
PaddingStrategy.LONGEST,
]:
padded_batch_encoding = self.tokenizer.pad(get_batch_encoding(), padding=padding)
self.assertEqual(
padded_batch_encoding["input_ids"],
[
[self.ref_tokenizer.instruct_tokenizer.tokenizer.pad_id]
* (len(batch_encoding["input_ids"][1]) - len(batch_encoding["input_ids"][0]))
+ batch_encoding["input_ids"][0],
batch_encoding["input_ids"][1],
],
)
self.assertEqual(
padded_batch_encoding["attention_mask"],
[
[0] * (len(batch_encoding["input_ids"][1]) - len(batch_encoding["input_ids"][0]))
+ batch_encoding["attention_mask"][0],
batch_encoding["attention_mask"][1],
],
)
self.assertEqual(
padded_batch_encoding["special_tokens_mask"],
[
[1] * (len(batch_encoding["input_ids"][1]) - len(batch_encoding["input_ids"][0]))
+ batch_encoding["special_tokens_mask"][0],
batch_encoding["special_tokens_mask"][1],
],
)
# Test 2:
# padding_strategy="max_length" or PaddingStrategy.MAX_LENGTH and max_length
for padding in ["max_length", PaddingStrategy.MAX_LENGTH]:
padded_batch_encoding = self.tokenizer.pad(get_batch_encoding(), padding=padding, max_length=12)
self.assertEqual(
padded_batch_encoding["input_ids"],
[
[self.ref_tokenizer.instruct_tokenizer.tokenizer.pad_id]
* (12 - len(batch_encoding["input_ids"][0]))
+ batch_encoding["input_ids"][0],
[self.ref_tokenizer.instruct_tokenizer.tokenizer.pad_id]
* (12 - len(batch_encoding["input_ids"][1]))
+ batch_encoding["input_ids"][1],
],
)
self.assertEqual(
padded_batch_encoding["attention_mask"],
[
[0] * (12 - len(batch_encoding["input_ids"][0])) + batch_encoding["attention_mask"][0],
[0] * (12 - len(batch_encoding["input_ids"][1])) + batch_encoding["attention_mask"][1],
],
)
self.assertEqual(
padded_batch_encoding["special_tokens_mask"],
[
[1] * (12 - len(batch_encoding["input_ids"][0])) + batch_encoding["special_tokens_mask"][0],
[1] * (12 - len(batch_encoding["input_ids"][1])) + batch_encoding["special_tokens_mask"][1],
],
)
# Test 3:
# padding_strategy=True or "longest" or PaddingStrategy.LONGEST or "max_length" or PaddingStrategy.MAX_LENGTH and pad_to_multiple_of 16
for padding in [True, "longest", PaddingStrategy.LONGEST]:
padded_batch_encoding = self.tokenizer.pad(get_batch_encoding(), padding=padding, pad_to_multiple_of=16)
self.assertEqual(
padded_batch_encoding["input_ids"],
[
[self.ref_tokenizer.instruct_tokenizer.tokenizer.pad_id]
* (16 - len(batch_encoding["input_ids"][0]))
+ batch_encoding["input_ids"][0],
[self.ref_tokenizer.instruct_tokenizer.tokenizer.pad_id]
* (16 - len(batch_encoding["input_ids"][1]))
+ batch_encoding["input_ids"][1],
],
)
self.assertEqual(
padded_batch_encoding["attention_mask"],
[
[0] * (16 - len(batch_encoding["input_ids"][0])) + batch_encoding["attention_mask"][0],
[0] * (16 - len(batch_encoding["input_ids"][1])) + batch_encoding["attention_mask"][1],
],
)
self.assertEqual(
padded_batch_encoding["special_tokens_mask"],
[
[1] * (16 - len(batch_encoding["input_ids"][0])) + batch_encoding["special_tokens_mask"][0],
[1] * (16 - len(batch_encoding["input_ids"][1])) + batch_encoding["special_tokens_mask"][1],
],
)
# Test 4:
# padding_side="right"
right_tokenizer = MistralCommonBackend.from_pretrained(
self.repo_id,
local_files_only=self.local_files_only,
padding_side="right",
revision=None,
)
right_paddings = [
right_tokenizer.pad(get_batch_encoding(), padding="max_length", max_length=12),
self.tokenizer.pad(get_batch_encoding(), padding="max_length", max_length=12, padding_side="right"),
]
for padded_batch_encoding in right_paddings:
self.assertEqual(
padded_batch_encoding["input_ids"],
[
batch_encoding["input_ids"][0]
+ [self.ref_tokenizer.instruct_tokenizer.tokenizer.pad_id]
* (12 - len(batch_encoding["input_ids"][0])),
batch_encoding["input_ids"][1]
+ [self.ref_tokenizer.instruct_tokenizer.tokenizer.pad_id]
* (12 - len(batch_encoding["input_ids"][1])),
],
)
self.assertEqual(
padded_batch_encoding["attention_mask"],
[
batch_encoding["attention_mask"][0] + [0] * (12 - len(batch_encoding["input_ids"][0])),
batch_encoding["attention_mask"][1] + [0] * (12 - len(batch_encoding["input_ids"][1])),
],
)
self.assertEqual(
padded_batch_encoding["special_tokens_mask"],
[
batch_encoding["special_tokens_mask"][0] + [1] * (12 - len(batch_encoding["input_ids"][0])),
batch_encoding["special_tokens_mask"][1] + [1] * (12 - len(batch_encoding["input_ids"][1])),
],
)
# Test 5:
# return_attention_mask=False
padded_batch_encoding = self.tokenizer.pad(
get_batch_encoding(), padding="max_length", max_length=12, return_attention_mask=False
)
self.assertEqual(
padded_batch_encoding["input_ids"],
[
[self.ref_tokenizer.instruct_tokenizer.tokenizer.pad_id] * (12 - len(batch_encoding["input_ids"][0]))
+ batch_encoding["input_ids"][0],
[self.ref_tokenizer.instruct_tokenizer.tokenizer.pad_id] * (12 - len(batch_encoding["input_ids"][1]))
+ batch_encoding["input_ids"][1],
],
)
self.assertEqual(padded_batch_encoding["attention_mask"], batch_encoding["attention_mask"])
self.assertEqual(
padded_batch_encoding["special_tokens_mask"],
[
[1] * (12 - len(batch_encoding["input_ids"][0])) + batch_encoding["special_tokens_mask"][0],
[1] * (12 - len(batch_encoding["input_ids"][1])) + batch_encoding["special_tokens_mask"][1],
],
)
# Test 6:
# return_tensors="pt" or "np"
for return_tensors in ["pt", "np"]:
padded_batch_encoding = self.tokenizer.pad(
get_batch_encoding(), padding="max_length", max_length=12, return_tensors=return_tensors
)
self.assertEqual(padded_batch_encoding["input_ids"].shape, torch.Size((2, 12)))
self.assertEqual(padded_batch_encoding["attention_mask"].shape, torch.Size((2, 12)))
self.assertEqual(padded_batch_encoding["special_tokens_mask"].shape, torch.Size((2, 12)))
    def test_truncate_sequences(self):
        """Exercise `truncate_sequences`: strategies, overflow/stride, and `truncation_side="left"`."""
        # Test 1:
        # truncation_strategy="longest_first" or TruncationStrategy.LONGEST_FIRST
        text = "Hello world!"
        ids = self.ref_tokenizer.instruct_tokenizer.tokenizer.encode(text, bos=True, eos=True)
        for truncation in ["longest_first", TruncationStrategy.LONGEST_FIRST]:
            for num_tokens_to_remove in [0, 2]:
                tokens, none, overflowing_tokens = self.tokenizer.truncate_sequences(
                    ids, truncation_strategy=truncation, num_tokens_to_remove=num_tokens_to_remove
                )
                # Default truncation side: tokens are dropped from the right end.
                self.assertEqual(tokens, ids[:-num_tokens_to_remove] if num_tokens_to_remove > 0 else ids)
                self.assertIsNone(none)
                self.assertEqual(overflowing_tokens, ids[-num_tokens_to_remove:] if num_tokens_to_remove > 0 else [])
        # Test 2:
        # truncation_strategy="only_first" or "only_second" or TruncationStrategy.ONLY_FIRST or TruncationStrategy.ONLY_SECOND
        # Should raise a ValueError
        for truncation in ["only_first", "only_second", TruncationStrategy.ONLY_FIRST, TruncationStrategy.ONLY_SECOND]:
            with self.assertRaises(ValueError):
                self.tokenizer.truncate_sequences(ids, truncation_strategy=truncation, num_tokens_to_remove=1)
        # Test 3:
        # truncation_strategy="do_not_truncate" or TruncationStrategy.DO_NOT_TRUNCATE
        # No-op: the full sequence is returned and nothing overflows.
        for truncation in ["do_not_truncate", TruncationStrategy.DO_NOT_TRUNCATE]:
            tokens, none, overflowing_tokens = self.tokenizer.truncate_sequences(
                ids, truncation_strategy=truncation, num_tokens_to_remove=1
            )
            self.assertEqual(tokens, ids)
            self.assertIsNone(none)
            self.assertEqual(overflowing_tokens, [])
        # Test 4:
        # pair_ids is not None
        # Should raise a ValueError
        with self.assertRaises(ValueError):
            self.tokenizer.truncate_sequences(
                ids, pair_ids=ids, truncation_strategy="longest_first", num_tokens_to_remove=1
            )
        # Test 5:
        # stride
        # Overflowing tokens carry `stride` extra tokens of overlap with the kept part.
        for stride in [0, 2]:
            tokens, none, overflowing_tokens = self.tokenizer.truncate_sequences(
                ids, truncation_strategy="longest_first", num_tokens_to_remove=2, stride=stride
            )
            self.assertEqual(tokens, ids[:-2])
            self.assertIsNone(none)
            self.assertEqual(overflowing_tokens, ids[-2 - stride :])
        # Test 6:
        # truncation_side="left"
        left_tokenizer = MistralCommonBackend.from_pretrained(
            self.repo_id,
            local_files_only=self.local_files_only,
            truncation_side="left",
            revision=None,
        )
        tokens, none, overflowing_tokens = left_tokenizer.truncate_sequences(
            ids, truncation_strategy="longest_first", num_tokens_to_remove=2
        )
        # With left-side truncation the leading tokens are removed instead.
        self.assertEqual(tokens, ids[2:])
        self.assertIsNone(none)
        self.assertEqual(overflowing_tokens, ids[:2])
    def test_apply_chat_template_basic(self):
        """Chat templating matches mistral-common's reference encoding for both text and tokens."""
        conversation = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hi!"},
            {"role": "assistant", "content": "Hello! How can I help you?"},
            {"role": "user", "content": "What is the capital of France?"},
        ]
        expected_tokenized = self.ref_tokenizer.encode_chat_completion(ChatCompletionRequest.from_openai(conversation))
        # Test 1:
        # without tokenize: returns the rendered prompt text
        self.assertEqual(
            self.tokenizer.apply_chat_template(conversation, tokenize=False),
            expected_tokenized.text,
        )
        # Test 2:
        # with tokenize: returns the token ids
        self.assertEqual(
            self.tokenizer.apply_chat_template(conversation, tokenize=True).input_ids, expected_tokenized.tokens
        )
        # Unknown keyword arguments are rejected explicitly.
        with self.assertRaises(
            ValueError, msg="Kwargs [unk_args] are not supported by `MistralCommonBackend.apply_chat_template`."
        ):
            self.tokenizer.apply_chat_template(conversation, tokenize=True, unk_args="")
def test_apply_chat_template_continue_final_message(self):
conversation = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hi!"},
{"role": "assistant", "content": "Hello! How can I help you?"},
{"role": "user", "content": "What is the capital of France?"},
{"role": "assistant", "content": "Paris"},
]
expected_tokenized = self.ref_tokenizer.encode_chat_completion(
ChatCompletionRequest.from_openai(conversation, continue_final_message=True)
)
self.assertEqual(
self.tokenizer.apply_chat_template(conversation, tokenize=False, continue_final_message=True),
expected_tokenized.text,
)
self.assertEqual(
self.tokenizer.apply_chat_template(conversation, tokenize=True, continue_final_message=True).input_ids,
expected_tokenized.tokens,
)
with self.assertRaises(InvalidMessageStructureException):
self.tokenizer.apply_chat_template(conversation, tokenize=False, continue_final_message=False)
    def test_apply_chat_template_with_add_generation_prompt(self):
        """Validate `add_generation_prompt` behavior and its incompatibilities."""
        conversation = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hi!"},
        ]
        # Test 1:
        # with add_generation_prompt
        # NOTE(review): the reference encoding is built identically for both flag values —
        # presumably encode_chat_completion always ends with the generation prompt; confirm.
        for add_generation_prompt in [False, True]:
            expected_tokenized = self.ref_tokenizer.encode_chat_completion(
                ChatCompletionRequest.from_openai(conversation)
            )
            token_outputs = self.tokenizer.apply_chat_template(
                conversation, tokenize=True, add_generation_prompt=add_generation_prompt
            )
            self.assertEqual(token_outputs.input_ids, expected_tokenized.tokens)
        # Test 2:
        # with continue_final_message
        # The two flags are mutually exclusive.
        with self.assertRaises(ValueError):
            self.tokenizer.apply_chat_template(
                conversation, tokenize=True, add_generation_prompt=True, continue_final_message=True
            )
        # Test 3:
        # with last message with assistant role
        # NOTE(review): the role is spelled "asistant" — the ValueError may come from the
        # invalid role string rather than from an assistant-last check; confirm intent.
        conversation = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hi!"},
            {"role": "asistant", "content": "Hey!"},
        ]
        with self.assertRaises(ValueError):
            self.tokenizer.apply_chat_template(conversation, tokenize=True, add_generation_prompt=True)
    def test_apply_chat_template_with_tools(self):
        """Tool definitions and tool-call/tool-result messages render like the reference encoder."""
        conversation = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hi!"},
            {"role": "assistant", "content": "Hello! How can I help you?"},
            {"role": "user", "content": "What is the temperature in Paris?"},
            {
                "role": "assistant",
                "tool_calls": [
                    {
                        "id": "azerty123",
                        "function": {
                            "name": "get_current_weather",
                            "arguments": {"location": "Paris", "format": "text", "unit": "celsius"},
                        },
                    }
                ],
            },
            {"role": "tool", "name": "get_current_weather", "content": "22", "tool_call_id": "azerty123"},
        ]
        # OpenAI-style function schema forwarded to both tokenizers.
        tools = [
            {
                "type": "function",
                "function": {
                    "name": "get_current_weather",
                    "description": "Get the current weather in a given location",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "location": {
                                "type": "string",
                                "description": "The city and state, e.g. San Francisco, CA",
                                "required": ["location"],
                            },
                            "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                            "format": {
                                "type": "string",
                                "enum": ["text", "json"],
                                "description": "The format of the response",
                                "required": ["format"],
                            },
                        },
                    },
                },
            }
        ]
        expected_tokenized = self.ref_tokenizer.encode_chat_completion(
            ChatCompletionRequest.from_openai(conversation, tools)
        )
        self.assertEqual(
            self.tokenizer.apply_chat_template(conversation, tools=tools, tokenize=False),
            expected_tokenized.text,
        )
    def test_apply_chat_template_with_image(self):
        """All supported image content spellings (image_url / url / base64) encode identically."""
        ref_conversation = conversation = [
            {"role": "system", "content": "You are a helpful assistant."},
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "What is this?"},
                    {
                        "type": "image_url",
                        "image_url": {"url": IMG_URL},
                    },
                ],
            },
        ]
        expected_tokenized = self.ref_tokenizer.encode_chat_completion(
            ChatCompletionRequest.from_openai(ref_conversation)
        )
        # Three equivalent spellings of the same image input.
        image_contents = [
            {
                "type": "image_url",
                "image_url": {"url": IMG_URL},
            },
            {
                "type": "image",
                "url": IMG_URL,
            },
            {"type": "image", "base64": IMG_BASE_64},
        ]
        for image_content in image_contents:
            conversation = [
                {"role": "system", "content": "You are a helpful assistant."},
                {
                    "role": "user",
                    "content": [{"type": "text", "text": "What is this?"}, image_content],
                },
            ]
            output = self.tokenizer.apply_chat_template(conversation).input_ids
            self.assertEqual(output, expected_tokenized.tokens)
            # Tokenized output also carries pixel values matching the reference images.
            output_dict = self.tokenizer.apply_chat_template(conversation, tokenize=True)
            self.assertEqual(output_dict["input_ids"], expected_tokenized.tokens)
            self.assertEqual(len(output_dict["pixel_values"]), len(expected_tokenized.images))
            for o, e in zip(output_dict["pixel_values"], expected_tokenized.images):
                self.assertTrue(np.allclose(o, e))
            # With return_tensors="pt", ids and pixel values come back as torch tensors.
            output_dict = self.tokenizer.apply_chat_template(conversation, tokenize=True, return_tensors="pt")
            self.assertEqual(output_dict["input_ids"].tolist()[0], expected_tokenized.tokens)
            expected_images_pt_tensor = torch.from_numpy(np.stack(expected_tokenized.images))
            self.assertTrue(torch.allclose(output_dict["pixel_values"], expected_images_pt_tensor))
    def test_apply_chat_template_with_audio(self):
        """All supported audio content spellings (url / path / base64) encode identically."""
        ref_conversation = conversation = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "What is this?"},
                    {
                        "type": "input_audio",
                        "input_audio": {
                            "data": AUDIO_BASE_64,
                            "format": "wav",
                        },
                    },
                ],
            },
        ]
        expected_tokenized = self.ref_tokenizer_audio.encode_chat_completion(
            ChatCompletionRequest.from_openai(ref_conversation)
        )
        # Three equivalent spellings of the same audio input.
        audio_contents = [
            {
                "type": "audio",
                "url": AUDIO_URL,
            },
            {
                "type": "audio",
                "path": AUDIO_URL,
            },
            {"type": "audio", "base64": AUDIO_BASE_64},
        ]
        for audio_content in audio_contents:
            conversation = [
                {
                    "role": "user",
                    "content": [{"type": "text", "text": "What is this?"}, audio_content],
                },
            ]
            output = self.tokenizer_audio.apply_chat_template(conversation, tokenize=True).input_ids
            self.assertEqual(output, expected_tokenized.tokens)
            # return_dict=True also exposes the decoded audio arrays.
            output_dict = self.tokenizer_audio.apply_chat_template(conversation, tokenize=True, return_dict=True)
            self.assertEqual(output_dict["input_ids"], expected_tokenized.tokens)
            self.assertEqual(len(output_dict["audio"]), len(expected_tokenized.audios))
            for o, e in zip(output_dict["audio"], expected_tokenized.audios):
                audio_array = e.audio_array
                self.assertTrue(np.allclose(o, audio_array))
            # Tensor conversion is not implemented for audio outputs.
            with self.assertRaises(NotImplementedError):
                output_dict = self.tokenizer_audio.apply_chat_template(
                    conversation, tokenize=True, return_dict=True, return_tensors="pt"
                )
def test_apply_chat_template_with_truncation(self):
conversation = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hi!"},
{"role": "assistant", "content": "Hello! How can I help you?"},
{"role": "user", "content": "What is the capital of France?"},
]
expected_tokenized = self.ref_tokenizer.encode_chat_completion(ChatCompletionRequest.from_openai(conversation))
# Test 1:
# with truncation
self.assertEqual(
self.tokenizer.apply_chat_template(conversation, tokenize=True, truncation=True, max_length=20).input_ids,
expected_tokenized.tokens[:20],
)
# Test 2:
# without truncation
self.assertEqual(
self.tokenizer.apply_chat_template(conversation, tokenize=True, truncation=False, max_length=20).input_ids,
expected_tokenized.tokens,
)
# Test 3:
# assert truncation is boolean
with self.assertRaises(TypeError):
self.tokenizer.apply_chat_template(
conversation, tokenize=True, truncation=TruncationStrategy.LONGEST_FIRST, max_length=20
)
    def test_batch_apply_chat_template(self):
        """Batched templating (mixed image + tool conversations) matches per-item reference encodings."""
        conversations = [
            [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": "Hi!"},
                {"role": "assistant", "content": "Hello! How can I help you?"},
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "What is this?"},
                        {
                            "type": "image_url",
                            "image_url": {"url": IMG_URL},
                        },
                    ],
                },
            ],
            [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": "Hi!"},
                {"role": "assistant", "content": "Hello! How can I help you?"},
                {"role": "user", "content": "What is the temperature in Paris?"},
                {
                    "role": "assistant",
                    "tool_calls": [
                        {
                            "id": "azerty123",
                            "function": {
                                "name": "get_current_weather",
                                "arguments": {"location": "Paris", "format": "text", "unit": "celsius"},
                            },
                        }
                    ],
                },
                {"role": "tool", "name": "get_current_weather", "content": "22", "tool_call_id": "azerty123"},
            ],
        ]
        # OpenAI-style function schema shared by both conversations.
        tools = [
            {
                "type": "function",
                "function": {
                    "name": "get_current_weather",
                    "description": "Get the current weather in a given location",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "location": {
                                "type": "string",
                                "description": "The city and state, e.g. San Francisco, CA",
                                "required": ["location"],
                            },
                            "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                            "format": {
                                "type": "string",
                                "enum": ["text", "json"],
                                "description": "The format of the response",
                                "required": ["format"],
                            },
                        },
                    },
                },
            }
        ]
        # Per-conversation reference encodings from mistral-common.
        expected_tokenized = [
            self.ref_tokenizer.encode_chat_completion(ChatCompletionRequest.from_openai(conversation, tools=tools))
            for conversation in conversations
        ]
        text_outputs = self.tokenizer.apply_chat_template(conversations, tools=tools, tokenize=False)
        token_outputs = self.tokenizer.apply_chat_template(conversations, tools=tools, tokenize=True).input_ids
        self.assertEqual(len(text_outputs), len(token_outputs))
        self.assertEqual(len(text_outputs), len(expected_tokenized))
        for text, token, expected in zip(text_outputs, token_outputs, expected_tokenized):
            self.assertEqual(text, expected.text)
            self.assertEqual(token, expected.tokens)
        # Unknown keyword arguments are rejected explicitly.
        with self.assertRaises(
            ValueError,
            msg="Kwargs [unk_args] are not supported by `MistralCommonBackend.batch_apply_chat_template`.",
        ):
            self.tokenizer.apply_chat_template(conversations, tools=tools, tokenize=True, unk_args="")
    def test_batch_apply_chat_template_images(self):
        """Batched image conversations: every image spelling encodes identically; pt/np
        tensor conversion stacks the pixel values."""
        conversations = [
            [
                {"role": "system", "content": "You are a helpful assistant."},
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "What is this?"},
                        {
                            "type": "image_url",
                            "image_url": {"url": IMG_URL},
                        },
                    ],
                },
            ],
            [
                {"role": "system", "content": "You are a helpful assistant."},
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "What is this?"},
                        {
                            "type": "image",
                            "url": IMG_URL,
                        },
                    ],
                },
            ],
            [
                {"role": "system", "content": "You are a helpful assistant."},
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "What is this?"},
                        {"type": "image", "base64": IMG_BASE_64},
                    ],
                },
            ],
        ]
        ref_conversation = [
            {"role": "system", "content": "You are a helpful assistant."},
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "What is this?"},
                    {
                        "type": "image_url",
                        "image_url": {"url": IMG_URL},
                    },
                ],
            },
        ]
        expected_tokenized = self.ref_tokenizer.encode_chat_completion(
            ChatCompletionRequest.from_openai(ref_conversation)
        )
        output = self.tokenizer.apply_chat_template(conversations, tokenize=True).input_ids
        self.assertEqual(output, [expected_tokenized.tokens] * 3)
        output = self.tokenizer.apply_chat_template(conversations, tokenize=True, return_dict=True)
        self.assertEqual(output["input_ids"], [expected_tokenized.tokens] * 3)
        self.assertEqual(len(output["pixel_values"]), len(expected_tokenized.images) * 3)
        # NOTE(review): this zip pairs each pixel array with the *whole* reference image list
        # and stops after min(len) pairs — it only fully checks the first few entries unless
        # there is exactly one image per conversation; confirm intent.
        for o, e in zip(output["pixel_values"], [expected_tokenized.images] * 3):
            self.assertTrue(np.allclose(o, e))
        output = self.tokenizer.apply_chat_template(
            conversations, tokenize=True, return_dict=True, return_tensors="pt"
        )
        self.assertEqual(output["input_ids"].tolist(), [expected_tokenized.tokens] * 3)
        self.assertEqual(output["input_ids"].shape[0], len(expected_tokenized.images) * 3)
        expected_images_pt_tensor = torch.from_numpy(np.stack([expected_tokenized.images] * 3))
        self.assertTrue(torch.allclose(output["pixel_values"], expected_images_pt_tensor))
        output = self.tokenizer.apply_chat_template(
            conversations, tokenize=True, return_dict=True, return_tensors="np"
        )
        self.assertEqual(output["input_ids"].tolist(), [expected_tokenized.tokens] * 3)
        self.assertTrue(np.allclose(output["pixel_values"], np.array([expected_tokenized.images] * 3)))
    def test_batch_apply_chat_template_with_continue_final_message(self):
        """Batched `continue_final_message`: prefix-continues trailing assistant messages."""
        conversations = [
            [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": "Hi!"},
                {"role": "assistant", "content": "Hello! How can "},
            ],
            [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": "Hi!"},
                {"role": "assistant", "content": "Hello! How can I help you? Ou préférez vous "},
            ],
        ]
        # Test 1:
        # with continue_final_message
        expected_tokenized = [
            self.ref_tokenizer.encode_chat_completion(
                ChatCompletionRequest.from_openai(conversation, continue_final_message=True)
            )
            for conversation in conversations
        ]
        token_outputs = self.tokenizer.apply_chat_template(
            conversations, tokenize=True, continue_final_message=True
        ).input_ids
        for output, expected in zip(token_outputs, expected_tokenized):
            self.assertEqual(output, expected.tokens)
        # Test 2:
        # without continue_final_message
        # A trailing assistant message without continuation is structurally invalid.
        with self.assertRaises(InvalidMessageStructureException):
            self.tokenizer.apply_chat_template(
                conversations,
                tokenize=False,
                continue_final_message=False,
            )
        # Test 3:
        # with continue_final_message and last role is not assistant
        with self.assertRaises(InvalidMessageStructureException):
            self.tokenizer.apply_chat_template(
                conversation=[
                    [
                        {"role": "system", "content": "You are a helpful assistant."},
                        {"role": "user", "content": "Hi!"},
                    ]
                ],
                tokenize=True,
                continue_final_message=True,
            )
    def test_batch_apply_chat_template_with_add_generation_prompt(self):
        """Batched `add_generation_prompt` behavior and its incompatibilities."""
        conversations = [
            [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": "Hi!"},
            ],
            [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": "Hi!"},
            ],
        ]
        # Test 1:
        # with add_generation_prompt
        # NOTE(review): the reference encodings are built identically for both flag values —
        # presumably encode_chat_completion always ends with the generation prompt; confirm.
        for add_generation_prompt in [False, True]:
            expected_tokenized = [
                self.ref_tokenizer.encode_chat_completion(ChatCompletionRequest.from_openai(conversation))
                for conversation in conversations
            ]
            token_outputs = self.tokenizer.apply_chat_template(
                conversations, tokenize=True, add_generation_prompt=add_generation_prompt
            ).input_ids
            for output, expected in zip(token_outputs, expected_tokenized):
                self.assertEqual(output, expected.tokens)
        # Test 2:
        # with continue_final_message
        # The two flags are mutually exclusive.
        with self.assertRaises(ValueError):
            self.tokenizer.apply_chat_template(
                conversations, tokenize=True, add_generation_prompt=True, continue_final_message=True
            )
        # Test 3:
        # with last message with assistant role
        # NOTE(review): the role is spelled "asistant" — the ValueError may come from the
        # invalid role string rather than from an assistant-last check; confirm intent.
        conversations = [
            [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": "Hi!"},
                {"role": "asistant", "content": "Hey!"},
            ],
            [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": "Hi!"},
            ],
        ]
        with self.assertRaises(ValueError):
            self.tokenizer.apply_chat_template(conversations, tokenize=True, add_generation_prompt=True)
def test_batch_apply_chat_template_with_truncation(
self,
):
# Test 1:
# with truncation
token_outputs = self.tokenizer.apply_chat_template(
self.fixture_conversations, tokenize=True, truncation=True, max_length=20
).input_ids
for output, expected in zip(token_outputs, self.tokenized_fixture_conversations):
self.assertEqual(output, expected.tokens[:20])
# Test 2:
# without truncation
token_outputs = self.tokenizer.apply_chat_template(
self.fixture_conversations, tokenize=True, truncation=False, max_length=20
).input_ids
self.assertEqual(len(token_outputs), len(self.tokenized_fixture_conversations))
for output, expected in zip(token_outputs, self.tokenized_fixture_conversations):
self.assertEqual(output, expected.tokens)
# Test 3:
# assert truncation is boolean
with self.assertRaises(TypeError):
self.tokenizer.apply_chat_template(
self.fixture_conversations, tokenize=True, truncation=TruncationStrategy.LONGEST_FIRST, max_length=20
)
    def test_batch_apply_chat_template_with_padding(
        self,
    ):
        """Batched padding: longest-style pads to the batch max, max_length pads to
        `max_length` (no-op without one), do-not-pad leaves sequences untouched."""
        for padding in [True, "max_length", PaddingStrategy.LONGEST, PaddingStrategy.MAX_LENGTH]:
            if padding == PaddingStrategy.MAX_LENGTH:
                # No padding if no max length is provided
                token_outputs = self.tokenizer.apply_chat_template(
                    self.fixture_conversations, padding=padding, return_dict=False
                )
                self.assertEqual(len(token_outputs), len(self.tokenized_fixture_conversations))
                for output, expected in zip(token_outputs, self.tokenized_fixture_conversations):
                    self.assertEqual(output, expected.tokens)
            # NOTE(review): the branch below assumes the "max_length" string compares equal to
            # PaddingStrategy.MAX_LENGTH (str-enum); confirm for this PaddingStrategy type.
            max_length = 20 if padding == PaddingStrategy.MAX_LENGTH else None
            token_outputs = self.tokenizer.apply_chat_template(
                self.fixture_conversations, tokenize=True, padding=padding, max_length=max_length, return_dict=False
            )
            if padding != PaddingStrategy.MAX_LENGTH:
                # Longest-style padding: left-pad every sequence to the batch maximum.
                longest = max(len(tokenized.tokens) for tokenized in self.tokenized_fixture_conversations)
                self.assertEqual(len(token_outputs), len(self.tokenized_fixture_conversations))
                for output, expected in zip(token_outputs, self.tokenized_fixture_conversations):
                    self.assertEqual(
                        output,
                        [self.tokenizer.pad_token_id] * (longest - len(expected.tokens)) + expected.tokens,
                    )
            else:
                self.assertEqual(len(token_outputs), len(self.tokenized_fixture_conversations))
                for output, expected in zip(token_outputs, self.tokenized_fixture_conversations):
                    if len(expected.tokens) < max_length:
                        self.assertEqual(
                            output,
                            [self.tokenizer.pad_token_id] * (20 - len(expected.tokens)) + expected.tokens,
                        )
                    else:
                        # Sequences already at/over max_length come back unpadded.
                        self.assertEqual(output, expected.tokens)
        for padding in [False, "do_not_pad", PaddingStrategy.DO_NOT_PAD]:
            token_outputs = self.tokenizer.apply_chat_template(
                self.fixture_conversations, tokenize=True, padding=padding, return_dict=False
            )
            self.assertEqual(len(token_outputs), len(self.tokenized_fixture_conversations))
            for output, expected in zip(token_outputs, self.tokenized_fixture_conversations):
                self.assertEqual(output, expected.tokens)
def test_batch_apply_chat_template_with_padding_and_truncation(
self,
):
max_length = 20
for padding in [True, "max_length", PaddingStrategy.LONGEST, PaddingStrategy.MAX_LENGTH]:
token_outputs = self.tokenizer.apply_chat_template(
self.fixture_conversations,
tokenize=True,
truncation=True,
padding=padding,
max_length=max_length,
return_dict=False,
)
self.assertEqual(len(token_outputs), len(self.tokenized_fixture_conversations))
for output, expected in zip(token_outputs, self.tokenized_fixture_conversations):
self.assertEqual(
output, [self.tokenizer.pad_token_id] * (20 - len(expected.tokens)) + expected.tokens[:20]
)
for padding in [False, "do_not_pad", PaddingStrategy.DO_NOT_PAD]:
token_outputs = self.tokenizer.apply_chat_template(
self.fixture_conversations,
tokenize=True,
truncation=True,
padding=padding,
max_length=max_length,
return_dict=False,
)
self.assertEqual(len(token_outputs), len(self.tokenized_fixture_conversations))
for output, expected in zip(token_outputs, self.tokenized_fixture_conversations):
self.assertEqual(output, expected.tokens[:20])
def test_batch_apply_chat_template_return_tensors(self):
# Test 1:
# with tokenize
token_outputs = self.tokenizer.apply_chat_template(
self.fixture_conversations, tokenize=True, return_tensors="pt", padding=True, return_dict=False
)
self.assertIsInstance(token_outputs, torch.Tensor)
self.assertEqual(
token_outputs.shape,
(len(self.fixture_conversations), max(len(t.tokens) for t in self.tokenized_fixture_conversations)),
)
# Test 2:
# without tokenize, should ignore return_tensors
token_outputs = self.tokenizer.apply_chat_template(
self.fixture_conversations, tokenize=False, return_tensors="pt", padding=True, return_dict=False
)
self.assertEqual(token_outputs, [t.text for t in self.tokenized_fixture_conversations])
def test_batch_apply_chat_template_return_dict(self):
# Test 1:
# with tokenize
token_outputs = self.tokenizer.apply_chat_template(self.fixture_conversations, tokenize=True, return_dict=True)
self.assertIn("input_ids", token_outputs)
self.assertIn("attention_mask", token_outputs)
self.assertEqual(token_outputs["input_ids"], [t.tokens for t in self.tokenized_fixture_conversations])
self.assertEqual(
token_outputs["attention_mask"], [[1] * len(t.tokens) for t in self.tokenized_fixture_conversations]
)
# Test 2:
# without tokenize, should ignore return_dict
token_outputs = self.tokenizer.apply_chat_template(
self.fixture_conversations, tokenize=False, return_dict=True
)
self.assertNotIsInstance(token_outputs, dict)
self.assertEqual(token_outputs, [t.text for t in self.tokenized_fixture_conversations])
    def test_call(self):
        """`__call__` on a single string: ids, masks, tensors, special tokens, rejected kwargs."""
        # Test 1:
        # default case
        text = "Hello world!"
        expected_tokens = self.ref_tokenizer.instruct_tokenizer.tokenizer.encode(text, bos=True, eos=True)
        tokens = self.tokenizer(text)
        self.assertIsInstance(tokens, BatchEncoding)
        self.assertEqual(tokens["input_ids"], expected_tokens)
        self.assertEqual(tokens["attention_mask"], [1] * len(expected_tokens))
        # Test 2:
        # return_attention_mask=False
        tokens = self.tokenizer(text, return_attention_mask=False)
        self.assertEqual(tokens["input_ids"], expected_tokens)
        self.assertNotIn("attention_mask", tokens)
        # Test 3:
        # return_tensors="pt"
        tokens = self.tokenizer(text, return_tensors="pt")
        self.assertIsInstance(tokens["input_ids"], torch.Tensor)
        self.assertTrue(torch.equal(tokens["input_ids"], torch.Tensor(expected_tokens).unsqueeze(0)))
        self.assertIsInstance(tokens["attention_mask"], torch.Tensor)
        self.assertTrue(torch.equal(tokens["attention_mask"], torch.ones(1, len(expected_tokens))))
        # Test 4:
        # return_special_tokens_mask=True
        # BOS and EOS are the only special tokens in a plain encode.
        tokens = self.tokenizer(text, return_special_tokens_mask=True)
        self.assertEqual(tokens["input_ids"], expected_tokens)
        self.assertEqual(tokens["attention_mask"], [1] * len(expected_tokens))
        self.assertEqual(tokens["special_tokens_mask"], [1] + [0] * (len(expected_tokens) - 2) + [1])
        # Test 5:
        # add_special_tokens=False
        expected_tokens = self.ref_tokenizer.instruct_tokenizer.tokenizer.encode(text, bos=False, eos=False)
        tokens = self.tokenizer(text, add_special_tokens=False, return_special_tokens_mask=True)
        self.assertIsInstance(tokens, BatchEncoding)
        self.assertEqual(tokens["input_ids"], expected_tokens)
        self.assertEqual(tokens["attention_mask"], [1] * len(expected_tokens))
        self.assertEqual(tokens["special_tokens_mask"], [0] * len(expected_tokens))
        # Unsupported keyword arguments and sequence-pair/target inputs are rejected.
        with self.assertRaises(
            ValueError, msg="Kwargs [wrong_kwarg] are not supported by `MistralCommonBackend.__call__`."
        ):
            self.tokenizer(text, wrong_kwarg=True)
        with self.assertRaises(
            ValueError,
            msg="`text_pair`, `text_target` and `text_pair_target` are not supported by `MistralCommonBackend`.",
        ):
            self.tokenizer(text, text_pair="Hello world!")
        with self.assertRaises(
            ValueError,
            msg="`text_pair`, `text_target` and `text_pair_target` are not supported by `MistralCommonBackend`.",
        ):
            self.tokenizer(text, text_target="Hello world!")
        with self.assertRaises(
            ValueError,
            msg="`text_pair`, `text_target` and `text_pair_target` are not supported by `MistralCommonBackend`.",
        ):
            self.tokenizer(text, text_pair_target="Hello world!")
def test_call_with_truncation(self):
# Test 1:
# truncation=True or "longest_first" or TruncationStrategy.LONGEST_FIRST
text = "Hello world!" * 10
expected_tokens = self.ref_tokenizer.instruct_tokenizer.tokenizer.encode(text, bos=True, eos=True)
for truncation in [True, "longest_first", TruncationStrategy.LONGEST_FIRST]:
tokens = self.tokenizer(text, truncation=True, max_length=10, return_special_tokens_mask=True)
self.assertIsInstance(tokens, BatchEncoding)
self.assertEqual(tokens["input_ids"], expected_tokens[:10])
self.assertEqual(tokens["attention_mask"], [1] * 10)
self.assertEqual(tokens["special_tokens_mask"], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
# Test 2:
# truncation=False
for truncation in [False, "do_not_truncate", TruncationStrategy.DO_NOT_TRUNCATE]:
tokens = self.tokenizer(text, truncation=truncation, return_special_tokens_mask=True)
self.assertIsInstance(tokens, BatchEncoding)
self.assertEqual(tokens["input_ids"], expected_tokens)
self.assertEqual(tokens["attention_mask"], [1] * len(expected_tokens))
self.assertEqual(tokens["special_tokens_mask"], [1] + [0] * (len(expected_tokens) - 2) + [1])
# Test 3:
# truncation=True or "longest_first" or TruncationStrategy.LONGEST_FIRST with return_overflowing_tokens=True and stride
for truncation in [True, "longest_first", TruncationStrategy.LONGEST_FIRST]:
for stride in [0, 2]:
tokens = self.tokenizer(
text,
truncation=truncation,
max_length=10,
return_overflowing_tokens=True,
return_special_tokens_mask=True,
stride=stride,
)
self.assertIsInstance(tokens, BatchEncoding)
self.assertEqual(tokens["input_ids"], expected_tokens[:10])
self.assertEqual(tokens["attention_mask"], [1] * 10)
self.assertEqual(tokens["special_tokens_mask"], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
self.assertEqual(tokens["overflowing_tokens"], expected_tokens[10 - stride :])
self.assertEqual(tokens["num_truncated_tokens"], len(expected_tokens) - 10)
# Test 4:
# truncation="only_first" or TruncationStrategy.ONLY_FIRST or "only_second" or TruncationStrategy.ONLY_SECOND
# should raise an error
for truncation in ["only_first", TruncationStrategy.ONLY_FIRST, "only_second", TruncationStrategy.ONLY_SECOND]:
with self.assertRaises(
ValueError,
msg="Truncation strategy `only_first` and `only_second` are not supported by `MistralCommonBackend`.",
):
self.tokenizer(text, truncation=truncation)
    def test_call_with_padding(self):
        """`__call__` padding: no-op strategies, max_length, pad_to_multiple_of, padding_side."""
        text = "Hello world!"
        expected_tokens = self.ref_tokenizer.instruct_tokenizer.tokenizer.encode(text, bos=True, eos=True)
        # Test 1:
        # padding=False or padding=True or "do_not_pad" or PaddingStrategy.DO_NOT_PAD or padding="longest" or PaddingStrategy.LONGEST
        # For a single sequence, longest-style padding is a no-op.
        for padding in [False, True, "do_not_pad", PaddingStrategy.DO_NOT_PAD, "longest", PaddingStrategy.LONGEST]:
            tokens = self.tokenizer(text, padding=padding, return_special_tokens_mask=True)
            self.assertIsInstance(tokens, BatchEncoding)
            self.assertEqual(tokens["input_ids"], expected_tokens)
            self.assertEqual(tokens["attention_mask"], [1] * len(expected_tokens))
            self.assertEqual(tokens["special_tokens_mask"], [1] + [0] * (len(expected_tokens) - 2) + [1])
        # Test 2:
        # padding="max_length" or PaddingStrategy.MAX_LENGTH
        # Default side pads on the left: pad ids are prepended and masked out.
        for padding in ["max_length", PaddingStrategy.MAX_LENGTH]:
            tokens = self.tokenizer(text, padding=padding, max_length=20, return_special_tokens_mask=True)
            self.assertIsInstance(tokens, BatchEncoding)
            num_padding = 20 - len(expected_tokens)
            self.assertEqual(tokens["input_ids"], num_padding * [self.tokenizer.pad_token_id] + expected_tokens)
            self.assertEqual(tokens["attention_mask"], num_padding * [0] + [1] * len(expected_tokens))
            self.assertEqual(
                tokens["special_tokens_mask"], num_padding * [1] + [1] + [0] * (len(expected_tokens) - 2) + [1]
            )
        # Test 3:
        # pad_to_multiple_of
        # Pads to the next multiple of 16, not to max_length.
        tokens = self.tokenizer(
            text, padding=True, max_length=20, pad_to_multiple_of=16, return_special_tokens_mask=True
        )
        self.assertIsInstance(tokens, BatchEncoding)
        num_padding = 16 - len(expected_tokens)
        self.assertEqual(tokens["input_ids"], num_padding * [self.tokenizer.pad_token_id] + expected_tokens)
        self.assertEqual(tokens["attention_mask"], num_padding * [0] + [1] * len(expected_tokens))
        self.assertEqual(
            tokens["special_tokens_mask"], num_padding * [1] + [1] + [0] * (len(expected_tokens) - 2) + [1]
        )
        # Test 4:
        # padding="max_length" and padding_side="right"
        tokens = self.tokenizer(
            text, padding="max_length", max_length=20, padding_side="right", return_special_tokens_mask=True
        )
        self.assertIsInstance(tokens, BatchEncoding)
        num_padding = 20 - len(expected_tokens)
        self.assertEqual(tokens["input_ids"], expected_tokens + num_padding * [self.tokenizer.pad_token_id])
        self.assertEqual(tokens["attention_mask"], [1] * len(expected_tokens) + num_padding * [0])
        self.assertEqual(
            tokens["special_tokens_mask"], [1] + [0] * (len(expected_tokens) - 2) + [1] + num_padding * [1]
        )
def test_batch_call(self):
# Test 1:
# default case
text = ["Hello world!", "Hello world! Longer"]
expected_tokens = [self.ref_tokenizer.instruct_tokenizer.tokenizer.encode(t, bos=True, eos=True) for t in text]
tokens = self.tokenizer(text)
self.assertIsInstance(tokens, BatchEncoding)
self.assertEqual(tokens["input_ids"], expected_tokens)
self.assertEqual(tokens["attention_mask"], [[1] * len(t) for t in expected_tokens])
# Test 2:
# return_attention_mask=False
tokens = self.tokenizer(text, return_attention_mask=False)
self.assertEqual(tokens["input_ids"], expected_tokens)
self.assertNotIn("attention_mask", tokens)
# Test 3:
# return_tensors="pt"
tokens = self.tokenizer(text, return_tensors="pt", padding="longest", return_special_tokens_mask=True)
self.assertIsInstance(tokens["input_ids"], torch.Tensor)
self.assertEqual(tokens["input_ids"].shape, torch.Size([2, len(expected_tokens[1])]))
self.assertTrue(
torch.equal(
tokens["input_ids"][0],
torch.Tensor(
(len(expected_tokens[1]) - len(expected_tokens[0]))
* [self.ref_tokenizer.instruct_tokenizer.tokenizer.pad_id]
+ expected_tokens[0]
),
)
)
self.assertIsInstance(tokens["attention_mask"], torch.Tensor)
self.assertEqual(tokens["attention_mask"].shape, torch.Size([2, len(expected_tokens[1])]))
self.assertTrue(
torch.equal(
tokens["attention_mask"][0],
torch.Tensor(
[0] * (len(expected_tokens[1]) - len(expected_tokens[0])) + [1] * len(expected_tokens[0])
),
)
)
self.assertTrue(torch.equal(tokens["attention_mask"][1], torch.Tensor([1] * len(expected_tokens[1]))))
self.assertIsInstance(tokens["special_tokens_mask"], torch.Tensor)
self.assertEqual(tokens["special_tokens_mask"].shape, torch.Size([2, len(expected_tokens[1])]))
self.assertTrue(
torch.equal(
tokens["special_tokens_mask"][0],
torch.Tensor(
(len(expected_tokens[1]) - len(expected_tokens[0])) * [1]
+ [1]
+ [0] * (len(expected_tokens[0]) - 2)
+ [1]
),
)
)
self.assertTrue(
torch.equal(
tokens["special_tokens_mask"][1], torch.Tensor([1] + [0] * (len(expected_tokens[1]) - 2) + [1])
)
)
# Test 4:
# add_special_tokens=False
expected_tokens = [
self.ref_tokenizer.instruct_tokenizer.tokenizer.encode(t, bos=False, eos=False) for t in text
]
tokens = self.tokenizer(text, add_special_tokens=False, return_special_tokens_mask=True)
self.assertIsInstance(tokens, BatchEncoding)
self.assertEqual(tokens["input_ids"], expected_tokens)
self.assertEqual(tokens["attention_mask"], [[1] * len(t) for t in expected_tokens])
self.assertEqual(tokens["special_tokens_mask"], [[0] * len(t) for t in expected_tokens])
def test_batch_call_with_truncation(self):
# Test 1:
# truncation=True
text = ["Hello world!", "Hello world! Longer" * 10]
expected_tokens = [self.ref_tokenizer.instruct_tokenizer.tokenizer.encode(t, bos=True, eos=True) for t in text]
for truncation in [True, "longest_first", TruncationStrategy.LONGEST_FIRST]:
tokens = self.tokenizer(text, truncation=True, max_length=10, return_special_tokens_mask=True)
self.assertIsInstance(tokens, BatchEncoding)
self.assertEqual(tokens["input_ids"], [expected_tokens[0][:10], expected_tokens[1][:10]])
self.assertEqual(tokens["attention_mask"], [[1] * min(len(t), 10) for t in expected_tokens])
self.assertEqual(
tokens["special_tokens_mask"],
[[1 if id in self.ref_special_ids else 0 for id in ids[:10]] for ids in expected_tokens],
)
# Test 2:
# truncation=False
for truncation in [False, "do_not_truncate", TruncationStrategy.DO_NOT_TRUNCATE]:
tokens = self.tokenizer(text, truncation=truncation, return_special_tokens_mask=True)
self.assertIsInstance(tokens, BatchEncoding)
self.assertEqual(tokens["input_ids"], expected_tokens)
self.assertEqual(tokens["attention_mask"], [[1] * len(t) for t in expected_tokens])
self.assertEqual(
tokens["special_tokens_mask"],
[[1] + [0] * (len(t) - 2) + [1] for t in expected_tokens],
)
# Test 3:
# truncation=True or "longest_first" or TruncationStrategy.LONGEST_FIRST with return_overflowing_tokens=True and stride
for truncation in [True, "longest_first", TruncationStrategy.LONGEST_FIRST]:
for stride in [0, 2]:
tokens = self.tokenizer(
text,
truncation=truncation,
max_length=10,
return_overflowing_tokens=True,
return_special_tokens_mask=True,
stride=stride,
)
self.assertIsInstance(tokens, BatchEncoding)
self.assertEqual(tokens["input_ids"], [expected_tokens[0][:10], expected_tokens[1][:10]])
self.assertEqual(tokens["attention_mask"], [[1] * min(len(t), 10) for t in expected_tokens])
self.assertEqual(
tokens["overflowing_tokens"],
[expected_tokens[0][10 - stride :], expected_tokens[1][10 - stride :]],
)
self.assertEqual(
tokens["num_truncated_tokens"], [len(expected_tokens[0]) - 10, len(expected_tokens[1]) - 10]
)
self.assertEqual(
tokens["special_tokens_mask"],
[[1 if id in self.ref_special_ids else 0 for id in ids[:10]] for ids in expected_tokens],
)
def test_batch_call_with_padding(self):
# Test 1:
# padding=False or padding=True or "do_not_pad" or PaddingStrategy.DO_NOT_PAD or padding="longest" or PaddingStrategy.LONGEST
text = ["Hello world!", "Hello world! Longer"]
expected_tokens = [self.ref_tokenizer.instruct_tokenizer.tokenizer.encode(t, bos=True, eos=True) for t in text]
for padding in [False, "do_not_pad", PaddingStrategy.DO_NOT_PAD]:
tokens = self.tokenizer(text, padding=padding, return_special_tokens_mask=True)
self.assertIsInstance(tokens, BatchEncoding)
self.assertEqual(tokens["input_ids"], expected_tokens)
self.assertEqual(tokens["attention_mask"], [[1] * len(t) for t in expected_tokens])
self.assertEqual(
tokens["special_tokens_mask"],
[[1] + [0] * (len(t) - 2) + [1] for t in expected_tokens],
)
# Test 2:
# padding="max_length" or PaddingStrategy.MAX_LENGTH
for padding in ["max_length", PaddingStrategy.MAX_LENGTH]:
tokens = self.tokenizer(text, padding=padding, max_length=20, return_special_tokens_mask=True)
self.assertIsInstance(tokens, BatchEncoding)
num_padding = [20 - len(t) for t in expected_tokens]
self.assertEqual(
tokens["input_ids"],
[
num_padding[0] * [self.tokenizer.pad_token_id] + expected_tokens[0],
num_padding[1] * [self.tokenizer.pad_token_id] + expected_tokens[1],
],
)
self.assertEqual(
tokens["attention_mask"],
[
num_padding[0] * [0] + [1] * len(expected_tokens[0]),
num_padding[1] * [0] + [1] * len(expected_tokens[1]),
],
)
self.assertEqual(
tokens["special_tokens_mask"],
[
num_padding[0] * [1] + [1] + [0] * (len(expected_tokens[0]) - 2) + [1],
num_padding[1] * [1] + [1] + [0] * (len(expected_tokens[1]) - 2) + [1],
],
)
# Test 3:
# padding=True or "longest" or PaddingStrategy.LONGEST
for padding in [True, "longest", PaddingStrategy.LONGEST]:
tokens = self.tokenizer(text, padding=padding, return_special_tokens_mask=True)
self.assertIsInstance(tokens, BatchEncoding)
num_padding = [len(expected_tokens[1]) - len(t) for t in expected_tokens]
self.assertEqual(
tokens["input_ids"],
[
num_padding[0] * [self.tokenizer.pad_token_id] + expected_tokens[0],
num_padding[1] * [self.tokenizer.pad_token_id] + expected_tokens[1],
],
)
self.assertEqual(
tokens["attention_mask"],
[
num_padding[0] * [0] + [1] * len(expected_tokens[0]),
num_padding[1] * [0] + [1] * len(expected_tokens[1]),
],
)
self.assertEqual(
tokens["special_tokens_mask"],
[
num_padding[0] * [1] + [1] + [0] * (len(expected_tokens[0]) - 2) + [1],
num_padding[1] * [1] + [1] + [0] * (len(expected_tokens[1]) - 2) + [1],
],
)
# Test 4:
# pad_to_multiple_of
tokens = self.tokenizer(
text, padding=True, max_length=32, pad_to_multiple_of=16, return_special_tokens_mask=True
)
self.assertIsInstance(tokens, BatchEncoding)
num_padding = [16 - len(t) for t in expected_tokens]
self.assertEqual(
tokens["input_ids"],
[
num_padding[0] * [self.tokenizer.pad_token_id] + expected_tokens[0],
num_padding[1] * [self.tokenizer.pad_token_id] + expected_tokens[1],
],
)
self.assertEqual(
tokens["attention_mask"],
[
num_padding[0] * [0] + [1] * len(expected_tokens[0]),
num_padding[1] * [0] + [1] * len(expected_tokens[1]),
],
)
self.assertEqual(
tokens["special_tokens_mask"],
[
num_padding[0] * [1] + [1] + [0] * (len(expected_tokens[0]) - 2) + [1],
num_padding[1] * [1] + [1] + [0] * (len(expected_tokens[1]) - 2) + [1],
],
)
# Test 5:
# padding="max_length" or PaddingStrategy.MAX_LENGTH and padding_side="right"
for padding in ["max_length", PaddingStrategy.MAX_LENGTH]:
tokens = self.tokenizer(
text, padding=padding, max_length=20, padding_side="right", return_special_tokens_mask=True
)
self.assertIsInstance(tokens, BatchEncoding)
num_padding = [20 - len(t) for t in expected_tokens]
self.assertEqual(
tokens["input_ids"],
[
expected_tokens[0] + num_padding[0] * [self.tokenizer.pad_token_id],
expected_tokens[1] + num_padding[1] * [self.tokenizer.pad_token_id],
],
)
self.assertEqual(
tokens["attention_mask"],
[
[1] * len(expected_tokens[0]) + num_padding[0] * [0],
[1] * len(expected_tokens[1]) + num_padding[1] * [0],
],
)
self.assertEqual(
tokens["special_tokens_mask"],
[
[1] + [0] * (len(expected_tokens[0]) - 2) + [1] + num_padding[0] * [1],
[1] + [0] * (len(expected_tokens[1]) - 2) + [1] + num_padding[1] * [1],
],
)
def test_batch_call_with_padding_and_truncation(self):
# Test 1:
# padding=True or "longest" or PaddingStrategy.LONGEST or "max_length" or PaddingStragy.MAX_LENGTH
# and truncation=True or "longest_first" or TruncationStrategy.LONGEST_FIRST
# and max_length
text = ["Hello world!", "Hello world! Longer" * 10]
expected_tokens = [self.ref_tokenizer.instruct_tokenizer.tokenizer.encode(t, bos=True, eos=True) for t in text]
for padding in [True, "longest", PaddingStrategy.LONGEST, "max_length", PaddingStrategy.MAX_LENGTH]:
for truncation in [True, "longest_first", TruncationStrategy.LONGEST_FIRST]:
tokens = self.tokenizer(
text, padding=padding, truncation=truncation, max_length=10, return_special_tokens_mask=True
)
num_padding = [max(0, 10 - len(t)) for t in expected_tokens]
self.assertIsInstance(tokens, BatchEncoding)
self.assertEqual(
tokens["input_ids"],
[num_padding[i] * [self.tokenizer.pad_token_id] + t[:10] for i, t in enumerate(expected_tokens)],
)
self.assertEqual(
tokens["attention_mask"],
[num_padding[i] * [0] + [1] * min(len(t), 10) for i, t in enumerate(expected_tokens)],
)
self.assertEqual(
tokens["special_tokens_mask"],
[
num_padding[i] * [1] + [1 if id in self.ref_special_ids else 0 for id in ids[:10]]
for i, ids in enumerate(expected_tokens)
],
)
# Test 2:
# padding=True or "longest" or PaddingStrategy.LONGEST and truncation=True or "longest_first" or TruncationStrategy.LONGEST_FIRST
# and no max_length
for padding in ["longest", PaddingStrategy.LONGEST]:
for truncation in [True, "longest_first", TruncationStrategy.LONGEST_FIRST]:
tokens = self.tokenizer(text, padding=padding, truncation=truncation, return_special_tokens_mask=True)
self.assertIsInstance(tokens, BatchEncoding)
num_padding = [max(len(t) for t in expected_tokens) - len(t) for t in expected_tokens]
self.assertEqual(
tokens["input_ids"],
[num_padding[i] * [self.tokenizer.pad_token_id] + t for i, t in enumerate(expected_tokens)],
)
self.assertEqual(
tokens["attention_mask"],
[num_padding[i] * [0] + [1] * len(t) for i, t in enumerate(expected_tokens)],
)
self.assertEqual(
tokens["special_tokens_mask"],
[
num_padding[i] * [1] + [1 if id in self.ref_special_ids else 0 for id in ids]
for i, ids in enumerate(expected_tokens)
],
)
def test_get_vocab(self):
vocab = self.tokenizer.get_vocab()
# loss of some tokens due to conversion
self.assertNotEqual(len(vocab), len(self.tokenizer))
for token, id_token in vocab.items():
# Issue during conversion
if id_token == 0 and token != "<unk>":
continue
self.assertEqual(self.tokenizer.convert_tokens_to_ids(token), id_token)
self.assertEqual(
self.ref_tokenizer.decode([id_token], special_token_policy=SpecialTokenPolicy.KEEP), token
)
| TestMistralCommonBackend |
python | astropy__astropy | astropy/cosmology/_src/traits/hubble.py | {
"start": 512,
"end": 1607
} | class ____:
"""The object has attributes and methods for the Hubble parameter."""
H0: Quantity
"""Hubble Parameter at redshift 0."""
efunc: Callable[[Any], NDArray[Any]]
inv_efunc: Callable[[Any], FArray | float]
def H(self, z: Quantity | ArrayLike, /) -> Quantity:
"""Hubble parameter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like
Input redshift.
Returns
-------
H : Quantity ['frequency']
Hubble parameter at each input redshift.
"""
return self.H0 * self.efunc(z)
@cached_property
def h(self) -> np.floating:
"""Dimensionless Hubble constant: h = H_0 / 100 [km/sec/Mpc]."""
return self.H0.to_value("km/(s Mpc)") / 100.0
@cached_property
def hubble_time(self) -> u.Quantity:
"""Hubble time."""
return (1 / self.H0).to(u.Gyr)
@cached_property
def hubble_distance(self) -> u.Quantity:
"""Hubble distance."""
return (const.c / self.H0).to(u.Mpc)
| HubbleParameter |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_object_position20.py | {
"start": 315,
"end": 930
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("object_position20.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_column(1, 1, 5, None, {"hidden": 1})
worksheet.insert_image("B9", self.image_dir + "red.png", {"x_offset": 128})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pytorch__pytorch | torch/nn/parallel/_functions.py | {
"start": 3196,
"end": 4946
} | class ____(Function):
@staticmethod
def forward(ctx, target_gpus, chunk_sizes, dim, input):
target_gpus = [_get_device_index(x, True) for x in target_gpus]
ctx.dim = dim
ctx.input_device = input.get_device() if input.device.type != "cpu" else -1
streams = None
if torch.accelerator.is_available() and ctx.input_device == -1:
# Perform CPU to GPU copies in a background stream
streams = [_get_stream(torch.device(device)) for device in target_gpus]
outputs = comm.scatter(input, target_gpus, chunk_sizes, ctx.dim, streams)
# Synchronize with the copy stream
if streams is not None:
for i, output in enumerate(outputs):
with torch.accelerator.device_index(target_gpus[i]):
main_stream = torch.accelerator.current_stream()
main_stream.wait_stream(streams[i])
output.record_stream(main_stream)
return outputs
@staticmethod
def backward(ctx, *grad_output):
return None, None, None, Gather.apply(ctx.input_device, ctx.dim, *grad_output)
# background streams used for copying
_streams: list[torch.Stream | None] | None = None
def _get_stream(device: torch.device):
"""Get a background stream for copying between CPU and target device."""
global _streams
if device.type == "cpu" or not torch.accelerator.is_available():
return None
assert torch.accelerator.current_accelerator().type == device.type
if _streams is None:
_streams = [None] * torch.accelerator.device_count()
if _streams[device.index] is None:
_streams[device.index] = torch.Stream(device.index)
return _streams[device.index]
| Scatter |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 83294,
"end": 88729
} | class ____:
def setup_method(self):
self.rng = np.random.default_rng(1765545342)
def test_rvs(self):
vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50), random_state=self.rng)
assert np.all(vals >= 0) & np.all(vals <= 3)
assert np.shape(vals) == (2, 50)
assert vals.dtype.char in typecodes['AllInteger']
val = stats.hypergeom.rvs(20, 3, 10, random_state=self.rng)
assert isinstance(val, int)
val = stats.hypergeom(20, 3, 10).rvs(3, random_state=self.rng)
assert isinstance(val, np.ndarray)
assert val.dtype.char in typecodes['AllInteger']
def test_precision(self):
# comparison number from mpmath
M = 2500
n = 50
N = 500
tot = M
good = n
hgpmf = stats.hypergeom.pmf(2, tot, good, N)
assert_almost_equal(hgpmf, 0.0010114963068932233, 11)
def test_args(self):
# test correct output for corner cases of arguments
# see gh-2325
assert_almost_equal(stats.hypergeom.pmf(0, 2, 1, 0), 1.0, 11)
assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
assert_almost_equal(stats.hypergeom.pmf(0, 2, 0, 2), 1.0, 11)
assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
def test_cdf_above_one(self):
# for some values of parameters, hypergeom cdf was >1, see gh-2238
assert_(0 <= stats.hypergeom.cdf(30, 13397950, 4363, 12390) <= 1.0)
def test_precision2(self):
# Test hypergeom precision for large numbers. See #1218.
# Results compared with those from R.
oranges = 9.9e4
pears = 1.1e5
fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4
quantile = 2e4
res = [stats.hypergeom.sf(quantile, oranges + pears, oranges, eaten)
for eaten in fruits_eaten]
expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32,
8.265601e-11, 0.1237904, 1])
assert_allclose(res, expected, atol=0, rtol=5e-7)
# Test with array_like first argument
quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4]
res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4)
expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69]
assert_allclose(res2, expected2, atol=0, rtol=5e-7)
def test_entropy(self):
# Simple tests of entropy.
hg = stats.hypergeom(4, 1, 1)
h = hg.entropy()
expected_p = np.array([0.75, 0.25])
expected_h = -np.sum(xlogy(expected_p, expected_p))
assert_allclose(h, expected_h)
hg = stats.hypergeom(1, 1, 1)
h = hg.entropy()
assert_equal(h, 0.0)
def test_logsf(self):
# Test logsf for very large numbers. See issue #4982
# Results compare with those from R (v3.2.0):
# phyper(k, n, M-n, N, lower.tail=FALSE, log.p=TRUE)
# -2239.771
k = 1e4
M = 1e7
n = 1e6
N = 5e4
result = stats.hypergeom.logsf(k, M, n, N)
expected = -2239.771 # From R
assert_almost_equal(result, expected, decimal=3)
k = 1
M = 1600
n = 600
N = 300
result = stats.hypergeom.logsf(k, M, n, N)
expected = -2.566567e-68 # From R
assert_almost_equal(result, expected, decimal=15)
def test_logcdf(self):
# Test logcdf for very large numbers. See issue #8692
# Results compare with those from R (v3.3.2):
# phyper(k, n, M-n, N, lower.tail=TRUE, log.p=TRUE)
# -5273.335
k = 1
M = 1e7
n = 1e6
N = 5e4
result = stats.hypergeom.logcdf(k, M, n, N)
expected = -5273.335 # From R
assert_almost_equal(result, expected, decimal=3)
# Same example as in issue #8692
k = 40
M = 1600
n = 50
N = 300
result = stats.hypergeom.logcdf(k, M, n, N)
expected = -7.565148879229e-23 # From R
assert_almost_equal(result, expected, decimal=15)
k = 125
M = 1600
n = 250
N = 500
result = stats.hypergeom.logcdf(k, M, n, N)
expected = -4.242688e-12 # From R
assert_almost_equal(result, expected, decimal=15)
# test broadcasting robustness based on reviewer
# concerns in PR 9603; using an array version of
# the example from issue #8692
k = np.array([40, 40, 40])
M = 1600
n = 50
N = 300
result = stats.hypergeom.logcdf(k, M, n, N)
expected = np.full(3, -7.565148879229e-23) # filled from R result
assert_almost_equal(result, expected, decimal=15)
def test_mean_gh18511(self):
# gh-18511 reported that the `mean` was incorrect for large arguments;
# check that this is resolved
M = 390_000
n = 370_000
N = 12_000
hm = stats.hypergeom.mean(M, n, N)
rm = n / M * N
assert_allclose(hm, rm)
@pytest.mark.xslow
def test_sf_gh18506(self):
# gh-18506 reported that `sf` was incorrect for large population;
# check that this is resolved
n = 10
N = 10**5
i = np.arange(5, 15)
population_size = 10.**i
p = stats.hypergeom.sf(n - 1, population_size, N, n)
assert np.all(p > 0)
assert np.all(np.diff(p) < 0)
| TestHypergeom |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.