language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | bokeh__bokeh | src/bokeh/models/transforms.py | {
"start": 7975,
"end": 8309
} | class ____(Interpolator):
''' Compute a linear interpolation between the control points provided through
the ``x``, ``y``, and ``data`` parameters.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| LinearInterpolator |
python | gevent__gevent | src/gevent/tests/test__socket_dns.py | {
"start": 33589,
"end": 34172
} | class ____(TestCase):
def test(self):
self._test('getnameinfo', ('127.0.0.1', 80), 0)
def test_DGRAM(self):
self._test('getnameinfo', ('127.0.0.1', 779), 0)
self._test('getnameinfo', ('127.0.0.1', 779), socket.NI_DGRAM)
def test_NOFQDN(self):
# I get ('localhost', 'www') with _socket but ('localhost.localdomain', 'www') with gevent.socket
self._test('getnameinfo', ('127.0.0.1', 80), socket.NI_NOFQDN)
def test_NAMEREQD(self):
self._test('getnameinfo', ('127.0.0.1', 80), socket.NI_NAMEREQD)
| Test_getnameinfo_127001 |
python | python__mypy | test-data/unit/plugins/descriptor.py | {
"start": 200,
"end": 1352
} | class ____(Plugin):
def get_method_hook(self, fullname: str) -> Callable[[MethodContext], Type] | None:
if fullname == "__main__.Desc.__get__":
return get_hook
return None
def get_method_signature_hook(
self, fullname: str
) -> Callable[[MethodSigContext], CallableType] | None:
if fullname == "__main__.Desc.__set__":
return set_hook
return None
def get_hook(ctx: MethodContext) -> Type:
arg = get_proper_type(ctx.arg_types[0][0])
if isinstance(arg, NoneType):
return ctx.api.named_generic_type("builtins.str", [])
return ctx.api.named_generic_type("builtins.int", [])
def set_hook(ctx: MethodSigContext) -> CallableType:
return CallableType(
[
ctx.api.named_generic_type("__main__.Cls", []),
ctx.api.named_generic_type("builtins.int", []),
],
ctx.default_signature.arg_kinds,
ctx.default_signature.arg_names,
ctx.default_signature.ret_type,
ctx.default_signature.fallback,
)
def plugin(version: str) -> type[DescriptorPlugin]:
return DescriptorPlugin
| DescriptorPlugin |
python | numba__numba | numba/cuda/deviceufunc.py | {
"start": 10835,
"end": 13132
} | class ____(_BaseUFuncBuilder):
def __init__(self, func, identity=None, cache=False, targetoptions=None):
if targetoptions is None:
targetoptions = {}
if cache:
raise TypeError("caching is not supported")
for opt in targetoptions:
if opt == 'nopython':
warnings.warn("nopython kwarg for cuda target is redundant",
RuntimeWarning)
else:
fmt = "Unrecognized options. "
fmt += "cuda vectorize target does not support option: '%s'"
raise KeyError(fmt % opt)
self.py_func = func
self.identity = parse_identity(identity)
# { arg_dtype: (return_dtype), cudakernel }
self.kernelmap = OrderedDict()
@property
def pyfunc(self):
return self.py_func
def add(self, sig=None):
# compile core as device function
args, return_type = sigutils.normalize_signature(sig)
devfnsig = signature(return_type, *args)
funcname = self.pyfunc.__name__
kernelsource = self._get_kernel_source(self._kernel_template,
devfnsig, funcname)
corefn, return_type = self._compile_core(devfnsig)
glbl = self._get_globals(corefn)
sig = signature(types.void, *([a[:] for a in args] + [return_type[:]]))
exec(kernelsource, glbl)
stager = glbl['__vectorized_%s' % funcname]
kernel = self._compile_kernel(stager, sig)
argdtypes = tuple(to_dtype(t) for t in devfnsig.args)
resdtype = to_dtype(return_type)
self.kernelmap[tuple(argdtypes)] = resdtype, kernel
def build_ufunc(self):
raise NotImplementedError
def _get_kernel_source(self, template, sig, funcname):
args = ['a%d' % i for i in range(len(sig.args))]
fmts = dict(name=funcname,
args=', '.join(args),
argitems=', '.join('%s[__tid__]' % i for i in args))
return template.format(**fmts)
def _compile_core(self, sig):
raise NotImplementedError
def _get_globals(self, corefn):
raise NotImplementedError
def _compile_kernel(self, fnobj, sig):
raise NotImplementedError
| DeviceVectorize |
python | davidhalter__jedi | test/completion/recursion.py | {
"start": 801,
"end": 1240
} | class ____:
def b(self):
self.a1 = 1
self.a2 = 1
def c(self):
self.a2 = ''
def x(self):
self.b()
if self.a1 == 1:
self.a1 = self.a1 + 1
if self.a2 == UNDEFINED:
self.a2 = self.a2 + 1
#? int()
self.a1
#? int() str()
self.a2
#? int()
InstanceAttributeIfs().a1
#? int() str()
InstanceAttributeIfs().a2
| InstanceAttributeIfs |
python | mwaskom__seaborn | tests/test_relational.py | {
"start": 1167,
"end": 1782
} | class ____:
@pytest.fixture
def levels(self, long_df):
return {var: categorical_order(long_df[var]) for var in ["a", "b"]}
def scatter_rgbs(self, collections):
rgbs = []
for col in collections:
rgb = tuple(col.get_facecolor().squeeze()[:3])
rgbs.append(rgb)
return rgbs
def paths_equal(self, *args):
equal = all([len(a) == len(args[0]) for a in args])
for p1, p2 in zip(*args):
equal &= np.array_equal(p1.vertices, p2.vertices)
equal &= np.array_equal(p1.codes, p2.codes)
return equal
| Helpers |
python | Lightning-AI__lightning | tests/tests_pytorch/callbacks/test_model_checkpoint_manual_opt.py | {
"start": 312,
"end": 883
} | class ____(Dataset):
def __init__(self):
self.data = [torch.randn(3) for _ in range(4)]
self.labels = [torch.randint(0, 2, (1,)) for _ in range(4)]
def __len__(self):
return 4
def __getitem__(self, idx):
return self.data[idx], self.labels[idx]
def save_model(model: torch.nn.Module, step_idx: int, saved_models):
model_copy = deepcopy(model)
state_dict = model_copy.cpu().state_dict()
saved_models[step_idx] = state_dict
def load_model(step_idx: int, saved_models):
return saved_models[step_idx]
| FakeDataset |
python | walkccc__LeetCode | solutions/2868. The Wording Game/2868.py | {
"start": 0,
"end": 1168
} | class ____:
def canAliceWin(self, a: list[str], b: list[str]) -> bool:
# words[0][i] := the biggest word starting with ('a' + i) for Alice
# words[1][i] := the biggest word starting with ('a' + i) for Bob
words = [[''] * 26 for _ in range(2)]
# For each letter, only the biggest word is useful.
for word in a:
words[0][ord(word[0]) - ord('a')] = word
for word in b:
words[1][ord(word[0]) - ord('a')] = word
# Find Alice's smallest word.
i = 0
while not words[0][i]:
i += 1
# 0 := Alice, 1 := Bob
# Start with Alice, so it's Bob's turn now.
turn = 1
# Iterate through each letter until we find a winner.
while True:
# If the current player has a word that having the letter that is greater
# than the opponent's word, choose it.
if words[turn][i] and words[turn][i] > words[1 - turn][i]:
# Choose the current words[turn][i].
pass
elif words[turn][i + 1]:
# Choose the next words[turn][i + 1].
i += 1
else:
# Game over. If it's Bob's turn, Alice wins, and vice versa.
return turn == 1
turn = 1 - turn
| Solution |
python | pytorch__pytorch | torch/_inductor/codegen/cutedsl/cutedsl_scheduling.py | {
"start": 666,
"end": 5310
} | class ____(BaseScheduling):
"""
Scheduling implementation for CuteDSL (CUTLASS Python DSL) kernels.
This class is intended to be used in combination with other schedulers,
and delegated to by CUDACombinedScheduling.
"""
@classmethod
def get_backend_features(cls, device) -> OrderedSet[BackendFeature]:
return OrderedSet()
@staticmethod
def is_cutedsl_template(node: BaseSchedulerNode) -> bool:
"""Check if a node is a CuteDSL template."""
return isinstance(node, SchedulerNode) and isinstance(
node.node, CuteDSLTemplateBuffer
)
def is_cutedsl_fused_template(self, node: BaseSchedulerNode) -> bool:
"""Check if a node is a fused CuteDSL template."""
return isinstance(node, FusedSchedulerNode) and self.is_cutedsl_template(node)
def can_fuse_vertical(
self, node1: BaseSchedulerNode, node2: BaseSchedulerNode
) -> bool:
"""
TODO CuteDSL doesn't support vertical fusion yet.
This could be extended in the future for epilogue fusion.
"""
return False
def define_kernel(self, src_code_str: str, node_schedule) -> str:
"""Produce the kernel string
Args:
src_code_str: The finalized kernel code string
node_schedule: List of nodes in the schedule
Note:
This is a little weird since async_compile.cutedsl() has to write the string to
a file in order to cute compile it. Feels bad to have two...
"""
wrapper = V.graph.wrapper_code
# Use the string as the key for caching
if src_code_str in wrapper.src_to_kernel:
kernel_name = wrapper.src_to_kernel[src_code_str]
else:
fused_name = (
get_fused_kernel_name(node_schedule, config.triton.descriptive_names)
if config.triton.descriptive_names
else ""
)
kernel_hash = hashlib.sha256(src_code_str.encode("utf-8")).hexdigest()[:8]
if fused_name == "fused":
kernel_name = f"cutedsl_{kernel_hash}"
else:
kernel_name = f"cutedsl_{fused_name}_{kernel_hash}"
wrapper.src_to_kernel[src_code_str] = kernel_name
src_code_str = src_code_str.replace(
str(Placeholder.KERNEL_NAME), kernel_name
)
_, _, kernel_path = get_path(code_hash(src_code_str), "py")
compile_wrapper = IndentedBuffer()
compile_wrapper.writeline(f"async_compile.cutedsl({kernel_name!r}, r'''")
compile_wrapper.splice(src_code_str, strip=True)
compile_wrapper.writeline("''')")
metadata_comment = f"# kernel path: {kernel_path}"
origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper)
metadata_comment += "\n" + origins + "\n" + detailed_origins
wrapper.define_kernel(
kernel_name, compile_wrapper.getvalue(), metadata_comment
)
return kernel_name
def codegen_template(
self,
template_node: BaseSchedulerNode,
epilogue_nodes: Sequence[BaseSchedulerNode],
prologue_nodes: Sequence[BaseSchedulerNode],
):
"""
Codegen a CuteDSL template. Currently doesn't support fusion.
"""
assert self.is_cutedsl_template(template_node), (
"Template node passed to CuteDSLScheduling.codegen_template must be a "
"SchedulerNode that wraps a CuteDSLTemplateBuffer"
)
# TODO remove when supported
assert not epilogue_nodes, "CuteDSL doesn't support epilogue fusion yet"
assert not prologue_nodes, "CuteDSL doesn't support prologue fusion yet"
template_node = cast(SchedulerNode, template_node)
ctb: CuteDSLTemplateBuffer = cast(CuteDSLTemplateBuffer, template_node.node)
kernel, render = ctb.make_kernel_render(ctb) # type: ignore[misc]
template_node.mark_run()
src_code = render()
# Finalize PartialRender if needed
if isinstance(src_code, PartialRender):
src_code_str = src_code.finalize_all()
else:
src_code_str = src_code
with V.set_kernel_handler(kernel):
node_schedule = [template_node]
kernel_name = self.define_kernel(src_code_str, node_schedule)
self.codegen_comment(node_schedule, kernel_name)
kernel.call_kernel(kernel_name, ctb)
V.graph.removed_buffers |= kernel.removed_buffers
self.free_buffers_in_scheduler()
| CuteDSLScheduling |
python | dask__dask | dask/dataframe/tseries/resample.py | {
"start": 6979,
"end": 9635
} | class ____:
"""Aggregate using one or more operations
The purpose of this class is to expose an API similar
to Pandas' `Resampler` for dask-expr
"""
def __init__(self, obj, rule, **kwargs):
if obj.divisions[0] is None:
msg = (
"Can only resample dataframes with known divisions\n"
"See https://docs.dask.org/en/latest/dataframe-design.html#partitions\n"
"for more information."
)
raise ValueError(msg)
self.obj = obj
self.rule = rule
self.kwargs = kwargs
def _single_agg(self, expr_cls, how_args=(), how_kwargs=None):
return new_collection(
expr_cls(
self.obj,
self.rule,
self.kwargs,
how_args=how_args,
how_kwargs=how_kwargs,
)
)
@derived_from(pd_Resampler)
def count(self):
return self._single_agg(ResampleCount)
@derived_from(pd_Resampler)
def sum(self):
return self._single_agg(ResampleSum)
@derived_from(pd_Resampler)
def prod(self):
return self._single_agg(ResampleProd)
@derived_from(pd_Resampler)
def mean(self):
return self._single_agg(ResampleMean)
@derived_from(pd_Resampler)
def min(self):
return self._single_agg(ResampleMin)
@derived_from(pd_Resampler)
def max(self):
return self._single_agg(ResampleMax)
@derived_from(pd_Resampler)
def first(self):
return self._single_agg(ResampleFirst)
@derived_from(pd_Resampler)
def last(self):
return self._single_agg(ResampleLast)
@derived_from(pd_Resampler)
def var(self):
return self._single_agg(ResampleVar)
@derived_from(pd_Resampler)
def std(self):
return self._single_agg(ResampleStd)
@derived_from(pd_Resampler)
def size(self):
return self._single_agg(ResampleSize)
@derived_from(pd_Resampler)
def nunique(self):
return self._single_agg(ResampleNUnique)
@derived_from(pd_Resampler)
def median(self):
return self._single_agg(ResampleMedian)
@derived_from(pd_Resampler)
def quantile(self):
return self._single_agg(ResampleQuantile)
@derived_from(pd_Resampler)
def ohlc(self):
return self._single_agg(ResampleOhlc)
@derived_from(pd_Resampler)
def sem(self):
return self._single_agg(ResampleSem)
@derived_from(pd_Resampler)
def agg(self, func, *args, **kwargs):
return self._single_agg(ResampleAgg, how_args=(func, *args), how_kwargs=kwargs)
| Resampler |
python | huggingface__transformers | src/transformers/models/bert/modeling_bert.py | {
"start": 15826,
"end": 18574
} | class ____(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BertAttention(config, is_causal=config.is_decoder, layer_idx=layer_idx)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
self.crossattention = BertAttention(
config,
is_causal=False,
layer_idx=layer_idx,
is_cross_attention=True,
)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
self_attention_output, _ = self.attention(
hidden_states,
attention_mask,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
attention_output = self_attention_output
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, "crossattention"):
raise ValueError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
" by setting `config.add_cross_attention=True`"
)
cross_attention_output, _ = self.crossattention(
self_attention_output,
None, # attention_mask
encoder_hidden_states,
encoder_attention_mask,
past_key_values=past_key_values,
**kwargs,
)
attention_output = cross_attention_output
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
return layer_output
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
| BertLayer |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictClosed4.py | {
"start": 780,
"end": 854
} | class ____(TypedDict, extra_items=ReadOnly[str | int]):
name: str
| Movie3 |
python | more-itertools__more-itertools | tests/test_recipes.py | {
"start": 11110,
"end": 11667
} | class ____(TestCase):
"""Tests for ``roundrobin()``"""
def test_even_groups(self):
"""Ensure ordered output from evenly populated iterables"""
self.assertEqual(
list(mi.roundrobin('ABC', [1, 2, 3], range(3))),
['A', 1, 0, 'B', 2, 1, 'C', 3, 2],
)
def test_uneven_groups(self):
"""Ensure ordered output from unevenly populated iterables"""
self.assertEqual(
list(mi.roundrobin('ABCD', [1, 2], range(0))),
['A', 1, 'B', 2, 'C', 'D'],
)
| RoundrobinTests |
python | doocs__leetcode | solution/1100-1199/1124.Longest Well-Performing Interval/Solution.py | {
"start": 0,
"end": 381
} | class ____:
def longestWPI(self, hours: List[int]) -> int:
ans = s = 0
pos = {}
for i, x in enumerate(hours):
s += 1 if x > 8 else -1
if s > 0:
ans = i + 1
elif s - 1 in pos:
ans = max(ans, i - pos[s - 1])
if s not in pos:
pos[s] = i
return ans
| Solution |
python | dask__dask | dask/array/_array_expr/_expr.py | {
"start": 7832,
"end": 8310
} | class ____(FinalizeCompute, ArrayExpr):
_parameters = ["arr"]
def chunks(self):
return (self.arr.shape,)
def _simplify_down(self):
if self.arr.numblocks in ((), (1,)):
return self.arr
else:
from dask.array._array_expr._rechunk import Rechunk
return Rechunk(
self.arr,
tuple(-1 for _ in range(self.arr.ndim)),
method="tasks",
)
| FinalizeComputeArray |
python | huggingface__transformers | src/transformers/models/clvp/modeling_clvp.py | {
"start": 9667,
"end": 10388
} | class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
ClvpRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
| ClvpRMSNorm |
python | google__jax | tests/pallas/mgpu_ragged_dot_test.py | {
"start": 5760,
"end": 7873
} | class ____(jtu.JaxTestCase):
def setUp(self):
super().setUp()
if blackwell_ragged_dot_mgpu is None:
self.skipTest("Mosaic GPU not available.")
if (not jtu.test_device_matches(["cuda"]) or
not jtu.is_cuda_compute_capability_equal("10.0")):
self.skipTest("Only works on GPU with capability sm100a")
self.enter_context(pallas_call._PALLAS_USE_MOSAIC_GPU(True))
@parameterized.product(
grid_tile_width=(1, 8, 16),
grid_minor_dim=(0, 1),
max_concurrent_steps=(2, 4),
num_groups=(1, 3, 16),
tile_k=(64, 128)
)
def test_ragged_dot(
self,
grid_tile_width,
grid_minor_dim,
max_concurrent_steps,
num_groups,
tile_k,
):
# Kernel does not support other tiling on M and N dimensions currently.
tile_m = 128
tile_n = 128
lhs_smem_size = tile_m * tile_k * max_concurrent_steps * 2
rhs_smem_size = tile_k * tile_n * max_concurrent_steps * 2
# B200 SMEM limit is 228kB.
if lhs_smem_size + rhs_smem_size > 228_000:
self.skipTest("This configuration requires too much SMEM.")
dtype = jnp.float16
m, k, n = 16 * 1024, 2048, 16 * 1024
lhs, rhs, group_sizes = sample_inputs(
random.key(1234), m, k, n, num_groups, dtype
)
tuning_config = blackwell_ragged_dot_mgpu.TuningConfig(
tile_m=tile_m,
tile_n=tile_n,
tile_k=tile_k,
grid_tile_width=grid_tile_width,
grid_minor_dim=grid_minor_dim,
max_concurrent_steps=max_concurrent_steps,
collective=True,
)
out = blackwell_ragged_dot_mgpu.ragged_dot_kernel(
lhs,
rhs,
group_sizes=group_sizes,
config=tuning_config,
)
out_ref = jax.lax.ragged_dot(lhs, rhs, group_sizes=group_sizes,
preferred_element_type=dtype)
np.testing.assert_allclose(out, out_ref, atol=1e-3, rtol=1e-3)
if __name__ == "__main__":
os.environ["XLA_FLAGS"] = (
os.environ.get("XLA_FLAGS", "") + " --xla_gpu_autotune_level=0"
)
absltest.main(testLoader=jtu.JaxTestLoader())
| RaggedDotSm100aTestCase |
python | neetcode-gh__leetcode | python/2402-meeting-rooms-iii.py | {
"start": 0,
"end": 711
} | class ____:
def mostBooked(self, n: int, meetings: List[List[int]]) -> int:
meetings.sort()
available = [i for i in range(n)]
used = []
count = [0] * n
for start, end in meetings:
while used and start >= used[0][0]:
_, room = heapq.heappop(used)
heapq.heappush(available, room)
if not available:
end_time, room = heapq.heappop(used)
end = end_time + (end - start)
heapq.heappush(available, room)
room = heapq.heappop(available)
heapq.heappush(used, (end, room))
count[room] += 1
return count.index(max(count))
| Solution |
python | Lightning-AI__lightning | tests/tests_pytorch/checkpointing/test_model_checkpoint.py | {
"start": 37501,
"end": 37716
} | class ____(BoringModel):
def on_validation_model_train(self):
if not self.trainer.sanity_checking and self.current_epoch == 1:
raise RuntimeError("Trouble!")
| TroubledModelOnValidationModelTrain |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/transfers/test_s3_to_sftp.py | {
"start": 1630,
"end": 9662
} | class ____:
def setup_method(self):
hook = SSHHook(ssh_conn_id="ssh_default")
hook.no_host_key_check = True
dag = DAG(
f"{TEST_DAG_ID}test_schedule_dag_once",
start_date=DEFAULT_DATE,
schedule="@once",
)
self.hook = hook
self.ssh_client = self.hook.get_conn()
self.sftp_client = self.ssh_client.open_sftp()
self.dag = dag
self.s3_bucket = BUCKET
self.sftp_path = SFTP_PATH
self.s3_key = S3_KEY
@mock_aws
@conf_vars({("core", "enable_xcom_pickling"): "True"})
def test_s3_to_sftp_operation(self):
s3_hook = S3Hook(aws_conn_id=None)
# Setting
test_remote_file_content = (
"This is remote file content \n which is also multiline "
"another line here \n this is last line. EOF"
)
# Test for creation of s3 bucket
conn = boto3.client("s3")
conn.create_bucket(Bucket=self.s3_bucket)
assert s3_hook.check_for_bucket(self.s3_bucket)
with open(LOCAL_FILE_PATH, "w") as file:
file.write(test_remote_file_content)
s3_hook.load_file(LOCAL_FILE_PATH, self.s3_key, bucket_name=BUCKET)
# Check if object was created in s3
objects_in_dest_bucket = conn.list_objects(Bucket=self.s3_bucket, Prefix=self.s3_key)
# there should be object found, and there should only be one object found
assert len(objects_in_dest_bucket["Contents"]) == 1
# the object found should be consistent with dest_key specified earlier
assert objects_in_dest_bucket["Contents"][0]["Key"] == self.s3_key
# get remote file to local
run_task = S3ToSFTPOperator(
s3_bucket=BUCKET,
s3_key=S3_KEY,
sftp_path=SFTP_PATH,
sftp_conn_id=SFTP_CONN_ID,
task_id=TASK_ID,
dag=self.dag,
)
assert run_task is not None
run_task.execute(None)
# Check that the file is created remotely
check_file_task = SSHOperator(
task_id="test_check_file",
ssh_hook=self.hook,
command=f"cat {self.sftp_path}",
do_xcom_push=True,
dag=self.dag,
)
assert check_file_task is not None
result = check_file_task.execute(None)
assert result.strip() == test_remote_file_content.encode("utf-8")
# Clean up after finishing with test
conn.delete_object(Bucket=self.s3_bucket, Key=self.s3_key)
conn.delete_bucket(Bucket=self.s3_bucket)
assert not s3_hook.check_for_bucket(self.s3_bucket)
def delete_remote_resource(self):
# Initiate SHH hook
hook = SSHHook(ssh_conn_id="ssh_default")
hook.no_host_key_check = True
# check the remote file content
remove_file_task = SSHOperator(
task_id="test_rm_file",
ssh_hook=hook,
command=f"rm {self.sftp_path}",
do_xcom_push=True,
dag=self.dag,
)
assert remove_file_task is not None
remove_file_task.execute(None)
@mock_aws
@conf_vars({("core", "enable_xcom_pickling"): "True"})
def test_s3_to_sftp_operation_confirm_true_default(self):
"""Test that S3ToSFTPOperator works with confirm=True by default (real SSH connection)"""
s3_hook = S3Hook(aws_conn_id=None)
# Setting
test_remote_file_content = (
"This is remote file content for confirm=True test \n which is also multiline "
"another line here \n this is last line. EOF"
)
# Test for creation of s3 bucket
conn = boto3.client("s3")
conn.create_bucket(Bucket=self.s3_bucket)
assert s3_hook.check_for_bucket(self.s3_bucket)
with open(LOCAL_FILE_PATH, "w") as file:
file.write(test_remote_file_content)
s3_hook.load_file(LOCAL_FILE_PATH, self.s3_key, bucket_name=BUCKET)
# Check if object was created in s3
objects_in_dest_bucket = conn.list_objects(Bucket=self.s3_bucket, Prefix=self.s3_key)
# there should be object found, and there should only be one object found
assert len(objects_in_dest_bucket["Contents"]) == 1
# the object found should be consistent with dest_key specified earlier
assert objects_in_dest_bucket["Contents"][0]["Key"] == self.s3_key
# get remote file to local - Test with default confirm=True
run_task = S3ToSFTPOperator(
s3_bucket=BUCKET,
s3_key=S3_KEY,
sftp_path=SFTP_PATH,
sftp_conn_id=SFTP_CONN_ID,
task_id=TASK_ID + "_confirm_true",
dag=self.dag,
)
assert run_task is not None
run_task.execute(None)
# Check that the file is created remotely with correct content
check_file_task = SSHOperator(
task_id="test_check_file_confirm_true",
ssh_hook=self.hook,
command=f"cat {self.sftp_path}",
do_xcom_push=True,
dag=self.dag,
)
assert check_file_task is not None
result = check_file_task.execute(None)
assert result.strip() == test_remote_file_content.encode("utf-8")
# Clean up after finishing with test
conn.delete_object(Bucket=self.s3_bucket, Key=self.s3_key)
conn.delete_bucket(Bucket=self.s3_bucket)
assert not s3_hook.check_for_bucket(self.s3_bucket)
@mock_aws
@conf_vars({("core", "enable_xcom_pickling"): "True"})
def test_s3_to_sftp_operation_confirm_false(self):
"""Test that S3ToSFTPOperator works with confirm=False when specified (real SSH connection)"""
s3_hook = S3Hook(aws_conn_id=None)
# Setting
test_remote_file_content = (
"This is remote file content for confirm=False test \n which is also multiline "
"another line here \n this is last line. EOF"
)
# Test for creation of s3 bucket
conn = boto3.client("s3")
conn.create_bucket(Bucket=self.s3_bucket)
assert s3_hook.check_for_bucket(self.s3_bucket)
with open(LOCAL_FILE_PATH, "w") as file:
file.write(test_remote_file_content)
s3_hook.load_file(LOCAL_FILE_PATH, self.s3_key, bucket_name=BUCKET)
# Check if object was created in s3
objects_in_dest_bucket = conn.list_objects(Bucket=self.s3_bucket, Prefix=self.s3_key)
# there should be object found, and there should only be one object found
assert len(objects_in_dest_bucket["Contents"]) == 1
# the object found should be consistent with dest_key specified earlier
assert objects_in_dest_bucket["Contents"][0]["Key"] == self.s3_key
# get remote file to local - Test with explicit confirm=False
run_task = S3ToSFTPOperator(
s3_bucket=BUCKET,
s3_key=S3_KEY,
sftp_path=SFTP_PATH,
sftp_conn_id=SFTP_CONN_ID,
task_id=TASK_ID + "_confirm_false",
confirm=False, # Explicitly set to False
dag=self.dag,
)
assert run_task is not None
run_task.execute(None)
# Check that the file is created remotely with correct content
check_file_task = SSHOperator(
task_id="test_check_file_confirm_false",
ssh_hook=self.hook,
command=f"cat {self.sftp_path}",
do_xcom_push=True,
dag=self.dag,
)
assert check_file_task is not None
result = check_file_task.execute(None)
assert result.strip() == test_remote_file_content.encode("utf-8")
# Clean up after finishing with test
conn.delete_object(Bucket=self.s3_bucket, Key=self.s3_key)
conn.delete_bucket(Bucket=self.s3_bucket)
assert not s3_hook.check_for_bucket(self.s3_bucket)
def teardown_method(self):
self.delete_remote_resource()
| TestS3ToSFTPOperator |
python | readthedocs__readthedocs.org | readthedocs/builds/migrations/0031_add_version_fields_to_build.py | {
"start": 149,
"end": 1306
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("builds", "0030_add_automation_rule_matches"),
]
operations = [
migrations.AddField(
model_name="build",
name="version_name",
field=models.CharField(
blank=True, max_length=255, null=True, verbose_name="Version name"
),
),
migrations.AddField(
model_name="build",
name="version_slug",
field=models.CharField(
blank=True, max_length=255, null=True, verbose_name="Version slug"
),
),
migrations.AddField(
model_name="build",
name="version_type",
field=models.CharField(
blank=True,
choices=[
("branch", "Branch"),
("tag", "Tag"),
("external", "External"),
("unknown", "Unknown"),
],
max_length=32,
null=True,
verbose_name="Version type",
),
),
]
| Migration |
python | pytorch__pytorch | test/distributed/test_c10d_nccl.py | {
"start": 244290,
"end": 246753
} | class ____(NCCLTraceTestDumpOnTimeoutBase):
@check_if_test_is_skipped
def _check_return_codes(self, elapsed_time):
# the base test infra assumes processes exit with matching return codes,
# but we want rank0 to abort and rank1 to exit cleanly in this test
self.assertEqual(self.processes[0].exitcode, -6)
self.assertEqual(self.processes[1].exitcode, -6)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_timeout_dumps_on_stuck_ranks(self):
# need rank0 to crash quicker after detecting timeout
os.environ["TORCH_NCCL_HEARTBEAT_TIMEOUT_SEC"] = "1"
# restore this env var to its prior default in case another test changed it
os.environ["TORCH_NCCL_COORD_CHECK_MILSEC"] = "1000"
if self.rank == self.MAIN_PROCESS_RANK:
# wait for both rank0 and 1 to crash before looking for both ranks' output
# file, and we rely on rank1 to sleep long enough to dump the debug info.
self.assertEqual(self._wait_process(0, timeout=90), -6)
self.assertEqual(self._wait_process(1, timeout=90), -6)
self.assertTrue(os.path.exists(self._trace_name(rank=1)))
self.assertTrue(os.path.exists(self._trace_name(rank=0)))
with open(self._trace_name(rank=0), "rb") as f:
t = pickle.load(f)
t = t["entries"]
self.assertEqual(len(t), 2)
with open(self._trace_name(rank=1), "rb") as f:
t = pickle.load(f)
t = t["entries"]
self.assertEqual(len(t), 1)
self.assertEqual(t[0]["collective_seq_id"], 1)
self.assertEqual(t[0]["state"], "completed")
return
pg = self._create_process_group_nccl()
device = self.local_device
with torch.cuda.device(device):
a = torch.full((3, 4), float(self.rank), device=device)
pg.allreduce(a).wait()
if self.rank == 0:
pg.allreduce(a).wait()
# rank 0 will get stuck, timeout and then signal a timeout to all ranks.
torch.cuda.synchronize(device=device)
if self.rank == 1:
# Force rank 1 to idle so that it will eventually timeout as well after
# getting the global signal to dump the debugging info.
time.sleep(600)
@skip_but_pass_in_sandcastle
| NCCLTraceTestTimeoutDumpOnStuckRanks |
python | spyder-ide__spyder | spyder/plugins/ipythonconsole/utils/kernel_handler.py | {
"start": 2445,
"end": 2710
} | class ____:
SpyderKernelWaitComm = "spyder_kernel_wait_comm"
SpyderKernelReady = "spyder_kernel_ready"
IpykernelReady = "ipykernel_ready"
Connecting = "connecting"
Error = "error"
Closed = "closed"
Crashed = "crashed"
| KernelConnectionState |
python | great-expectations__great_expectations | great_expectations/profile/base.py | {
"start": 3447,
"end": 3628
} | class ____(Enum):
DATETIME = "DATETIME"
NUMERIC = "NUMERIC"
STRING = "STRING"
VALUE_SET = "VALUE_SET"
BOOLEAN = "BOOLEAN"
OTHER = "OTHER"
| ProfilerSemanticTypes |
python | kamyu104__LeetCode-Solutions | Python/maximum-element-sum-of-a-complete-subset-of-indices.py | {
"start": 166,
"end": 415
} | class ____(object):
def maximumSum(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return max(sum(nums[i*x**2-1] for x in xrange(1, int((len(nums)//i)**0.5)+1)) for i in xrange(1, len(nums)+1))
| Solution |
python | davidhalter__parso | parso/python/tree.py | {
"start": 20715,
"end": 20764
} | class ____(PythonBaseNode):
__slots__ = ()
| Flow |
python | gevent__gevent | src/gevent/libuv/watcher.py | {
"start": 27906,
"end": 28313
} | class ____(_base.SignalMixin, watcher):
_watcher_callback_name = '_gevent_signal_callback1'
def _watcher_ffi_init(self, args):
self._watcher_init(self.loop.ptr, self._watcher)
self.ref = False # libev doesn't ref these by default
def _watcher_ffi_start(self):
self._watcher_start(self._watcher, self._watcher_callback,
self._signalnum)
| signal |
python | ethereum__web3.py | web3/_utils/abi.py | {
"start": 7771,
"end": 7880
} | class ____(AcceptsHexStrEncoder):
subencoder_cls = encoding.BytesEncoder
is_strict = False
| BytesEncoder |
python | tiangolo__fastapi | tests/test_dependency_yield_scope_websockets.py | {
"start": 819,
"end": 6219
} | class ____:
def __init__(self, name: str = "default") -> None:
self.name = name
self.open = True
def get_named_session(session: SessionRequestDep, session_b: SessionDefaultDep) -> Any:
assert session is session_b
named_session = NamedSession(name="named")
yield named_session, session_b
named_session.open = False
global_state = global_context.get()
global_state["named_session_closed"] = True
NamedSessionsDep = Annotated[Tuple[NamedSession, Session], Depends(get_named_session)]
def get_named_func_session(session: SessionFuncDep) -> Any:
named_session = NamedSession(name="named")
yield named_session, session
named_session.open = False
global_state = global_context.get()
global_state["named_func_session_closed"] = True
def get_named_regular_func_session(session: SessionFuncDep) -> Any:
named_session = NamedSession(name="named")
return named_session, session
BrokenSessionsDep = Annotated[
Tuple[NamedSession, Session], Depends(get_named_func_session)
]
NamedSessionsFuncDep = Annotated[
Tuple[NamedSession, Session], Depends(get_named_func_session, scope="function")
]
RegularSessionsDep = Annotated[
Tuple[NamedSession, Session], Depends(get_named_regular_func_session)
]
app = FastAPI()
@app.websocket("/function-scope")
async def function_scope(websocket: WebSocket, session: SessionFuncDep) -> Any:
await websocket.accept()
await websocket.send_json({"is_open": session.open})
@app.websocket("/request-scope")
async def request_scope(websocket: WebSocket, session: SessionRequestDep) -> Any:
await websocket.accept()
await websocket.send_json({"is_open": session.open})
@app.websocket("/two-scopes")
async def get_stream_session(
websocket: WebSocket,
function_session: SessionFuncDep,
request_session: SessionRequestDep,
) -> Any:
await websocket.accept()
await websocket.send_json(
{"func_is_open": function_session.open, "req_is_open": request_session.open}
)
@app.websocket("/sub")
async def get_sub(websocket: WebSocket, sessions: NamedSessionsDep) -> Any:
await websocket.accept()
await websocket.send_json(
{"named_session_open": sessions[0].open, "session_open": sessions[1].open}
)
@app.websocket("/named-function-scope")
async def get_named_function_scope(
websocket: WebSocket, sessions: NamedSessionsFuncDep
) -> Any:
await websocket.accept()
await websocket.send_json(
{"named_session_open": sessions[0].open, "session_open": sessions[1].open}
)
@app.websocket("/regular-function-scope")
async def get_regular_function_scope(
websocket: WebSocket, sessions: RegularSessionsDep
) -> Any:
await websocket.accept()
await websocket.send_json(
{"named_session_open": sessions[0].open, "session_open": sessions[1].open}
)
client = TestClient(app)
def test_function_scope() -> None:
global_context.set({})
global_state = global_context.get()
with client.websocket_connect("/function-scope") as websocket:
data = websocket.receive_json()
assert data["is_open"] is True
assert global_state["session_closed"] is True
def test_request_scope() -> None:
global_context.set({})
global_state = global_context.get()
with client.websocket_connect("/request-scope") as websocket:
data = websocket.receive_json()
assert data["is_open"] is True
assert global_state["session_closed"] is True
def test_two_scopes() -> None:
global_context.set({})
global_state = global_context.get()
with client.websocket_connect("/two-scopes") as websocket:
data = websocket.receive_json()
assert data["func_is_open"] is True
assert data["req_is_open"] is True
assert global_state["session_closed"] is True
def test_sub() -> None:
global_context.set({})
global_state = global_context.get()
with client.websocket_connect("/sub") as websocket:
data = websocket.receive_json()
assert data["named_session_open"] is True
assert data["session_open"] is True
assert global_state["session_closed"] is True
assert global_state["named_session_closed"] is True
def test_broken_scope() -> None:
with pytest.raises(
FastAPIError,
match='The dependency "get_named_func_session" has a scope of "request", it cannot depend on dependencies with scope "function"',
):
@app.websocket("/broken-scope")
async def get_broken(
websocket: WebSocket, sessions: BrokenSessionsDep
) -> Any: # pragma: no cover
pass
def test_named_function_scope() -> None:
global_context.set({})
global_state = global_context.get()
with client.websocket_connect("/named-function-scope") as websocket:
data = websocket.receive_json()
assert data["named_session_open"] is True
assert data["session_open"] is True
assert global_state["session_closed"] is True
assert global_state["named_func_session_closed"] is True
def test_regular_function_scope() -> None:
global_context.set({})
global_state = global_context.get()
with client.websocket_connect("/regular-function-scope") as websocket:
data = websocket.receive_json()
assert data["named_session_open"] is True
assert data["session_open"] is True
assert global_state["session_closed"] is True
| NamedSession |
python | plotly__plotly.py | plotly/graph_objs/funnel/_stream.py | {
"start": 233,
"end": 3494
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "funnel"
_path_str = "funnel.stream"
_valid_props = {"maxpoints", "token"}
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.funnel.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super().__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.funnel.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.funnel.Stream`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("maxpoints", arg, maxpoints)
self._set_property("token", arg, token)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Stream |
python | mlflow__mlflow | mlflow/store/artifact/dbfs_artifact_repo.py | {
"start": 1660,
"end": 11587
} | class ____(ArtifactRepository):
"""
Stores artifacts on DBFS using the DBFS REST API.
This repository is used with URIs of the form ``dbfs:/<path>``. The repository can only be used
together with the RestStore.
"""
def __init__(
self, artifact_uri: str, tracking_uri: str | None = None, registry_uri: str | None = None
) -> None:
if not is_valid_dbfs_uri(artifact_uri):
raise MlflowException(
message="DBFS URI must be of the form dbfs:/<path> or "
+ "dbfs://profile@databricks/<path>",
error_code=INVALID_PARAMETER_VALUE,
)
# The dbfs:/ path ultimately used for artifact operations should not contain the
# Databricks profile info, so strip it before setting ``artifact_uri``.
super().__init__(
remove_databricks_profile_info_from_artifact_uri(artifact_uri),
tracking_uri,
registry_uri,
)
if databricks_profile_uri := get_databricks_profile_uri_from_artifact_uri(artifact_uri):
hostcreds_from_uri = get_databricks_host_creds(databricks_profile_uri)
self.get_host_creds = lambda: hostcreds_from_uri
else:
self.get_host_creds = _get_host_creds_from_default_store()
def _databricks_api_request(self, endpoint, method, **kwargs):
host_creds = self.get_host_creds()
return http_request_safe(host_creds=host_creds, endpoint=endpoint, method=method, **kwargs)
def _dbfs_list_api(self, json):
host_creds = self.get_host_creds()
return http_request(
host_creds=host_creds, endpoint=LIST_API_ENDPOINT, method="GET", params=json
)
def _dbfs_download(self, output_path, endpoint):
with open(output_path, "wb") as f:
response = self._databricks_api_request(endpoint=endpoint, method="GET", stream=True)
try:
for content in response.iter_content(chunk_size=DOWNLOAD_CHUNK_SIZE):
f.write(content)
finally:
response.close()
def _is_directory(self, artifact_path):
dbfs_path = self._get_dbfs_path(artifact_path) if artifact_path else self._get_dbfs_path("")
return self._dbfs_is_dir(dbfs_path)
def _dbfs_is_dir(self, dbfs_path):
response = self._databricks_api_request(
endpoint=GET_STATUS_ENDPOINT, method="GET", params={"path": dbfs_path}
)
json_response = json.loads(response.text)
try:
return json_response["is_dir"]
except KeyError:
raise MlflowException(f"DBFS path {dbfs_path} does not exist")
def _get_dbfs_path(self, artifact_path):
return "/{}/{}".format(
strip_scheme(self.artifact_uri).lstrip("/"),
artifact_path.lstrip("/"),
)
def _get_dbfs_endpoint(self, artifact_path):
return f"/dbfs{self._get_dbfs_path(artifact_path)}"
def log_artifact(self, local_file, artifact_path=None):
basename = os.path.basename(local_file)
if artifact_path:
http_endpoint = self._get_dbfs_endpoint(posixpath.join(artifact_path, basename))
else:
http_endpoint = self._get_dbfs_endpoint(basename)
if os.stat(local_file).st_size == 0:
# The API frontend doesn't like it when we post empty files to it using
# `requests.request`, potentially due to the bug described in
# https://github.com/requests/requests/issues/4215
self._databricks_api_request(
endpoint=http_endpoint, method="POST", data="", allow_redirects=False
)
else:
with open(local_file, "rb") as f:
self._databricks_api_request(
endpoint=http_endpoint, method="POST", data=f, allow_redirects=False
)
def log_artifacts(self, local_dir, artifact_path=None):
artifact_path = artifact_path or ""
for dirpath, _, filenames in os.walk(local_dir):
artifact_subdir = artifact_path
if dirpath != local_dir:
rel_path = os.path.relpath(dirpath, local_dir)
rel_path = relative_path_to_artifact_path(rel_path)
artifact_subdir = posixpath.join(artifact_path, rel_path)
for name in filenames:
file_path = os.path.join(dirpath, name)
self.log_artifact(file_path, artifact_subdir)
def list_artifacts(self, path: str | None = None) -> list[FileInfo]:
dbfs_path = self._get_dbfs_path(path) if path else self._get_dbfs_path("")
dbfs_list_json = {"path": dbfs_path}
response = self._dbfs_list_api(dbfs_list_json)
try:
json_response = json.loads(response.text)
except ValueError:
raise MlflowException(
f"API request to list files under DBFS path {dbfs_path} failed with "
f"status code {response.status_code}. Response body: {response.text}"
)
# /api/2.0/dbfs/list will not have the 'files' key in the response for empty directories.
infos = []
artifact_prefix = strip_prefix(self.artifact_uri, "dbfs:")
if json_response.get("error_code", None) == RESOURCE_NON_EXISTENT:
return []
dbfs_files = json_response.get("files", [])
for dbfs_file in dbfs_files:
stripped_path = strip_prefix(dbfs_file["path"], artifact_prefix + "/")
# If `path` is a file, the DBFS list API returns a single list element with the
# same name as `path`. The list_artifacts API expects us to return an empty list in this
# case, so we do so here.
if stripped_path == path:
return []
is_dir = dbfs_file["is_dir"]
artifact_size = None if is_dir else dbfs_file["file_size"]
infos.append(FileInfo(stripped_path, is_dir, artifact_size))
return sorted(infos, key=lambda f: f.path)
def _download_file(self, remote_file_path, local_path):
self._dbfs_download(
output_path=local_path, endpoint=self._get_dbfs_endpoint(remote_file_path)
)
def delete_artifacts(self, artifact_path=None):
raise MlflowException("Not implemented yet")
def _get_host_creds_from_default_store():
store = utils._get_store()
if not isinstance(store, RestStore):
raise MlflowException(
"Failed to get credentials for DBFS; they are read from the "
+ "Databricks CLI credentials or MLFLOW_TRACKING* environment "
+ "variables."
)
return store.get_host_creds
def dbfs_artifact_repo_factory(
artifact_uri: str, tracking_uri: str | None = None, registry_uri: str | None = None
):
"""
Returns an ArtifactRepository subclass for storing artifacts on DBFS.
This factory method is used with URIs of the form ``dbfs:/<path>``. DBFS-backed artifact
storage can only be used together with the RestStore.
In the special case where the URI is of the form
`dbfs:/databricks/mlflow-tracking/<Exp-ID>/<Run-ID>/<path>',
a DatabricksArtifactRepository is returned. This is capable of storing access controlled
artifacts.
Args:
artifact_uri: DBFS root artifact URI.
tracking_uri: The tracking URI.
registry_uri: The registry URI.
Returns:
Subclass of ArtifactRepository capable of storing artifacts on DBFS.
"""
if not is_valid_dbfs_uri(artifact_uri):
raise MlflowException(
"DBFS URI must be of the form dbfs:/<path> or "
+ "dbfs://profile@databricks/<path>, but received "
+ artifact_uri
)
cleaned_artifact_uri = artifact_uri.rstrip("/")
db_profile_uri = get_databricks_profile_uri_from_artifact_uri(cleaned_artifact_uri)
if is_databricks_acled_artifacts_uri(artifact_uri):
if DatabricksLoggedModelArtifactRepository.is_logged_model_uri(artifact_uri):
return DatabricksLoggedModelArtifactRepository(
cleaned_artifact_uri, tracking_uri=tracking_uri, registry_uri=registry_uri
)
elif (
not MLFLOW_DISABLE_DATABRICKS_SDK_FOR_RUN_ARTIFACTS.get()
and DatabricksRunArtifactRepository.is_run_uri(artifact_uri)
):
return DatabricksRunArtifactRepository(
cleaned_artifact_uri, tracking_uri=tracking_uri, registry_uri=registry_uri
)
return DatabricksArtifactRepository(
cleaned_artifact_uri, tracking_uri=tracking_uri, registry_uri=registry_uri
)
elif (
mlflow.utils.databricks_utils.is_dbfs_fuse_available()
and MLFLOW_ENABLE_DBFS_FUSE_ARTIFACT_REPO.get()
and not is_databricks_model_registry_artifacts_uri(artifact_uri)
and (db_profile_uri is None or db_profile_uri == "databricks")
):
# If the DBFS FUSE mount is available, write artifacts directly to
# /dbfs/... using local filesystem APIs.
# Note: it is possible for a named Databricks profile to point to the current workspace,
# but we're going to avoid doing a complex check and assume users will use `databricks`
# to mean the current workspace. Using `DbfsRestArtifactRepository` to access the current
# workspace's DBFS should still work; it just may be slower.
final_artifact_uri = remove_databricks_profile_info_from_artifact_uri(cleaned_artifact_uri)
file_uri = "file:///dbfs/{}".format(strip_prefix(final_artifact_uri, "dbfs:/"))
return LocalArtifactRepository(
file_uri, tracking_uri=tracking_uri, registry_uri=registry_uri
)
return DbfsRestArtifactRepository(
cleaned_artifact_uri, tracking_uri=tracking_uri, registry_uri=registry_uri
)
| DbfsRestArtifactRepository |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/skills/version_delete_response.py | {
"start": 160,
"end": 465
} | class ____(BaseModel):
id: str
"""Version identifier for the skill.
Each version is identified by a Unix epoch timestamp (e.g., "1759178010641129").
"""
type: str
"""Deleted object type.
For Skill Versions, this is always `"skill_version_deleted"`.
"""
| VersionDeleteResponse |
python | pytorch__pytorch | test/dynamo/test_model_output.py | {
"start": 8767,
"end": 12251
} | class ____(TestCase):
@maybe_skip
def test_HF_bert_model_output(self, device):
class BertPooler(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.dense = torch.nn.Linear(768, 768).to(device)
self.activation = torch.nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertEncoder(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(
self,
hidden_states: torch.Tensor,
) -> BaseModelOutputWithPastAndCrossAttentions:
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=None,
hidden_states=None,
attentions=None,
cross_attentions=None,
)
class BertModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.encoder = BertEncoder()
self.pooler = BertPooler()
def forward(
self,
sequence_output: torch.Tensor,
) -> BaseModelOutputWithPoolingAndCrossAttentions:
encoder_outputs = self.encoder(sequence_output)
# test __getitem__ and to_tuple
sequence_output = encoder_outputs[0]
pooled_output = (
self.pooler(sequence_output) if self.pooler is not None else None
)
# test CustomDictVariable.create
result = BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
# test __setattr__
result.pooler_output = pooled_output
# test __setitem__
result["pooler_output"] = pooled_output
return result
sequence_output = torch.rand(1, 12, 768).to(device)
model = BertModel()
orig_result = model(sequence_output)
compiled_model = torch.compile(model, backend="eager")
compiled_result = compiled_model(sequence_output)
self.assertTrue(
torch.allclose(
orig_result.last_hidden_state, compiled_result.last_hidden_state
)
)
self.assertTrue(
torch.allclose(orig_result.pooler_output, compiled_result.pooler_output)
)
devices = ["cpu", "cuda", "xpu", "hpu"]
instantiate_device_type_tests(
TestModelOutputBert, globals(), only_for=devices, allow_xpu=True
)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
| TestModelOutputBert |
python | google__jax | jax/_src/interpreters/mlir.py | {
"start": 44577,
"end": 60245
} | class ____(NamedTuple):
contains_unconstrained: bool
all_unconstrained: bool
def _get_unconstrained_variants(s, aval) -> UnconstrainedVariants:
us = contains_unconstrained(s)
return UnconstrainedVariants(
contains_unconstrained=us, all_unconstrained=all_unconstrained(s, aval))
def check_jaxpr_constants(closed_jaxpr: core.ClosedJaxpr):
"""Check if a JAXPR contains an excessive amount of constants, if so, report where they were captured"""
if config.use_simplified_jaxpr_constants.value:
return
if (threshold := config.captured_constants_warn_bytes.value) == -1:
return
# need the unaesthetic getter here as some of the consts in the test suite are arbitrary objects
total_iter, nbytes_iter = itertools.tee(
map(lambda c: getattr(c, "nbytes", 0), closed_jaxpr.consts)
)
if (total_bytes := sum(total_iter)) < threshold:
return
message = (
"A large amount of constants were captured during lowering"
f" ({util.pprint_bytes(total_bytes)} total). If this is intentional,"
" disable this warning by setting JAX_CAPTURED_CONSTANTS_WARN_BYTES=-1. "
)
if not (num_frames := config.captured_constants_report_frames.value):
message += (
"To obtain a report of where these constants were encountered, "
"set JAX_CAPTURED_CONSTANTS_REPORT_FRAMES=-1."
)
warnings.warn(message)
return
message += (
"The subsequent report may be disabled by setting JAX_CAPTURED_CONSTANTS_REPORT_FRAMES=0.\n\n"
f"Largest {min(num_frames, len(closed_jaxpr.consts))} allocation(s):\n"
)
try:
nbytes_var_const = zip(nbytes_iter, closed_jaxpr.jaxpr.constvars, closed_jaxpr.consts)
for nbytes, var, const in heapq.nlargest(5, nbytes_var_const, key=operator.itemgetter(0)):
message += f" Constant {type(const)}, {var.aval.str_short()}, {util.pprint_bytes(nbytes)} captured at:\n"
for eqn in jaxpr_util.eqns_using_var(closed_jaxpr.jaxpr, var):
call_frame_source_info = source_info_util.summarize(eqn.source_info, num_frames)
message += " " * 2 + call_frame_source_info.replace("\n", "\n" + " " * 2) + "\n\n"
warnings.warn(message)
except Exception as exc:
warnings.warn(message + f" Exception raised while generating report: {exc}")
# TODO(phawkins): it is my firm belief that:
# a) channel IDs have only a vestigal function when applied to collectives, and
# b) their identity does not matter. The presence or absence of a channel
# changes whether XLA considers collectives to be inter-replica or
# inter-partition, but beyond that we believe they have little effect.
COLLECTIVE_CHANNEL_ID = 1
def lower_jaxpr_to_module(
module_name: str,
jaxpr: core.ClosedJaxpr,
*,
num_const_args: int,
in_avals: Sequence[core.AbstractValue],
ordered_effects: list[core.Effect],
# See ModuleContext.get_backend() for backend and platforms usage.
platforms: Sequence[str],
backend: xb.XlaBackend | None,
axis_context: AxisContext,
donated_args: Sequence[bool],
replicated_args: Sequence[bool] | None = None,
arg_shardings: Sequence[JSharding | AUTO | None] | None = None,
result_shardings: Sequence[JSharding | AUTO | None] | None = None,
in_layouts: Sequence[Layout | None | AutoLayout] | None = None,
out_layouts: Sequence[Layout | None | AutoLayout] | None = None,
arg_names: Sequence[str] | None = None,
result_names: Sequence[str] | None = None,
num_replicas: int = 1,
num_partitions: int = 1,
all_default_mem_kind: bool = True,
input_output_aliases: None | tuple[int | None, ...] = None,
propagated_out_mem_kinds: tuple[None | str, ...] | None = None,
lowering_parameters: LoweringParameters,
) -> LoweringResult:
"""Lowers a top-level jaxpr to an MLIR module.
Handles the quirks of the argument/return value passing conventions of the
runtime.
The inputs already account for the constant arguments.
See https://docs.jax.dev/en/latest/internals/constants.html
"""
util.test_event("lower_jaxpr_to_module")
platforms = tuple(map(xb.canonicalize_platform, platforms))
sharded_in_avals = (in_avals if arg_shardings is None else
map(sharded_aval, in_avals, arg_shardings))
sharded_out_avals = (jaxpr.out_avals if result_shardings is None else
map(sharded_aval, jaxpr.out_avals, result_shardings))
if all_default_mem_kind:
arg_memory_kinds = None
result_memory_kinds = None
else:
arg_memory_kinds = (map(_get_mem_kind, arg_shardings)
if arg_shardings is not None else None)
result_memory_kinds = (map(_get_mem_kind, result_shardings)
if result_shardings is not None else None)
# TODO(yashkatariya): Simplify the donation logic.
xla_donated_args = None
platforms_with_donation = [p for p in platforms
if p in _platforms_with_donation]
if platforms_with_donation:
if len(platforms_with_donation) != len(platforms) and (
xla_donated_args or any(donated_args)):
raise NotImplementedError(
"In multi-platform lowering either all or no lowering platforms "
f"should support donation. Lowering for {platforms} of which "
f"only {platforms_with_donation} support donation")
input_output_aliases, donated_args, xla_donated_args = _set_up_aliases(
input_output_aliases, sharded_in_avals, sharded_out_avals, donated_args,
arg_memory_kinds, result_memory_kinds, in_layouts, out_layouts,
result_shardings if num_partitions > 1 else None)
if (num_partitions > 1 and
(result_shardings is None or
any(s is None or isinstance(s, AUTO) or contains_unconstrained(s)
for s in result_shardings))):
if xla_donated_args is None:
xla_donated_args = [False] * len(donated_args)
for input_id in range(len(donated_args)):
if donated_args[input_id]:
xla_donated_args[input_id] = True
donated_args[input_id] = False
if any(donated_args):
unused_donations = [str(a) for a, d in zip(sharded_in_avals, donated_args) if d]
msg = "See an explanation at https://docs.jax.dev/en/latest/faq.html#buffer-donation."
if not platforms_with_donation:
msg = f"Donation is not implemented for {platforms}.\n{msg}"
if unused_donations:
warnings.warn("Some donated buffers were not usable:"
f" {', '.join(unused_donations)}.\n{msg}")
# Delete donated_args by default here, since it's not needed beyond this point
del donated_args
unlowerable_effects = effects_lib.lowerable_effects.filter_not_in(
jaxpr.effects)
if unlowerable_effects:
raise ValueError(f'Cannot lower jaxpr with effects: {jaxpr.effects}')
# HLO channels need to start at 1. We reserve 1 for collectives.
channel_iter = itertools.count(COLLECTIVE_CHANNEL_ID + 1)
# Create a keepalives list that will be mutated during the lowering.
keepalives: list[Any] = []
host_callbacks: list[Any] = []
dim_vars: Sequence[str]
if not config.dynamic_shapes.value:
# Find the dimension variables
all_dim_poly = [d for aval in sharded_in_avals if hasattr(aval, "shape")
for d in aval.shape if not core.is_constant_dim(d)]
dim_vars = tuple(sorted(functools.reduce(lambda acc, new: acc.union(new._get_vars()),
all_dim_poly, set())))
else:
dim_vars = ()
ctx = ModuleContext(backend=backend,
platforms=platforms, axis_context=axis_context,
keepalives=keepalives,
channel_iterator=channel_iter,
host_callbacks=host_callbacks,
lowering_parameters=lowering_parameters,
shape_poly_state=ShapePolyLoweringState(dim_vars, platforms),
all_default_mem_kind=all_default_mem_kind)
with ctx.context, ir.Location.unknown(ctx.context):
# Remove module name characters that XLA would alter. This ensures that
# XLA computation preserves the module name.
attrs = ctx.module.operation.attributes
attrs["sym_name"] = ir.StringAttr.get(
sanitize_name(module_name).rstrip("_"))
attrs["mhlo.num_replicas"] = i32_attr(num_replicas)
attrs["mhlo.num_partitions"] = i32_attr(num_partitions)
lower_jaxpr_to_fun(
ctx, module_name, jaxpr, ordered_effects,
num_const_args=num_const_args,
main_function=True,
replicated_args=replicated_args,
in_avals=in_avals,
arg_shardings=arg_shardings,
result_shardings=result_shardings,
input_output_aliases=input_output_aliases,
xla_donated_args=xla_donated_args,
arg_names=arg_names,
result_names=result_names,
arg_memory_kinds=arg_memory_kinds,
result_memory_kinds=result_memory_kinds,
arg_layouts=in_layouts,
result_layouts=out_layouts,
propagated_out_mem_kinds=propagated_out_mem_kinds)
try:
if not ctx.module.operation.verify():
raise ValueError(
"Cannot lower jaxpr with verifier errors. " +
dump_module_message(ctx.module, "verification"))
except ir.MLIRError as e:
msg_lines = ["Cannot lower jaxpr with verifier errors:"]
def emit_diagnostic_info(d):
msg_lines.append(f"\t{d.message}")
msg_lines.append(f"\t\tat {d.location}")
for n in d.notes:
emit_diagnostic_info(n)
for d in e.error_diagnostics:
emit_diagnostic_info(d)
raise ValueError("\n".join(msg_lines) + "\n" +
dump_module_message(ctx.module, "verification")) from e
with ctx.context:
# Cached lowering rule evaluation leaves dead functions. Remove them.
pipeline = passmanager.PassManager.parse(
'builtin.module(symbol-dce)')
pipeline.run(ctx.module.operation)
if config.use_shardy_partitioner.value:
pipeline = passmanager.PassManager.parse(
'builtin.module(sdy-lift-inlined-meshes)')
pipeline.run(ctx.module.operation)
util.test_event("mlir.collect_lowered_jaxprs", jaxpr, ctx.module)
return LoweringResult(ctx.module, ctx.keepalives, ctx.host_callbacks,
ctx.shape_poly_state)
def _set_up_aliases(input_output_aliases, avals_in, avals_out,
donated_args, arg_memory_kinds, result_memory_kinds,
in_layouts, out_layouts, result_shardings):
if input_output_aliases is None:
input_output_aliases = [None] * len(avals_in)
else:
input_output_aliases = list(input_output_aliases)
# To match-up in-avals to out-avals we only care about the number of
# bytes, so we strip off unrelated aval metadata (eg. the named shape)
strip_metadata = lambda a: (a if a is core.abstract_token else
core.ShapedArray(a.shape, a.dtype))
avals_in = map(strip_metadata, avals_in)
avals_out = map(strip_metadata, avals_out)
# Both arg and result memory kinds need to be specified to donate based on
# the memory kind. For jit's where out_shardings is not specified, we don't
# know the memory kind so don't condition the logic based on the memory kind.
# TODO(yashkatariya): Note that this logic should be in C++ where we make
# donation decisions are made after SPMD propagation passes and memory
# placement passes so that we have all the information.
if (arg_memory_kinds is None or result_memory_kinds is None or
any(a is None for a in arg_memory_kinds) or
any(r is None for r in result_memory_kinds)):
arg_memory_kinds = [None] * len(avals_in)
result_memory_kinds = [None] * len(avals_out)
donations = collections.defaultdict(collections.deque)
for i, (aval, am, donated, aliased) in enumerate(
zip(avals_in, arg_memory_kinds, donated_args, input_output_aliases)):
if donated and aliased is None:
donations[(aval, am)].append(i)
xla_donated_args = None
out_donated_args = list(donated_args)
in_out_layout_not_none = in_layouts is not None and out_layouts is not None
for i, (aval, rm) in enumerate(zip(avals_out, result_memory_kinds)):
# Only donate if memory kinds match. Relax this when the compiler can
# donate across memories.
key = (aval, rm)
if donations.get(key, ()):
input_id = donations[key].popleft()
out_donated_args[input_id] = False
if (in_out_layout_not_none and
isinstance(in_layouts[input_id], AutoLayout) and
not isinstance(out_layouts[i], AutoLayout)):
raise ValueError(
f"Input layout being donated was {in_layouts[input_id]} while"
f" output layout was {out_layouts[i]}. Did you mean to set the"
" **output layout** to **Layout.AUTO**?\nThis will"
" allow for the input and output layout to be chosen by XLA and"
" not the layout of the output which might not be optimal.")
if (in_out_layout_not_none and
not isinstance(in_layouts[input_id], AutoLayout) and
isinstance(out_layouts[i], AutoLayout)):
raise ValueError(
f"Input layout being donated was {in_layouts[input_id]} while"
f" output layout was {out_layouts[i]}. Did you mean to set the"
" **input layout** to **Layout.AUTO**?\nThis will allow"
" for the input and output layout to be chosen by XLA and not the"
" layout of the input which might not be optimal.")
if (in_layouts is None or out_layouts is None or
in_layouts[input_id] == out_layouts[i]) and (
result_shardings is None or not (
(s := result_shardings[i]) is None or
isinstance(s, AUTO) or contains_unconstrained(s))):
input_output_aliases[input_id] = i
else:
# Fallback to xla donation if layouts don't match.
if xla_donated_args is None:
xla_donated_args = [False] * len(avals_in)
xla_donated_args[input_id] = True
aliased_output_ids = {i for i in input_output_aliases if i is not None}
results_not_matched = collections.defaultdict(collections.deque)
for i, (aval, rm) in enumerate(zip(avals_out, result_memory_kinds)):
if i not in aliased_output_ids and aval is not core.abstract_token:
results_not_matched[(aval.size, rm)].append(i)
# For each donated argument that hasn't been aliased or donated to XLA, try to
# find an output array with matching size ignoring shapes. If a matching
# output array is found, then the argument is donated to XLA.
# Similar to the aliasing logic above, an argument is donated to XLA even if
# its layout and the output's layout don't match. This is being done to
# provide more opportunities for XLA to reuse the donated arguments.
for input_idx in range(len(out_donated_args)):
# If the argument is not a token and hasn't been aliased or donated to XLA,
# then try to find an output array with matching size.
if (out_donated_args[input_idx]
and avals_in[input_idx] is not core.abstract_token):
key = (avals_in[input_idx].size, arg_memory_kinds[input_idx])
if results_not_matched.get(key, ()):
# XLA donate the argument because there's a matching output array.
results_not_matched[key].popleft()
out_donated_args[input_idx] = False
if xla_donated_args is None:
xla_donated_args = [False] * len(avals_in)
xla_donated_args[input_idx] = True
return input_output_aliases, out_donated_args, xla_donated_args
Token = ir.Value
token_type = hlo.TokenType.get
create_token = hlo.create_token
| UnconstrainedVariants |
python | doocs__leetcode | solution/1600-1699/1656.Design an Ordered Stream/Solution.py | {
"start": 0,
"end": 507
} | class ____:
def __init__(self, n: int):
self.ptr = 1
self.data = [None] * (n + 1)
def insert(self, idKey: int, value: str) -> List[str]:
self.data[idKey] = value
ans = []
while self.ptr < len(self.data) and self.data[self.ptr]:
ans.append(self.data[self.ptr])
self.ptr += 1
return ans
# Your OrderedStream object will be instantiated and called as such:
# obj = OrderedStream(n)
# param_1 = obj.insert(idKey,value)
| OrderedStream |
python | tensorflow__tensorflow | tensorflow/python/ops/numpy_ops/tests/np_indexing_test.py | {
"start": 33916,
"end": 42330
} | class ____(jtu.TestCase):
@parameterized.named_parameters(jtu.cases_from_list({ # pylint: disable=g-complex-comprehension
"testcase_name": "_{}_{}_{}_{}".format(
jtu.format_shape_dtype_string(shape, dtype), indexer,
jtu.format_shape_dtype_string(update_shape, update_dtype), op.name),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory,
"indexer": indexer, "update_shape": update_shape,
"update_dtype": update_dtype, "op": op
} for name, index_specs in STATIC_INDEXING_TESTS
for shape, indexer in index_specs
for op in UpdateOps
for dtype in (all_dtypes if op == UpdateOps.UPDATE else default_dtypes)
for update_shape in _broadcastable_shapes(_update_shape(shape, indexer))
for update_dtype in all_dtypes
for rng_factory in [jtu.rand_default]))
def testStaticIndexing(self, shape, dtype, update_shape, update_dtype,
rng_factory, indexer, op):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype), rng(update_shape, update_dtype)]
np_fn = lambda x, y: UpdateOps.np_fn(op, indexer, x, y)
tfnp_fn = lambda x, y: UpdateOps.tfnp_fn(op, indexer, x, y)
self._CheckAgainstNumpy(np_fn, tfnp_fn, args_maker)
# TODO(wangpeng): When indexer is slice(_, 8, -1), XLA throws error "Missing
# xla_context 0-th output from". Investigate.
check_xla = (not has_non_trivial_stride(indexer) and # b/123559667
not (isinstance(indexer, slice) and indexer.stop == 8 and
indexer.step == -1))
self._CompileAndCheck(tfnp_fn, args_maker, check_incomplete_shape=True,
check_experimental_compile=check_xla,
check_xla_forced_compile=check_xla)
@parameterized.named_parameters(jtu.cases_from_list({ # pylint: disable=g-complex-comprehension
"testcase_name": "_{}_{}_{}_{}".format(
jtu.format_shape_dtype_string(shape, dtype), indexer,
jtu.format_shape_dtype_string(update_shape, update_dtype), op.name),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory,
"indexer": indexer, "update_shape": update_shape,
"update_dtype": update_dtype, "op": op
} for name, index_specs in ADVANCED_INDEXING_TESTS_NO_REPEATS
for shape, indexer in index_specs
for op in UpdateOps
for dtype in (all_dtypes if op == UpdateOps.UPDATE else default_dtypes)
for update_shape in _broadcastable_shapes(_update_shape(shape, indexer))
for update_dtype in all_dtypes
for rng_factory in [jtu.rand_default]))
def testAdvancedIndexing(self, shape, dtype, update_shape, update_dtype,
rng_factory, indexer, op):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype), rng(update_shape, update_dtype)]
np_fn = lambda x, y: UpdateOps.np_fn(op, indexer, x, y)
tfnp_fn = lambda x, y: UpdateOps.tfnp_fn(op, indexer, x, y)
self._CheckAgainstNumpy(np_fn, tfnp_fn, args_maker)
self._CompileAndCheck(tfnp_fn, args_maker, check_incomplete_shape=True)
@parameterized.named_parameters(jtu.cases_from_list({ # pylint: disable=g-complex-comprehension
"testcase_name": "_{}_{}_{}_{}".format(
jtu.format_shape_dtype_string(shape, dtype), indexer,
jtu.format_shape_dtype_string(update_shape, update_dtype), op.name),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory,
"indexer": indexer, "update_shape": update_shape,
"update_dtype": update_dtype, "op": op
} for name, index_specs in MIXED_ADVANCED_INDEXING_TESTS_NO_REPEATS
for shape, indexer in index_specs
for op in UpdateOps
for dtype in (all_dtypes if op == UpdateOps.UPDATE else default_dtypes)
for update_shape in _broadcastable_shapes(_update_shape(shape, indexer))
for update_dtype in all_dtypes
for rng_factory in [jtu.rand_default]))
def testMixedAdvancedIndexing(self, shape, dtype, update_shape, update_dtype,
rng_factory, indexer, op):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype), rng(update_shape, update_dtype)]
np_fn = lambda x, y: UpdateOps.np_fn(op, indexer, x, y)
tfnp_fn = lambda x, y: UpdateOps.tfnp_fn(op, indexer, x, y)
self._CheckAgainstNumpy(np_fn, tfnp_fn, args_maker)
check_xla = not has_non_trivial_stride(indexer) # b/123559667
self._CompileAndCheck(tfnp_fn, args_maker, check_incomplete_shape=True,
check_experimental_compile=check_xla,
check_xla_forced_compile=check_xla)
@parameterized.named_parameters(jtu.cases_from_list({ # pylint: disable=g-complex-comprehension
"testcase_name": "_{}_{}_{}_{}".format(
jtu.format_shape_dtype_string(shape, dtype), indexer,
jtu.format_shape_dtype_string(update_shape, update_dtype), op.name),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory,
"indexer": indexer, "update_shape": update_shape,
"update_dtype": update_dtype, "op": op
} for name, index_specs in STATIC_INDEXING_TESTS
for shape, indexer in index_specs
for op in [UpdateOps.ADD, UpdateOps.UPDATE]
for dtype in float_dtypes
for update_shape in _broadcastable_shapes(_update_shape(shape, indexer))
for update_dtype in float_dtypes
for rng_factory in [jtu.rand_default]))
def testStaticIndexingGrads(self, shape, dtype, update_shape, update_dtype,
rng_factory, indexer, op):
rng = rng_factory()
tfnp_fn = lambda x, y: UpdateOps.tfnp_fn(op, indexer, x, y)
x = rng(shape, dtype)
y = rng(update_shape, update_dtype)
self.check_grads(tfnp_fn, (x, y), rtol=1e-3, atol=1e-3, delta=1.)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_start_indices={}_update_shape={}".format( # pylint: disable=g-complex-comprehension
jtu.format_shape_dtype_string(shape, dtype),
start_indices, update_shape),
"shape": shape, "dtype": dtype, "start_indices": start_indices,
"update_shape": update_shape, "rng_factory": rng_factory}
for shape, start_indices, update_shape in [
[(3,), (1,), (1,)],
[(5, 3), (1, 1), (3, 1)],
[(5, 3), (1, -2), (3, 1)],
[(7, 5, 3), (4, 1, 0), (2, 0, 1)],
[(), (), ()],
]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testDynamicUpdateSlice(self, shape, dtype, start_indices, update_shape,
rng_factory):
rng = rng_factory()
def args_maker():
return [rng(shape, dtype), rng(update_shape, dtype),
onp.array(start_indices)]
# update's shape must be fully known.
# TODO(wangpeng): Support turning off check_incomplete_shape for individual
# arguments.
self._CompileAndCheck(
nje.dynamic_update_slice, args_maker, check_incomplete_shape=False
)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_start_indices={}_update_shape={}".format( # pylint: disable=g-complex-comprehension
jtu.format_shape_dtype_string(shape, dtype),
start_indices, update_shape),
"shape": shape, "dtype": dtype, "start_indices": start_indices,
"update_shape": update_shape, "rng_factory": rng_factory}
for shape, start_indices, update_shape in [
[(3,), (1,), (1,)],
[(5, 3), (1, 1), (3, 1)],
[(5, 3), (1, -2), (3, 1)],
[(7, 5, 3), (4, 1, 0), (2, 0, 1)],
[(), (), ()],
]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testDynamicUpdateSliceAgainstNumpy(self, shape, dtype, start_indices,
update_shape, rng_factory):
rng = rng_factory()
def args_maker():
return [rng(shape, dtype), rng(update_shape, dtype),
onp.array(start_indices)]
self._CheckAgainstNumpy(
dynamic_update_slice_reference, nje.dynamic_update_slice, args_maker
)
def testDynamicUpdateSliceInDim(self):
rng = jtu.rand_default()
x = rng((6, 7), onp.int32)
y = rng((3, 7), onp.int32)
z = x.copy()
z[2:5] = y
self.assertAllClose(
nje.dynamic_update_slice_in_dim(x, y, 2, 0), z, check_dtypes=True
)
if __name__ == "__main__":
tf_config.set_soft_device_placement(False)
absltest.main()
| IndexedUpdateTest |
python | django__django | django/contrib/postgres/search.py | {
"start": 12213,
"end": 12473
} | class ____(Func):
output_field = FloatField()
def __init__(self, string, expression, **extra):
if not hasattr(string, "resolve_expression"):
string = Value(string)
super().__init__(string, expression, **extra)
| TrigramWordBase |
python | kubernetes-client__python | kubernetes/client/models/v1beta1_parent_reference.py | {
"start": 383,
"end": 6319
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'group': 'str',
'name': 'str',
'namespace': 'str',
'resource': 'str'
}
attribute_map = {
'group': 'group',
'name': 'name',
'namespace': 'namespace',
'resource': 'resource'
}
def __init__(self, group=None, name=None, namespace=None, resource=None, local_vars_configuration=None): # noqa: E501
"""V1beta1ParentReference - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._group = None
self._name = None
self._namespace = None
self._resource = None
self.discriminator = None
if group is not None:
self.group = group
self.name = name
if namespace is not None:
self.namespace = namespace
self.resource = resource
@property
def group(self):
"""Gets the group of this V1beta1ParentReference. # noqa: E501
Group is the group of the object being referenced. # noqa: E501
:return: The group of this V1beta1ParentReference. # noqa: E501
:rtype: str
"""
return self._group
@group.setter
def group(self, group):
"""Sets the group of this V1beta1ParentReference.
Group is the group of the object being referenced. # noqa: E501
:param group: The group of this V1beta1ParentReference. # noqa: E501
:type: str
"""
self._group = group
@property
def name(self):
"""Gets the name of this V1beta1ParentReference. # noqa: E501
Name is the name of the object being referenced. # noqa: E501
:return: The name of this V1beta1ParentReference. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1beta1ParentReference.
Name is the name of the object being referenced. # noqa: E501
:param name: The name of this V1beta1ParentReference. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def namespace(self):
"""Gets the namespace of this V1beta1ParentReference. # noqa: E501
Namespace is the namespace of the object being referenced. # noqa: E501
:return: The namespace of this V1beta1ParentReference. # noqa: E501
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""Sets the namespace of this V1beta1ParentReference.
Namespace is the namespace of the object being referenced. # noqa: E501
:param namespace: The namespace of this V1beta1ParentReference. # noqa: E501
:type: str
"""
self._namespace = namespace
@property
def resource(self):
"""Gets the resource of this V1beta1ParentReference. # noqa: E501
Resource is the resource of the object being referenced. # noqa: E501
:return: The resource of this V1beta1ParentReference. # noqa: E501
:rtype: str
"""
return self._resource
@resource.setter
def resource(self, resource):
"""Sets the resource of this V1beta1ParentReference.
Resource is the resource of the object being referenced. # noqa: E501
:param resource: The resource of this V1beta1ParentReference. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and resource is None: # noqa: E501
raise ValueError("Invalid value for `resource`, must not be `None`") # noqa: E501
self._resource = resource
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1ParentReference):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1ParentReference):
return True
return self.to_dict() != other.to_dict()
| V1beta1ParentReference |
python | bokeh__bokeh | tests/unit/bokeh/core/test_has_props.py | {
"start": 15213,
"end": 20138
} | class ____(hp.HasProps, hp.Local):
f0 = Required(Int)
f1 = Int()
f2 = Int(default=1)
def test_HasProps_properties_with_values_maintains_order() -> None:
v0 = Some3HasProps()
assert list(v0.properties_with_values(include_defaults=False).items()) == []
assert list(v0.properties_with_values(include_defaults=True).items()) == [("f4", 4), ("f3", 3), ("f2", 2), ("f1", 1)]
v1 = Some3HasProps(f1=10, f4=40)
assert list(v1.properties_with_values(include_defaults=False).items()) == [("f4", 40), ("f1", 10)]
assert list(v1.properties_with_values(include_defaults=True).items()) == [("f4", 40), ("f3", 3), ("f2", 2), ("f1", 10)]
v2 = Some3HasProps(f4=40, f1=10)
assert list(v2.properties_with_values(include_defaults=False).items()) == [("f4", 40), ("f1", 10)]
assert list(v2.properties_with_values(include_defaults=True) .items()) == [("f4", 40), ("f3", 3), ("f2", 2), ("f1", 10)]
def test_HasProps_properties_with_values_unstable() -> None:
v0 = Some0HasProps()
assert v0.properties_with_values(include_defaults=False) == {}
v1 = Some1HasProps()
assert v1.properties_with_values(include_defaults=False) == {}
v2 = Some2HasProps()
assert v2.properties_with_values(include_defaults=False) == {"f0": v2.f0, "f1": v2.f1}
def test_HasProps_properties_with_values_unset() -> None:
v0 = Some4HasProps()
with pytest.raises(UnsetValueError):
v0.properties_with_values(include_defaults=False, include_undefined=False)
with pytest.raises(UnsetValueError):
v0.properties_with_values(include_defaults=True, include_undefined=False)
assert v0.properties_with_values(include_defaults=False, include_undefined=True) == {"f0": Undefined}
assert v0.properties_with_values(include_defaults=True, include_undefined=True) == {"f0": Undefined, "f1": 0, "f2": 1}
v1 = Some4HasProps(f0=10)
assert v1.properties_with_values(include_defaults=False, include_undefined=False) == {"f0": 10}
assert v1.properties_with_values(include_defaults=True, include_undefined=False) == {"f0": 10, "f1": 0, "f2": 1}
assert v1.properties_with_values(include_defaults=False, include_undefined=True) == {"f0": 10}
assert v1.properties_with_values(include_defaults=True, include_undefined=True) == {"f0": 10, "f1": 0, "f2": 1}
def test_HasProps_descriptors() -> None:
v0 = Some0HasProps()
d0 = v0.descriptors()
assert len(d0) == 2
assert d0[0].name == "f0"
assert d0[1].name == "f1"
v1 = Some1HasProps()
d1 = v1.descriptors()
assert len(d1) == 2
assert d1[0].name == "f0"
assert d1[1].name == "f1"
v2 = Some2HasProps()
d2 = v2.descriptors()
assert len(d2) == 5
assert d2[0].name == "f0"
assert d2[1].name == "f1"
assert d2[2].name == "f2"
assert d2[3].name == "f3"
assert d2[4].name == "f4"
def test_HasProps_abstract() -> None:
@hp.abstract
class Base(hp.HasProps, hp.Local):
pass
class Derived(Base):
pass
assert hp.is_abstract(Base) is True
assert hp.is_abstract(Derived) is False
def test_HasProps_clone() -> None:
obj0 = Some0HasProps()
obj1 = Some1HasProps()
obj2 = Some2HasProps(
f0=obj0,
f1=obj1,
f2=2,
f3="uvw",
f4=[7, 8, 9],
)
obj3 = obj2.clone()
assert obj3 is not obj2
assert obj3.f0 is obj0
assert obj3.f1 is obj1
assert obj3.f2 == 2
assert obj3.f3 == "uvw"
assert obj3.f4 is obj2.f4
def test_HasProps_clone_with_overrides() -> None:
obj0 = Some0HasProps()
obj1 = Some1HasProps()
obj2 = Some2HasProps(
f0=obj0,
f1=obj1,
f2=2,
f3="uvw",
f4=[7, 8, 9],
)
obj3 = obj2.clone(f2=3)
assert obj3 is not obj2
assert obj3.f0 is obj0
assert obj3.f1 is obj1
assert obj3.f2 == 3
assert obj3.f3 == "uvw"
assert obj3.f4 is obj2.f4
def test_HasProps_clone_with_unset_properties() -> None:
obj0 = Some4HasProps(f1=1, f2=2)
obj1 = obj0.clone()
assert obj1 is not obj0
assert obj1.properties_with_values(include_defaults=False, include_undefined=True) == dict(f0=Undefined, f1=1, f2=2)
@patch("warnings.warn")
def test_HasProps_model_redefinition(mock_warn: MagicMock) -> None:
class Foo1(hp.HasProps):
__qualified_model__ = "Foo"
class Foo2(hp.HasProps):
__qualified_model__ = "Foo"
assert mock_warn.called
msg, cls = mock_warn.call_args[0]
assert msg.startswith("Duplicate qualified model definition of 'Foo'.")
assert cls is BokehUserWarning
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| Some4HasProps |
python | facebook__pyre-check | tools/generate_taint_models/tests/function_tainter_test.py | {
"start": 536,
"end": 620
} | class ____:
x1: str
y: int
@final
@dataclass(frozen=True)
| TestRequestDataclass |
python | walkccc__LeetCode | solutions/767. Reorganize String/767.py | {
"start": 0,
"end": 666
} | class ____:
def reorganizeString(self, s: str) -> str:
count = collections.Counter(s)
if max(count.values()) > (len(s) + 1) // 2:
return ''
ans = []
maxHeap = [(-freq, c) for c, freq in count.items()]
heapq.heapify(maxHeap)
prevFreq = 0
prevChar = '@'
while maxHeap:
# Get the letter with the maximum frequency.
freq, c = heapq.heappop(maxHeap)
ans.append(c)
# Add the previous letter back s.t. any two adjacent characters are not
# the same.
if prevFreq < 0:
heapq.heappush(maxHeap, (prevFreq, prevChar))
prevFreq = freq + 1
prevChar = c
return ''.join(ans)
| Solution |
python | pennersr__django-allauth | allauth/socialaccount/migrations/0006_alter_socialaccount_extra_data.py | {
"start": 93,
"end": 435
} | class ____(migrations.Migration):
dependencies = [
("socialaccount", "0005_socialtoken_nullable_app"),
]
operations = [
migrations.AlterField(
model_name="socialaccount",
name="extra_data",
field=models.JSONField(default=dict, verbose_name="extra data"),
),
]
| Migration |
python | donnemartin__interactive-coding-challenges | graphs_trees/trie/test_trie.py | {
"start": 18,
"end": 2160
} | class ____(unittest.TestCase):
def test_trie(self):
trie = Trie()
print('Test: Insert')
words = ['a', 'at', 'has', 'hat', 'he',
'me', 'men', 'mens', 'met']
for word in words:
trie.insert(word)
for word in trie.list_words():
self.assertTrue(trie.find(word) is not None)
print('Test: Remove me')
trie.remove('me')
words_removed = ['me']
words = ['a', 'at', 'has', 'hat', 'he',
'men', 'mens', 'met']
for word in words:
self.assertTrue(trie.find(word) is not None)
for word in words_removed:
self.assertTrue(trie.find(word) is None)
print('Test: Remove mens')
trie.remove('mens')
words_removed = ['me', 'mens']
words = ['a', 'at', 'has', 'hat', 'he',
'men', 'met']
for word in words:
self.assertTrue(trie.find(word) is not None)
for word in words_removed:
self.assertTrue(trie.find(word) is None)
print('Test: Remove a')
trie.remove('a')
words_removed = ['a', 'me', 'mens']
words = ['at', 'has', 'hat', 'he',
'men', 'met']
for word in words:
self.assertTrue(trie.find(word) is not None)
for word in words_removed:
self.assertTrue(trie.find(word) is None)
print('Test: Remove has')
trie.remove('has')
words_removed = ['a', 'has', 'me', 'mens']
words = ['at', 'hat', 'he',
'men', 'met']
for word in words:
self.assertTrue(trie.find(word) is not None)
for word in words_removed:
self.assertTrue(trie.find(word) is None)
print('Success: test_trie')
def test_trie_remove_invalid(self):
print('Test: Remove from empty trie')
trie = Trie()
self.assertTrue(trie.remove('foo') is None)
def main():
test = TestTrie()
test.test_trie()
test.assertRaises(KeyError, test.test_trie_remove_invalid)
if __name__ == '__main__':
main()
| TestTrie |
python | PyCQA__pylint | tests/functional/ext/docparams/return/missing_return_doc_Numpy.py | {
"start": 2111,
"end": 2455
} | class ____:
"""test_ignores_return_in_abstract_method_numpy_2
Example of a method documenting the return type that an
implementation should return."""
def foo(self, arg):
"""docstring ...
Parameters
----------
arg : int
An argument.
"""
raise NotImplementedError()
| Foo |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/components_tests/test_component_scaffolding.py | {
"start": 455,
"end": 564
} | class ____(BaseModel):
name: Optional[str] = None
age: Optional[int] = None
| TestParamsModelWithDefaults |
python | chroma-core__chroma | chromadb/types.py | {
"start": 1125,
"end": 1199
} | class ____(Enum):
FLOAT32 = "FLOAT32"
INT32 = "INT32"
| ScalarEncoding |
python | getsentry__sentry | tests/sentry/api/endpoints/test_organization_releases.py | {
"start": 2106,
"end": 34575
} | class ____(APITestCase, BaseMetricsTestCase):
endpoint = "sentry-api-0-organization-releases"
def assert_expected_versions(self, response, expected):
assert [item["version"] for item in response.data] == [e.version for e in expected]
def test_simple(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.organization
org2 = self.create_organization()
org.flags.allow_joinleave = False
org.save()
team1 = self.create_team(organization=org)
team2 = self.create_team(organization=org)
project1 = self.create_project(teams=[team1], organization=org)
project2 = self.create_project(teams=[team2], organization=org2)
project3 = self.create_project(teams=[team1], organization=org)
self.create_member(teams=[team1], user=user, organization=org)
self.login_as(user=user)
release1 = Release.objects.create(
organization_id=org.id,
version="1",
date_added=datetime(2013, 8, 13, 3, 8, 24, 880386, tzinfo=UTC),
)
release1.add_project(project1)
release2 = Release.objects.create(
organization_id=org2.id,
version="2",
date_added=datetime(2013, 8, 14, 3, 8, 24, 880386, tzinfo=UTC),
)
release2.add_project(project2)
release3 = Release.objects.create(
organization_id=org.id,
version="3",
date_added=datetime(2013, 8, 12, 3, 8, 24, 880386, tzinfo=UTC),
date_released=datetime(2013, 8, 15, 3, 8, 24, 880386, tzinfo=UTC),
)
release3.add_project(project3)
release4 = Release.objects.create(
organization_id=org.id,
version="4",
date_added=datetime(2013, 8, 14, 3, 8, 24, 880386, tzinfo=UTC),
)
release4.add_project(project3)
response = self.get_success_response(org.slug)
self.assert_expected_versions(response, [release4, release1, release3])
def test_release_list_order_by_date_added(self) -> None:
"""
Test that ensures that by relying on the default date sorting, releases
will only be sorted according to `Release.date_added`, and
`Release.date_released` should have no effect whatsoever on that order
"""
user = self.create_user(is_staff=False, is_superuser=False)
org = self.organization
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(teams=[team], organization=org)
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
release6 = Release.objects.create(
organization_id=org.id,
version="6",
date_added=datetime(2013, 8, 10, 3, 8, 24, 880386, tzinfo=UTC),
date_released=datetime(2013, 8, 20, 3, 8, 24, 880386, tzinfo=UTC),
)
release6.add_project(project)
release7 = Release.objects.create(
organization_id=org.id,
version="7",
date_added=datetime(2013, 8, 12, 3, 8, 24, 880386, tzinfo=UTC),
date_released=datetime(2013, 8, 18, 3, 8, 24, 880386, tzinfo=UTC),
)
release7.add_project(project)
release8 = Release.objects.create(
organization_id=org.id,
version="8",
date_added=datetime(2013, 8, 14, 3, 8, 24, 880386, tzinfo=UTC),
date_released=datetime(2013, 8, 16, 3, 8, 24, 880386, tzinfo=UTC),
)
release8.add_project(project)
response = self.get_success_response(org.slug)
self.assert_expected_versions(response, [release8, release7, release6])
def test_release_list_order_by_sessions_empty(self) -> None:
self.login_as(user=self.user)
release_1 = self.create_release(version="1")
release_2 = self.create_release(version="2")
release_3 = self.create_release(version="3")
release_4 = self.create_release(version="4")
release_5 = self.create_release(version="5")
# Make sure ordering works fine when we have no session data at all
response = self.get_success_response(self.organization.slug, sort="sessions", flatten="1")
self.assert_expected_versions(
response, [release_5, release_4, release_3, release_2, release_1]
)
def test_release_list_order_by_sessions(self) -> None:
self.login_as(user=self.user)
release_1 = self.create_release(version="1")
self.store_session(self.build_session(release=release_1))
release_2 = self.create_release(version="2")
release_3 = self.create_release(version="3")
release_4 = self.create_release(version="4")
release_5 = self.create_release(version="5")
self.bulk_store_sessions([self.build_session(release=release_5) for _ in range(2)])
response = self.get_success_response(self.organization.slug, sort="sessions", flatten="1")
self.assert_expected_versions(
response, [release_5, release_1, release_4, release_3, release_2]
)
response = self.get_success_response(
self.organization.slug, sort="sessions", flatten="1", per_page=1
)
self.assert_expected_versions(response, [release_5])
response = self.get_success_response(
self.organization.slug,
sort="sessions",
flatten="1",
per_page=1,
cursor=self.get_cursor_headers(response)[1],
)
self.assert_expected_versions(response, [release_1])
response = self.get_success_response(
self.organization.slug,
sort="sessions",
flatten="1",
per_page=1,
cursor=self.get_cursor_headers(response)[1],
)
self.assert_expected_versions(response, [release_4])
response = self.get_success_response(
self.organization.slug,
sort="sessions",
flatten="1",
per_page=1,
cursor=self.get_cursor_headers(response)[1],
)
self.assert_expected_versions(response, [release_3])
response = self.get_success_response(
self.organization.slug,
sort="sessions",
flatten="1",
per_page=1,
cursor=self.get_cursor_headers(response)[1],
)
self.assert_expected_versions(response, [release_2])
response = self.get_success_response(
self.organization.slug, sort="sessions", flatten="1", per_page=3
)
self.assert_expected_versions(response, [release_5, release_1, release_4])
response = self.get_success_response(
self.organization.slug,
sort="sessions",
flatten="1",
per_page=3,
cursor=self.get_cursor_headers(response)[1],
)
self.assert_expected_versions(response, [release_3, release_2])
def test_release_list_order_by_build_number(self) -> None:
self.login_as(user=self.user)
release_1 = self.create_release(version="test@1.2+1000")
release_2 = self.create_release(version="test@1.2+1")
release_3 = self.create_release(version="test@1.2+200")
self.create_release(version="test@1.2")
self.create_release(version="test@1.2+500alpha")
response = self.get_success_response(self.organization.slug, sort="build")
self.assert_expected_versions(response, [release_1, release_3, release_2])
def test_release_list_order_by_semver(self) -> None:
self.login_as(user=self.user)
release_1 = self.create_release(version="test@2.2")
release_2 = self.create_release(version="test@10.0+122")
release_3 = self.create_release(version="test@2.2-alpha")
release_4 = self.create_release(version="test@2.2.3")
release_5 = self.create_release(version="test@2.20.3")
release_6 = self.create_release(version="test@2.20.3.3")
release_7 = self.create_release(version="test@10.0+123")
release_8 = self.create_release(version="test@some_thing")
release_9 = self.create_release(version="random_junk")
response = self.get_success_response(self.organization.slug, sort="semver")
self.assert_expected_versions(
response,
[
release_7,
release_2,
release_6,
release_5,
release_4,
release_1,
release_3,
release_9,
release_8,
],
)
def test_query_filter(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.organization
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(teams=[team], organization=org)
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
release = Release.objects.create(
organization_id=org.id,
version="foobar",
date_added=datetime(2013, 8, 13, 3, 8, 24, 880386, tzinfo=UTC),
)
release.add_project(project)
release2 = Release.objects.create(
organization_id=org.id,
version="sdfsdfsdf",
date_added=datetime(2013, 8, 13, 3, 8, 24, 880386, tzinfo=UTC),
)
release2.add_project(project)
response = self.get_success_response(org.slug, query="oob")
self.assert_expected_versions(response, [release])
response = self.get_success_response(org.slug, query="baz")
self.assert_expected_versions(response, [])
def test_release_filter(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.organization
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(teams=[team], organization=org)
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
release = Release.objects.create(
organization_id=org.id,
version="foobar",
date_added=datetime(2013, 8, 13, 3, 8, 24, 880387, tzinfo=UTC),
)
release.add_project(project)
release2 = Release.objects.create(
organization_id=org.id,
version="release2",
date_added=datetime(2013, 8, 13, 3, 8, 24, 880386, tzinfo=UTC),
)
release2.add_project(project)
response = self.get_success_response(
self.organization.slug, query=f"{RELEASE_ALIAS}:foobar"
)
self.assert_expected_versions(response, [release])
response = self.get_success_response(self.organization.slug, query=f"{RELEASE_ALIAS}:foo*")
self.assert_expected_versions(response, [release])
response = self.get_success_response(self.organization.slug, query=f"{RELEASE_ALIAS}:baz")
self.assert_expected_versions(response, [])
response = self.get_success_response(
self.organization.slug, query=f"{RELEASE_ALIAS}:[foobar]"
)
self.assert_expected_versions(response, [release])
response = self.get_success_response(
self.organization.slug, query=f"{RELEASE_ALIAS}:[foobar,release2]"
)
self.assert_expected_versions(response, [release, release2])
# NOT release
response = self.get_success_response(
self.organization.slug, query=f"!{RELEASE_ALIAS}:foobar"
)
self.assert_expected_versions(response, [release2])
response = self.get_success_response(
self.organization.slug, query=f"!{RELEASE_ALIAS}:[foobar]"
)
self.assert_expected_versions(response, [release2])
response = self.get_success_response(
self.organization.slug, query=f"!{RELEASE_ALIAS}:[foobar,release2]"
)
self.assert_expected_versions(response, [])
def test_latest_release_filter(self) -> None:
self.login_as(user=self.user)
project1 = self.create_project(teams=[self.team], organization=self.organization)
project2 = self.create_project(teams=[self.team], organization=self.organization)
self.create_release(version="test@2.2", project=project1)
self.create_release(version="test@2.2-alpha", project=project1)
project1_latest_release = self.create_release(version="test@2.2+122", project=project1)
self.create_release(version="test@20.2.8", project=project2)
project2_latest_release = self.create_release(version="test@21.0.0", project=project2)
response = self.get_success_response(
self.organization.slug, query=f"{RELEASE_ALIAS}:latest"
)
self.assert_expected_versions(
response,
[
project2_latest_release,
project1_latest_release,
],
)
def test_query_filter_suffix(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.organization
org.flags.allow_joinleave = False
org.save()
team = self.create_team(organization=org)
project = self.create_project(teams=[team], organization=org)
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
release = Release.objects.create(
organization_id=org.id,
version="com.foo.BarApp@1.0+1234",
date_added=datetime(2013, 8, 13, 3, 8, 24, 880386, tzinfo=UTC),
)
release.add_project(project)
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.get(url + "?query=1.0+(1234)", format="json")
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]["version"] == release.version
url = reverse(
"sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": org.slug}
)
response = self.client.get(url + "?query=1.0%2B1234", format="json")
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]["version"] == release.version
def test_semver_filter(self) -> None:
self.login_as(user=self.user)
release_1 = self.create_release(version="test@1.2.4+124")
release_2 = self.create_release(version="test@1.2.3+123")
release_3 = self.create_release(version="test2@1.2.5+125")
release_4 = self.create_release(version="some.release")
response = self.get_success_response(self.organization.slug, query=f"{SEMVER_ALIAS}:>1.2.3")
self.assert_expected_versions(response, [release_3, release_1])
response = self.get_success_response(
self.organization.slug, query=f"{SEMVER_ALIAS}:>=1.2.3"
)
self.assert_expected_versions(response, [release_3, release_2, release_1])
response = self.get_success_response(self.organization.slug, query=f"{SEMVER_ALIAS}:1.2.*")
self.assert_expected_versions(response, [release_3, release_2, release_1])
# NOT semver version
response = self.get_success_response(self.organization.slug, query=f"!{SEMVER_ALIAS}:1.2.3")
self.assert_expected_versions(response, [release_4, release_3, release_1])
response = self.get_success_response(
self.organization.slug, query=f"{SEMVER_ALIAS}:>=1.2.3", sort="semver"
)
self.assert_expected_versions(response, [release_3, release_1, release_2])
response = self.get_success_response(self.organization.slug, query=f"{SEMVER_ALIAS}:2.2.1")
self.assert_expected_versions(response, [])
response = self.get_success_response(
self.organization.slug, query=f"{SEMVER_PACKAGE_ALIAS}:test2"
)
self.assert_expected_versions(response, [release_3])
response = self.get_success_response(
self.organization.slug, query=f"{SEMVER_PACKAGE_ALIAS}:test"
)
self.assert_expected_versions(response, [release_2, release_1])
# NOT semver package
response = self.get_success_response(
self.organization.slug, query=f"!{SEMVER_PACKAGE_ALIAS}:test2"
)
self.assert_expected_versions(response, [release_4, release_2, release_1])
response = self.get_success_response(
self.organization.slug, query=f"{SEMVER_BUILD_ALIAS}:>124"
)
self.assert_expected_versions(response, [release_3])
response = self.get_success_response(
self.organization.slug, query=f"{SEMVER_BUILD_ALIAS}:<125"
)
self.assert_expected_versions(response, [release_2, release_1])
# NOT semver build
response = self.get_success_response(
self.organization.slug, query=f"!{SEMVER_BUILD_ALIAS}:125"
)
self.assert_expected_versions(response, [release_4, release_2, release_1])
def test_release_stage_filter(self) -> None:
self.login_as(user=self.user)
response = self.get_success_response(
self.organization.slug,
query=f"{RELEASE_STAGE_ALIAS}:adopted",
environment=self.environment.name,
)
assert [r["version"] for r in response.data] == []
replaced_release = self.create_release(version="replaced_release")
adopted_release = self.create_release(version="adopted_release")
not_adopted_release = self.create_release(version="not_adopted_release")
adopted_rpe = ReleaseProjectEnvironment.objects.create(
project_id=self.project.id,
release_id=adopted_release.id,
environment_id=self.environment.id,
adopted=timezone.now(),
)
ReleaseProjectEnvironment.objects.create(
project_id=self.project.id,
release_id=replaced_release.id,
environment_id=self.environment.id,
adopted=timezone.now() - timedelta(minutes=5),
unadopted=timezone.now(),
)
ReleaseProjectEnvironment.objects.create(
project_id=self.project.id,
release_id=not_adopted_release.id,
environment_id=self.environment.id,
)
response = self.get_success_response(
self.organization.slug,
query=f"{RELEASE_STAGE_ALIAS}:{ReleaseStages.ADOPTED.value}",
environment=self.environment.name,
)
self.assert_expected_versions(response, [adopted_release])
response = self.get_success_response(
self.organization.slug,
query=f"{RELEASE_STAGE_ALIAS}:{ReleaseStages.LOW_ADOPTION.value}",
environment=self.environment.name,
)
self.assert_expected_versions(response, [not_adopted_release])
response = self.get_success_response(
self.organization.slug,
query=f"{RELEASE_STAGE_ALIAS}:{ReleaseStages.REPLACED.value}",
environment=self.environment.name,
)
self.assert_expected_versions(response, [replaced_release])
# NOT release stage
response = self.get_success_response(
self.organization.slug,
query=f"!{RELEASE_STAGE_ALIAS}:{ReleaseStages.REPLACED.value}",
environment=self.environment.name,
)
self.assert_expected_versions(response, [not_adopted_release, adopted_release])
response = self.get_success_response(
self.organization.slug,
query=f"{RELEASE_STAGE_ALIAS}:[{ReleaseStages.ADOPTED.value},{ReleaseStages.REPLACED.value}]",
environment=self.environment.name,
)
self.assert_expected_versions(response, [adopted_release, replaced_release])
response = self.get_success_response(
self.organization.slug,
query=f"{RELEASE_STAGE_ALIAS}:[{ReleaseStages.LOW_ADOPTION.value}]",
environment=self.environment.name,
)
self.assert_expected_versions(response, [not_adopted_release])
response = self.get_success_response(
self.organization.slug,
sort="adoption",
)
self.assert_expected_versions(
response, [adopted_release, replaced_release, not_adopted_release]
)
adopted_rpe.update(adopted=timezone.now() - timedelta(minutes=15))
# Replaced should come first now.
response = self.get_success_response(
self.organization.slug,
sort="adoption",
)
self.assert_expected_versions(
response, [replaced_release, adopted_release, not_adopted_release]
)
response = self.get_success_response(self.organization.slug, sort="adoption", per_page=1)
self.assert_expected_versions(response, [replaced_release])
next_cursor = self.get_cursor_headers(response)[1]
response = self.get_success_response(
self.organization.slug,
sort="adoption",
per_page=1,
cursor=next_cursor,
)
self.assert_expected_versions(response, [adopted_release])
next_cursor = self.get_cursor_headers(response)[1]
response = self.get_success_response(
self.organization.slug,
sort="adoption",
per_page=1,
cursor=next_cursor,
)
prev_cursor = self.get_cursor_headers(response)[0]
self.assert_expected_versions(response, [not_adopted_release])
response = self.get_success_response(
self.organization.slug,
sort="adoption",
per_page=1,
cursor=prev_cursor,
)
prev_cursor = self.get_cursor_headers(response)[0]
self.assert_expected_versions(response, [adopted_release])
response = self.get_success_response(
self.organization.slug,
sort="adoption",
per_page=1,
cursor=prev_cursor,
)
prev_cursor = self.get_cursor_headers(response)[0]
self.assert_expected_versions(response, [replaced_release])
adopted_rpe.update(adopted=timezone.now() - timedelta(minutes=15))
response = self.get_success_response(
self.organization.slug,
query=f"{RELEASE_STAGE_ALIAS}:[{ReleaseStages.LOW_ADOPTION.value},{ReleaseStages.REPLACED.value}]",
sort="adoption",
environment=self.environment.name,
)
self.assert_expected_versions(response, [replaced_release, not_adopted_release])
response = self.get_response(
self.organization.slug,
query=f"{RELEASE_STAGE_ALIAS}:invalid_stage",
environment=self.environment.name,
)
assert response.status_code == 400
response = self.get_response(
self.organization.slug,
query=f"{RELEASE_STAGE_ALIAS}:{ReleaseStages.ADOPTED.value}",
# No environment
)
assert response.status_code == 400
def test_project_permissions(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.create_organization()
org.flags.allow_joinleave = False
org.save()
team1 = self.create_team(organization=org)
team2 = self.create_team(organization=org)
project1 = self.create_project(teams=[team1], organization=org)
project2 = self.create_project(teams=[team2], organization=org)
self.create_member(teams=[team1], user=user, organization=org)
self.login_as(user=user)
release1 = Release.objects.create(
organization_id=org.id,
version="1",
date_added=datetime(2013, 8, 13, 3, 8, 24, 880386, tzinfo=UTC),
)
release1.add_project(project1)
release2 = Release.objects.create(
organization_id=org.id,
version="2",
date_added=datetime(2013, 8, 14, 3, 8, 24, 880386, tzinfo=UTC),
)
release2.add_project(project2)
release3 = Release.objects.create(
organization_id=org.id,
version="3",
date_added=datetime(2013, 8, 12, 3, 8, 24, 880386, tzinfo=UTC),
date_released=datetime(2013, 8, 15, 3, 8, 24, 880386, tzinfo=UTC),
)
release3.add_project(project1)
ax = access.from_user(user, org)
assert ax.has_projects_access([project1])
assert ax.has_project_membership(project1)
assert not ax.has_project_membership(project2)
response = self.get_success_response(org.slug)
self.assert_expected_versions(response, [release1, release3])
def test_project_permissions_open_access(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.create_organization()
org.flags.allow_joinleave = True
org.save()
team1 = self.create_team(organization=org)
team2 = self.create_team(organization=org)
project1 = self.create_project(teams=[team1], organization=org)
project2 = self.create_project(teams=[team2], organization=org)
self.create_member(teams=[team1], user=user, organization=org)
self.login_as(user=user)
release1 = Release.objects.create(
organization_id=org.id,
version="1",
date_added=datetime(2013, 8, 13, 3, 8, 24, 880386, tzinfo=UTC),
)
release1.add_project(project1)
release2 = Release.objects.create(
organization_id=org.id,
version="2",
date_added=datetime(2013, 8, 14, 3, 8, 24, 880386, tzinfo=UTC),
)
release2.add_project(project2)
release3 = Release.objects.create(
organization_id=org.id,
version="3",
date_added=datetime(2013, 8, 12, 3, 8, 24, 880386, tzinfo=UTC),
date_released=datetime(2013, 8, 15, 3, 8, 24, 880386, tzinfo=UTC),
)
release3.add_project(project1)
ax = access.from_user(user, org)
assert ax.has_projects_access([project1, project2])
assert ax.has_project_membership(project1)
assert not ax.has_project_membership(project2)
response = self.get_success_response(org.slug)
self.assert_expected_versions(response, [release1, release3])
def test_all_projects_parameter(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.create_organization()
org.flags.allow_joinleave = True
org.save()
team1 = self.create_team(organization=org)
team2 = self.create_team(organization=org)
project1 = self.create_project(teams=[team1], organization=org)
project2 = self.create_project(teams=[team2], organization=org)
self.create_member(teams=[team1], user=user, organization=org)
self.login_as(user=user)
release1 = Release.objects.create(
organization_id=org.id,
version="1",
date_added=datetime(2013, 8, 13, 3, 8, 24, 880386, tzinfo=UTC),
)
release1.add_project(project1)
release2 = Release.objects.create(
organization_id=org.id,
version="2",
date_added=datetime(2013, 8, 14, 3, 8, 24, 880386, tzinfo=UTC),
)
release2.add_project(project2)
response = self.get_success_response(org.slug, project=[-1])
self.assert_expected_versions(response, [release2, release1])
def test_new_org(self) -> None:
user = self.create_user(is_staff=False, is_superuser=False)
org = self.organization
team = self.create_team(organization=org)
self.create_member(teams=[team], user=user, organization=org)
self.login_as(user=user)
response = self.get_success_response(org.slug)
self.assert_expected_versions(response, [])
def test_archive_release(self) -> None:
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-organization-releases",
kwargs={"organization_id_or_slug": self.organization.slug},
)
# test legacy status value of None (=open)
self.release.status = None
self.release.save()
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert len(response.data) == 1
(release_data,) = response.data
response = self.client.post(
url,
format="json",
data={
"version": release_data["version"],
"projects": [x["slug"] for x in release_data["projects"]],
"status": "archived",
},
)
assert response.status_code == 208, response.content
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert len(response.data) == 0
response = self.client.get(url + "?status=archived", format="json")
assert response.status_code == 200, response.content
assert len(response.data) == 1
response = self.client.get(url + "?status=", format="json")
assert response.status_code == 200, response.content
assert len(response.data) == 1
def test_disallow_archive_release_when_no_open_membership(self) -> None:
release = self.create_release(project=self.project, version="test@1.0")
# disable Open Membership
self.organization.flags.allow_joinleave = False
self.organization.save()
# user has no access to all the projects
user_no_team = self.create_user(is_superuser=False)
self.create_member(
user=user_no_team, organization=self.organization, role="member", teams=[]
)
self.login_as(user_no_team)
url = reverse(
"sentry-api-0-organization-releases",
kwargs={"organization_id_or_slug": self.organization.slug},
)
# trying to archive the release
response = self.client.post(
url,
format="json",
data={
"version": release.version,
"projects": [],
"status": "archived",
},
)
assert response.status_code == 400
assert b"You do not have permission to one of the projects: bar" in response.content
def test_disallow_projects_update_for_release_when_no_open_membership(self) -> None:
team1 = self.create_team(organization=self.organization)
team2 = self.create_team(organization=self.organization)
project1 = self.create_project(
name="not_yours", teams=[team1], organization=self.organization
)
project2 = self.create_project(teams=[team2], organization=self.organization)
release = self.create_release(project=project1, version="test@1.0")
# disable Open Membership
self.organization.flags.allow_joinleave = False
self.organization.save()
# user has no access to projects of team1
user_team2 = self.create_user(is_superuser=False)
self.create_member(
user=user_team2, organization=self.organization, role="member", teams=[team2]
)
self.login_as(user_team2)
url = reverse(
"sentry-api-0-organization-releases",
kwargs={"organization_id_or_slug": self.organization.slug},
)
# trying to update projects of the release
response = self.client.post(
url,
format="json",
data={
"version": release.version,
"projects": [project2.slug],
},
)
assert response.status_code == 400
assert b"You do not have permission to one of the projects: not_yours" in response.content
| OrganizationReleaseListTest |
python | tensorflow__tensorflow | tensorflow/tools/common/traverse_test.py | {
"start": 1096,
"end": 2467
} | class ____(googletest.TestCase):
def test_cycle(self):
class Cyclist(object):
pass
Cyclist.cycle = Cyclist
visitor = TestVisitor()
traverse.traverse(Cyclist, visitor)
# We simply want to make sure we terminate.
def test_module(self):
visitor = TestVisitor()
traverse.traverse(test_module1, visitor)
called = [parent for _, parent, _ in visitor.call_log]
self.assertIn(test_module1.ModuleClass1, called)
self.assertIn(test_module2.ModuleClass2, called)
def test_class(self):
visitor = TestVisitor()
traverse.traverse(TestVisitor, visitor)
self.assertEqual(TestVisitor,
visitor.call_log[0][1])
# There are a bunch of other members, but make sure that the ones we know
# about are there.
self.assertIn('__init__', [name for name, _ in visitor.call_log[0][2]])
self.assertIn('__call__', [name for name, _ in visitor.call_log[0][2]])
# There are more classes descended into, at least __class__ and
# __class__.__base__, neither of which are interesting to us, and which may
# change as part of Python version etc., so we don't test for them.
def test_non_class(self):
integer = 5
visitor = TestVisitor()
traverse.traverse(integer, visitor)
self.assertEqual([], visitor.call_log)
if __name__ == '__main__':
googletest.main()
| TraverseTest |
python | getsentry__sentry | src/sentry/integrations/client.py | {
"start": 78,
"end": 315
} | class ____(BaseApiClient):
integration_type = "integration"
metrics_prefix = "integrations"
logger = logging.getLogger("sentry.integrations.client")
# Used in metrics and logging.
integration_name = "undefined"
| ApiClient |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/strategies.py | {
"start": 11640,
"end": 17560
} | class ____(LoaderStrategy):
"""Provide loading behavior for a deferred :class:`.ColumnProperty`."""
__slots__ = "columns", "group", "raiseload"
def __init__(self, parent, strategy_key):
super().__init__(parent, strategy_key)
if hasattr(self.parent_property, "composite_class"):
raise NotImplementedError(
"Deferred loading for composite types not implemented yet"
)
self.raiseload = self.strategy_opts.get("raiseload", False)
self.columns = self.parent_property.columns
self.group = self.parent_property.group
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
# for a DeferredColumnLoader, this method is only used during a
# "row processor only" query; see test_deferred.py ->
# tests with "rowproc_only" in their name. As of the 1.0 series,
# loading._instance_processor doesn't use a "row processing" function
# to populate columns, instead it uses data in the "populators"
# dictionary. Normally, the DeferredColumnLoader.setup_query()
# sets up that data in the "memoized_populators" dictionary
# and "create_row_processor()" here is never invoked.
if (
context.refresh_state
and context.query._compile_options._only_load_props
and self.key in context.query._compile_options._only_load_props
):
self.parent_property._get_strategy(
(("deferred", False), ("instrument", True))
).create_row_processor(
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
)
elif not self.is_class_level:
if self.raiseload:
set_deferred_for_local_state = (
self.parent_property._raise_column_loader
)
else:
set_deferred_for_local_state = (
self.parent_property._deferred_column_loader
)
populators["new"].append((self.key, set_deferred_for_local_state))
else:
populators["expire"].append((self.key, False))
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(
self.parent_property,
mapper,
useobject=False,
compare_function=self.columns[0].type.compare_values,
callable_=self._load_for_state,
load_on_unexpire=False,
default_scalar_value=self.parent_property._default_scalar_value,
)
def setup_query(
self,
compile_state,
query_entity,
path,
loadopt,
adapter,
column_collection,
memoized_populators,
only_load_props=None,
**kw,
):
if (
(
compile_state.compile_options._render_for_subquery
and self.parent_property._renders_in_subqueries
)
or (
loadopt
and set(self.columns).intersection(
self.parent._should_undefer_in_wildcard
)
)
or (
loadopt
and self.group
and loadopt.local_opts.get(
"undefer_group_%s" % self.group, False
)
)
or (only_load_props and self.key in only_load_props)
):
self.parent_property._get_strategy(
(("deferred", False), ("instrument", True))
).setup_query(
compile_state,
query_entity,
path,
loadopt,
adapter,
column_collection,
memoized_populators,
**kw,
)
elif self.is_class_level:
memoized_populators[self.parent_property] = _SET_DEFERRED_EXPIRED
elif not self.raiseload:
memoized_populators[self.parent_property] = _DEFER_FOR_STATE
else:
memoized_populators[self.parent_property] = _RAISE_FOR_STATE
def _load_for_state(self, state, passive):
if not state.key:
return LoaderCallableStatus.ATTR_EMPTY
if not passive & PassiveFlag.SQL_OK:
return LoaderCallableStatus.PASSIVE_NO_RESULT
localparent = state.manager.mapper
if self.group:
toload = [
p.key
for p in localparent.iterate_properties
if isinstance(p, StrategizedProperty)
and isinstance(p.strategy, _DeferredColumnLoader)
and p.group == self.group
]
else:
toload = [self.key]
# narrow the keys down to just those which have no history
group = [k for k in toload if k in state.unmodified]
session = _state_session(state)
if session is None:
raise orm_exc.DetachedInstanceError(
"Parent instance %s is not bound to a Session; "
"deferred load operation of attribute '%s' cannot proceed"
% (orm_util.state_str(state), self.key)
)
if self.raiseload:
self._invoke_raise_load(state, passive, "raise")
loading._load_scalar_attributes(
state.mapper, state, set(group), PASSIVE_OFF
)
return LoaderCallableStatus.ATTR_WAS_SET
def _invoke_raise_load(self, state, passive, lazy):
raise sa_exc.InvalidRequestError(
"'%s' is not available due to raiseload=True" % (self,)
)
| _DeferredColumnLoader |
python | pypa__pipenv | pipenv/vendor/click/formatting.py | {
"start": 3082,
"end": 9706
} | class ____:
"""This class helps with formatting text-based help pages. It's
usually just needed for very special internal cases, but it's also
exposed so that developers can write their own fancy outputs.
At present, it always writes into memory.
:param indent_increment: the additional increment for each level.
:param width: the width for the text. This defaults to the terminal
width clamped to a maximum of 78.
"""
def __init__(
self,
indent_increment: int = 2,
width: t.Optional[int] = None,
max_width: t.Optional[int] = None,
) -> None:
import shutil
self.indent_increment = indent_increment
if max_width is None:
max_width = 80
if width is None:
width = FORCED_WIDTH
if width is None:
width = max(min(shutil.get_terminal_size().columns, max_width) - 2, 50)
self.width = width
self.current_indent = 0
self.buffer: t.List[str] = []
def write(self, string: str) -> None:
"""Writes a unicode string into the internal buffer."""
self.buffer.append(string)
def indent(self) -> None:
"""Increases the indentation."""
self.current_indent += self.indent_increment
def dedent(self) -> None:
"""Decreases the indentation."""
self.current_indent -= self.indent_increment
def write_usage(
self, prog: str, args: str = "", prefix: t.Optional[str] = None
) -> None:
"""Writes a usage line into the buffer.
:param prog: the program name.
:param args: whitespace separated list of arguments.
:param prefix: The prefix for the first line. Defaults to
``"Usage: "``.
"""
if prefix is None:
prefix = f"{_('Usage:')} "
usage_prefix = f"{prefix:>{self.current_indent}}{prog} "
text_width = self.width - self.current_indent
if text_width >= (term_len(usage_prefix) + 20):
# The arguments will fit to the right of the prefix.
indent = " " * term_len(usage_prefix)
self.write(
wrap_text(
args,
text_width,
initial_indent=usage_prefix,
subsequent_indent=indent,
)
)
else:
# The prefix is too long, put the arguments on the next line.
self.write(usage_prefix)
self.write("\n")
indent = " " * (max(self.current_indent, term_len(prefix)) + 4)
self.write(
wrap_text(
args, text_width, initial_indent=indent, subsequent_indent=indent
)
)
self.write("\n")
def write_heading(self, heading: str) -> None:
"""Writes a heading into the buffer."""
self.write(f"{'':>{self.current_indent}}{heading}:\n")
def write_paragraph(self) -> None:
"""Writes a paragraph into the buffer."""
if self.buffer:
self.write("\n")
def write_text(self, text: str) -> None:
"""Writes re-indented text into the buffer. This rewraps and
preserves paragraphs.
"""
indent = " " * self.current_indent
self.write(
wrap_text(
text,
self.width,
initial_indent=indent,
subsequent_indent=indent,
preserve_paragraphs=True,
)
)
self.write("\n")
def write_dl(
self,
rows: t.Sequence[t.Tuple[str, str]],
col_max: int = 30,
col_spacing: int = 2,
) -> None:
"""Writes a definition list into the buffer. This is how options
and commands are usually formatted.
:param rows: a list of two item tuples for the terms and values.
:param col_max: the maximum width of the first column.
:param col_spacing: the number of spaces between the first and
second column.
"""
rows = list(rows)
widths = measure_table(rows)
if len(widths) != 2:
raise TypeError("Expected two columns for definition list")
first_col = min(widths[0], col_max) + col_spacing
for first, second in iter_rows(rows, len(widths)):
self.write(f"{'':>{self.current_indent}}{first}")
if not second:
self.write("\n")
continue
if term_len(first) <= first_col - col_spacing:
self.write(" " * (first_col - term_len(first)))
else:
self.write("\n")
self.write(" " * (first_col + self.current_indent))
text_width = max(self.width - first_col - 2, 10)
wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True)
lines = wrapped_text.splitlines()
if lines:
self.write(f"{lines[0]}\n")
for line in lines[1:]:
self.write(f"{'':>{first_col + self.current_indent}}{line}\n")
else:
self.write("\n")
@contextmanager
def section(self, name: str) -> t.Iterator[None]:
"""Helpful context manager that writes a paragraph, a heading,
and the indents.
:param name: the section name that is written as heading.
"""
self.write_paragraph()
self.write_heading(name)
self.indent()
try:
yield
finally:
self.dedent()
@contextmanager
def indentation(self) -> t.Iterator[None]:
"""A context manager that increases the indentation."""
self.indent()
try:
yield
finally:
self.dedent()
def getvalue(self) -> str:
"""Returns the buffer contents."""
return "".join(self.buffer)
def join_options(options: t.Sequence[str]) -> t.Tuple[str, bool]:
"""Given a list of option strings this joins them in the most appropriate
way and returns them in the form ``(formatted_string,
any_prefix_is_slash)`` where the second item in the tuple is a flag that
indicates if any of the option prefixes was a slash.
"""
rv = []
any_prefix_is_slash = False
for opt in options:
prefix = split_opt(opt)[0]
if prefix == "/":
any_prefix_is_slash = True
rv.append((len(prefix), opt))
rv.sort(key=lambda x: x[0])
return ", ".join(x[1] for x in rv), any_prefix_is_slash
| HelpFormatter |
python | joke2k__faker | tests/providers/test_date_time.py | {
"start": 25726,
"end": 26203
} | class ____(unittest.TestCase):
"""Tests date_time in the hy_AM locale"""
def setUp(self):
self.fake = Faker("hy_AM")
Faker.seed(0)
def test_day(self):
day = self.fake.day_of_week()
assert isinstance(day, str)
assert day in HyAmProvider.DAY_NAMES.values()
def test_month(self):
month = self.fake.month_name()
assert isinstance(month, str)
assert month in HyAmProvider.MONTH_NAMES.values()
| TestHyAm |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/matchValue1.py | {
"start": 2275,
"end": 3030
} | class ____(Enum):
red = 1
blue = 2
green = 3
def test_enum_narrowing(m: Medal | Color | int):
match m:
case Medal.gold as a1:
reveal_type(a1, expected_text="Literal[Medal.gold]")
reveal_type(m, expected_text="Literal[Medal.gold]")
case Medal.silver as b1:
reveal_type(b1, expected_text="Literal[Medal.silver]")
reveal_type(m, expected_text="Literal[Medal.silver]")
case Color() as c1:
reveal_type(c1, expected_text="Color")
reveal_type(m, expected_text="Color")
case d1:
reveal_type(d1, expected_text="int | Literal[Medal.bronze]")
reveal_type(m, expected_text="int | Literal[Medal.bronze]")
@dataclass
| Color |
python | Netflix__metaflow | test/core/tests/basic_log.py | {
"start": 67,
"end": 1882
} | class ____(MetaflowTest):
"""
Test that log messages emitted in the first step
are saved and readable.
"""
PRIORITY = 0
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
@steps(0, ["singleton"], required=True)
def step_single(self):
import sys
msg1 = "stdout: A regular message.\n"
msg2 = "stdout: A message with unicode: \u5e74\n"
sys.stdout.write(msg1)
if not sys.stdout.encoding:
sys.stdout.write(msg2.encode("utf8"))
else:
sys.stdout.write(msg2)
msg3 = "stderr: A regular message.\n"
msg4 = "stderr: A message with unicode: \u5e74\n"
sys.stderr.write(msg3)
if not sys.stderr.encoding:
sys.stderr.write(msg4.encode("utf8"))
else:
sys.stderr.write(msg4)
@steps(1, ["all"])
def step_all(self):
pass
def check_results(self, flow, checker):
msg1 = "stdout: A regular message.\n"
msg2 = "stdout: A message with unicode: \u5e74\n"
stdout_combined_msg = "".join([msg1, msg2, ""])
msg3 = "stderr: A regular message.\n"
msg4 = "stderr: A message with unicode: \u5e74\n"
stderr_combined_msg = "".join([msg3, msg4, ""])
for step in flow:
if step.name not in ["start", "end"]:
checker.assert_log(
step.name, "stdout", stdout_combined_msg, exact_match=False
)
checker.assert_log(
step.name, "stderr", stderr_combined_msg, exact_match=False
)
| BasicLogTest |
python | google__jax | jax/_src/test_util.py | {
"start": 8433,
"end": 20620
} | class ____(threading.local):
def __init__(self):
self.counts = {} # Mapping from string name to count.
self.nested_device_put_count = 0 # Number of recursive calls to device_put
# Per-function counts
self.infer_params_fun_counts = None
self.lower_jaxpr_to_fun_counts = None
self.collect_lowered_jaxprs = None
thread_local_state = EventThreadLocalState()
def event_listener(name, *args):
counts = thread_local_state.counts
counts[name] = counts.get(name, 0) + 1
# device_put handlers might call `dispatch.device_put` (e.g. on an
# underlying payload or several). We only want to count these
# recursive puts once, so we skip counting more than the outermost
# one in such a call stack.
if name == "batched_device_put_start":
if thread_local_state.nested_device_put_count == 0:
counts["batched_device_put"] = counts.get("batched_device_put", 0) + 1
thread_local_state.nested_device_put_count += 1
elif name == "batched_device_put_end":
thread_local_state.nested_device_put_count -= 1
elif name == "pjit._infer_params_impl":
# For infer_params, we collect per-function data, but only while a context
# manager is active.
infer_counts = thread_local_state.infer_params_fun_counts
if infer_counts is not None:
(fun,) = args
infer_counts[fun] += 1
elif name == "lower_jaxpr_to_fun":
# For infer_params, we collect per-function data, but only while a context
# manager is active.
lower_counts = thread_local_state.lower_jaxpr_to_fun_counts
if lower_counts is not None:
(fun,) = args
lower_counts[fun] += 1
elif name == "mlir.collect_lowered_jaxprs":
collection = thread_local_state.collect_lowered_jaxprs
if collection is not None:
collection.append(args)
util.test_event_listener = event_listener
def count_events(event):
"Returns a context-manager that yields a function that counts a test event."
@contextmanager
def count_event():
before = thread_local_state.counts.get(event, 0)
yield lambda: thread_local_state.counts.get(event, 0) - before
return count_event
count_device_put = count_events("batched_device_put")
count_device_put_fast_path_hit = count_events("batched_copy_array")
count_pjit_cpp_cache_miss = count_events("jit_cpp_cache_miss")
count_jit_tracing_cache_miss = count_events("create_pjit_jaxpr")
count_aot_jit_cpp_cache_miss = count_events("stages_compiled_call")
count_jit_and_pmap_lowerings = count_events("lower_jaxpr_to_module")
count_jit_compilation_cache_miss = count_events("pxla_cached_compilation")
count_compilation_after_persistent_cache_miss = count_events("compile_after_persistent_compilation_miss")
count_jax_array_shard_arg_calls = count_events("_array_shard_arg")
@contextmanager
def count_primitive_compiles():
dispatch.xla_primitive_callable.cache_clear()
count = [-1]
try:
yield lambda: count[0]
finally:
count[0] = dispatch.xla_primitive_callable.cache_info().misses
@contextmanager
def count_jit_infer_params_cache_miss():
  """Yields a Counter mapping each function to the number of
  `pjit._infer_params_impl` calls made for it while the context is active."""
  # Only one such collection may be in progress at a time.
  assert thread_local_state.infer_params_fun_counts is None
  fun_counter = collections.Counter()
  thread_local_state.infer_params_fun_counts = fun_counter
  try:
    yield fun_counter
  finally:
    # Stop collecting.
    thread_local_state.infer_params_fun_counts = None
@contextmanager
def count_subjaxpr_to_hlo_conversion(fun_name):
  """Yields a callable returning how many times a function named `fun_name`
  was lowered (via `lower_jaxpr_to_fun`) while the context was active."""
  # Only one such collection may be in progress at a time.
  assert thread_local_state.lower_jaxpr_to_fun_counts is None
  per_fun = collections.Counter()
  thread_local_state.lower_jaxpr_to_fun_counts = per_fun
  try:
    yield lambda: per_fun[fun_name]
  finally:
    thread_local_state.lower_jaxpr_to_fun_counts = None
@contextmanager
def collect_lowered_jaxprs() -> Iterator[Sequence[tuple[core.ClosedJaxpr,
                                                        mlir.ir.Module]]]:
  """Yields a list that accumulates every (jaxpr, mlir_module) pair lowered
  while the context is active."""
  # Only one collection may be in progress at a time.
  assert thread_local_state.collect_lowered_jaxprs is None
  pairs: list[tuple[core.ClosedJaxpr, mlir.ir.Module]] = []
  thread_local_state.collect_lowered_jaxprs = pairs
  try:
    yield pairs
  finally:
    thread_local_state.collect_lowered_jaxprs = None
@contextmanager
def assert_num_jit_and_pmap_compilations(times):
  """Fails with AssertionError unless exactly `times` jit/pmap lowerings
  happen while the context is active."""
  with count_jit_and_pmap_lowerings() as lowering_count:
    yield
  observed = lowering_count()
  if observed != times:
    raise AssertionError(
        f"Expected exactly {times} XLA compilations, but executed {observed}")
@contextmanager
def count_internal_device_puts():
  """Yields a callable returning a dict of internal device_put counters that
  changed while the context was active (populated only on exit)."""
  baseline = _jaxlib._jax.get_internal_device_put_info()
  deltas = {}
  try:
    yield lambda: deltas
  finally:
    current = _jaxlib._jax.get_internal_device_put_info()
    for key, value in current.items():
      delta = value - baseline.get(key, 0)
      if delta != 0:
        deltas[key] = delta
def jaxlib_version() -> tuple[int, ...]:
  """Returns the installed jaxlib version as a tuple of ints."""
  return _jaxlib.version
def device_under_test():
  """Returns the platform name of the device being tested.

  Prefers the configured test-device override (`_TEST_DUT`) when set;
  otherwise falls back to the default backend's platform name
  (e.g. "tpu", "gpu", "METAL").
  """
  return _TEST_DUT.value or xla_bridge.get_backend().platform
def supported_dtypes():
  """Returns the set of dtypes supported by the device under test.

  When x64 mode is disabled, the 64-bit types are removed from the set.
  """
  if device_under_test() == "tpu":
    types = {np.bool_, _dtypes.int4, np.int8, np.int16, np.int32,
             _dtypes.uint4, np.uint8, np.uint16, np.uint32,
             _dtypes.bfloat16, np.float16, np.float32, np.complex64,
             _dtypes.float8_e4m3fn, _dtypes.float8_e4m3b11fnuz,
             _dtypes.float8_e5m2}
  elif device_under_test() == "gpu":
    types = {np.bool_, np.int8, np.int16, np.int32, np.int64,
             np.uint8, np.uint16, np.uint32, np.uint64,
             _dtypes.bfloat16, np.float16, np.float32, np.float64,
             np.complex64, np.complex128, _dtypes.float8_e4m3fn,
             _dtypes.float8_e5m2}
  elif device_under_test() == "METAL":
    types = {np.int32, np.uint32, np.float32}
  else:
    # CPU and any other platform: the widest set.
    types = {np.bool_, _dtypes.int4, np.int8, np.int16, np.int32, np.int64,
             _dtypes.uint4, np.uint8, np.uint16, np.uint32, np.uint64,
             _dtypes.bfloat16, np.float16, np.float32, np.float64,
             np.complex64, np.complex128}
  if not config.enable_x64.value:
    types -= {np.uint64, np.int64, np.float64, np.complex128}
  return types
def is_device_rocm():
  # ROCm builds report "rocm" in the backend's platform_version string.
  return 'rocm' in xla_bridge.get_backend().platform_version

def is_device_cuda():
  # CUDA builds report "cuda" in the backend's platform_version string.
  return 'cuda' in xla_bridge.get_backend().platform_version

def is_cloud_tpu():
  # True when running inside a Cloud TPU VM.
  return running_in_cloud_tpu_vm
# Predicates reflecting how the installed jaxlib was built (optimization
# level and sanitizer instrumentation), as reported by the jaxlib extension.
def is_optimized_build():
  return _jaxlib._jax.is_optimized_build()

def is_asan():
  return _jaxlib._jax.is_asan()

def is_msan():
  return _jaxlib._jax.is_msan()

def is_tsan():
  return _jaxlib._jax.is_tsan()

def is_sanitized():
  return _jaxlib._jax.is_sanitized()
# Returns True if it is not a Cloud TPU. If it is a Cloud TPU, returns True
# only if the runtime was built on or after `date`.
# TODO(b/327203806): after libtpu adds a XLA version and the oldest supported
# libtpu contains the XLA version, remove using build time to skip tests.
def if_cloud_tpu_at_least(year: int, month: int, day: int):
  """Returns True if not on Cloud TPU, or if the libtpu build date is at
  least (year, month, day)."""
  min_date = datetime.date(year, month, day)
  if not is_cloud_tpu():
    return True
  # The last line of a Cloud TPU platform_version looks like:
  #   Built on Oct 30 2023 03:04:42 (1698660263) cl/577737722
  # where the parenthesized number is the build time as a Unix timestamp.
  build_line = xla_bridge.get_backend().platform_version.split('\n')[-1]
  timestamps = re.findall(r'\(.*?\)', build_line)
  if len(timestamps) != 1:
    # Unrecognized format: assume the build is recent enough.
    return True
  build_date = datetime.date.fromtimestamp(int(timestamps[0][1:-1]))
  return build_date >= min_date
def pjrt_c_api_version_at_least(major_version: int, minor_version: int):
  """Checks that the backend's PJRT C API version is >= (major, minor).

  Returns True when the backend does not report a PJRT C API version.
  """
  pjrt_c_api_versions = xla_bridge.backend_pjrt_c_api_version()
  if pjrt_c_api_versions is None:
    return True
  return pjrt_c_api_versions >= (major_version, minor_version)
def stablehlo_version_at_least(required_version: str):
  """Checks that the backend's StableHLO version is >= `required_version`.

  Returns True when the backend does not report a StableHLO version.
  """
  plugin_version = xla_bridge.backend_stablehlo_version()
  if plugin_version is None:
    return True
  plugin_version_str = ".".join(map(str, plugin_version))
  # The smaller of the two versions equals `required_version` exactly when
  # required_version <= plugin version, i.e. the plugin is at least as new.
  # (The previous comparison against `plugin_version` compared a string to a
  # tuple of ints and therefore always evaluated to False.)
  return hlo.get_smaller_version(
      plugin_version_str, required_version
  ) == required_version
def get_tpu_version() -> int:
  """Returns the numeric TPU generation parsed from the device kind.

  Raises:
    ValueError: if not running on TPU, or the device kind is unrecognized.
  """
  if device_under_test() != "tpu":
    raise ValueError("Device is not TPU")
  device_kind = xla_bridge.devices()[0].device_kind
  m = re.match(r"TPU[^\d]*(\d+)", device_kind)
  if m is None:
    raise ValueError(f"Device kind {device_kind} is not supported")
  return int(m.group(1))
def is_device_tpu_at_least(version: int) -> bool:
  """True iff the device under test is a TPU of generation >= `version`."""
  return device_under_test() == "tpu" and get_tpu_version() >= version
def is_device_tpu(version: int | None = None, variant: str = "") -> bool:
  """True iff the device under test is a TPU matching `version`/`variant`.

  With no `version`, any TPU matches. Some generations need special-cased
  matching because their device_kind strings do not contain "v<N><variant>".
  """
  if device_under_test() != "tpu":
    return False
  if version is None:
    return True
  device_kind = xla_bridge.devices()[0].device_kind
  expected_version = f"v{version}{variant}"
  # Generations whose device_kind does not simply contain "v<N><variant>".
  special_cases = {
      "v5e": lambda kind: "v5 lite" in kind,
      "v6e": lambda kind: "v6 lite" in kind,
      "v5p": lambda kind: kind.endswith("v5"),
      "v7x": lambda kind: "TPU7x" in kind,
  }
  matcher = special_cases.get(expected_version)
  if matcher is not None:
    return matcher(device_kind)
  return expected_version in device_kind
def pattern_search(patterns: str | Sequence[str], string: str):
if not isinstance(patterns, tuple):
patterns = (patterns,) # type: ignore
for pattern in patterns:
if re.search(pattern, string):
return pattern
return None
def device_kind_match(device_patterns: str | Sequence[str]):
  """Returns the first pattern matching the device kind, or None."""
  kind = xla_bridge.devices()[0].device_kind
  return pattern_search(device_patterns, kind)
def skip_if_errors(
    *,
    error_patterns: str | Sequence[str],
    device_patterns: str | Sequence[str],
    reason: str | Callable[[str, str], str],
):
  """Skip if both error message and device kind match a corresponding pattern.

  Returns a decorator for test methods. If the wrapped test raises an
  exception whose text matches one of `error_patterns` AND the device kind
  matches one of `device_patterns`, the test is skipped with `reason` (a
  string, or a callable taking the two matching patterns and returning a
  string). Otherwise the exception propagates unchanged.
  """
  def skip(test_method):
    @functools.wraps(test_method)
    def test_method_wrapper(self, *args, **kwargs):
      device_kind = xla_bridge.devices()[0].device_kind
      try:
        return test_method(self, *args, **kwargs)
      except Exception as e:
        matching_error_pattern = pattern_search(error_patterns, str(e))
        matching_device_pattern = pattern_search(device_patterns, device_kind)
        if matching_error_pattern and matching_device_pattern:
          if not isinstance(reason, str):
            reason_str = reason(matching_error_pattern, matching_device_pattern)
          else:
            reason_str = reason
          self.skipTest(reason_str)
        # Not a known (error, device) combination: fail normally.
        raise
    return test_method_wrapper
  return skip
# Ready-made `skip_if_errors` specializations for kernels that exceed the
# GPU's shared memory; callers supply `device_patterns` when applying them.
skip_if_mosaic_gpu_exceeds_shared_memory = functools.partial(
    skip_if_errors,
    error_patterns="kernel exceeds available shared memory",
    reason=lambda err, dev: f"Mosaic GPU kernel exceeds shared memory on {dev}",
)

skip_if_triton_exceeds_shared_memory = functools.partial(
    skip_if_errors,
    error_patterns="Shared memory size limit exceeded",
    reason=lambda err, dev: f"Triton kernel exceeds shared memory on {dev}",
)
def get_cuda_nonportable_max_cluster_size():
  """Returns the nonportable maximum CUDA cluster size for this device."""
  # DGX Spark (GB10): 12, determined by querying
  # cuOccupancyMaxPotentialClusterSize.
  # Hopper / Blackwell: 16 by opting in, per the NVIDIA tuning guides:
  # https://docs.nvidia.com/cuda/hopper-tuning-guide/index.html#:~:text=cluster%20size%20of-,16,-by%20opting%20in
  # https://docs.nvidia.com/cuda/blackwell-tuning-guide/index.html#:~:text=cluster%20size%20of-,16,-by%20opting%20in
  return 12 if device_kind_match("GB10$") else 16
def is_cuda_compute_capability_at_least(capability: str) -> bool:
  """True iff on CUDA and the device compute capability >= `capability`
  (a string such as "9.0")."""
  if not is_device_cuda():
    return False
  device, *_ = xla_bridge.local_devices(backend="gpu")
  parse = lambda cap: tuple(int(part) for part in cap.split("."))
  return parse(device.compute_capability) >= parse(capability)
def is_cuda_compute_capability_equal(capability: str) -> bool:
  """True iff on CUDA and the device compute capability == `capability`
  (a string such as "9.0")."""
  if not is_device_cuda():
    return False
  device, *_ = xla_bridge.local_devices(backend="gpu")
  parse = lambda cap: tuple(int(part) for part in cap.split("."))
  return parse(device.compute_capability) == parse(capability)
def is_cuda_version_at_least(major: int, minor: int):
  """True iff the CUDA runtime version is at least `major.minor`.

  Returns False when CUDA version information is unavailable.
  """
  assert 0 <= major
  assert 0 <= minor < 100
  if cuda_versions is None:
    return False
  # CUDA encodes runtime versions as major * 1000 + minor * 10.
  minimum = major * 1000 + minor * 10
  return cuda_versions.cuda_runtime_get_version() >= minimum
| EventThreadLocalState |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methodOverride1.py | {
"start": 13296,
"end": 13352
} | class ____[T]:
def method1(self, x: T) -> T: ...
| Base8 |
python | scipy__scipy | benchmarks/benchmarks/lsq_problems.py | {
"start": 5147,
"end": 9563
} | class ____(LSQBenchmarkProblem):
"""Coating thickness standardization problem, [1]_.
Number of variables --- 134, number of residuals --- 252, no bounds.
.. [1] Brett M. Averick et al. "The MINPACK-2 Test Problem Collection",
p. 25
"""
INITIAL_GUESSES = [
np.hstack(([-8.0, 13.0, 1.2, 0.2, 0.1, 6.0, 5.5, -5.2],
np.zeros(126)))
]
def __init__(self, x0):
super().__init__(134, 252, 0.5054986, x0)
self.n0 = self.m // 4
self.xi = np.array([
[0.7140, 0.7169, 0.7232, 0.7151, 0.6848, 0.7070, 0.7177, 0.7073,
0.6734, 0.7174, 0.7125, 0.6947, 0.7121, 0.7166, 0.6894, 0.6897,
0.7024, 0.7026, 0.6800, 0.6957, 0.6987, 0.7111, 0.7097, 0.6809,
0.7139, 0.7046, 0.6950, 0.7032, 0.7019, 0.6975, 0.6955, 0.7056,
0.6965, 0.6848, 0.6995, 0.6105, 0.6027, 0.6084, 0.6081, 0.6057,
0.6116, 0.6052, 0.6136, 0.6032, 0.6081, 0.6092, 0.6122, 0.6157,
0.6191, 0.6169, 0.5483, 0.5371, 0.5576, 0.5521, 0.5495, 0.5499,
0.4937, 0.5092, 0.5433, 0.5018, 0.5363, 0.4977, 0.5296],
[5.145, 5.241, 5.389, 5.211, 5.154, 5.105, 5.191, 5.013, 5.582,
5.208, 5.142, 5.284, 5.262, 6.838, 6.215, 6.817, 6.889, 6.732,
6.717, 6.468, 6.776, 6.574, 6.465, 6.090, 6.350, 4.255, 4.154,
4.211, 4.287, 4.104, 4.007, 4.261, 4.150, 4.040, 4.155, 5.086,
5.021, 5.040, 5.247, 5.125, 5.136, 4.949, 5.253, 5.154, 5.227,
5.120, 5.291, 5.294, 5.304, 5.209, 5.384, 5.490, 5.563, 5.532,
5.372, 5.423, 7.237, 6.944, 6.957, 7.138, 7.009, 7.074, 7.046]
])
self.y = np.array(
[9.3636, 9.3512, 9.4891, 9.1888, 9.3161, 9.2585, 9.2913, 9.3914,
9.4524, 9.4995, 9.4179, 9.468, 9.4799, 11.2917, 11.5062, 11.4579,
11.3977, 11.3688, 11.3897, 11.3104, 11.3882, 11.3629, 11.3149,
11.2474, 11.2507, 8.1678, 8.1017, 8.3506, 8.3651, 8.2994, 8.1514,
8.2229, 8.1027, 8.3785, 8.4118, 8.0955, 8.0613, 8.0979, 8.1364,
8.1700, 8.1684, 8.0885, 8.1839, 8.1478, 8.1827, 8.029, 8.1000,
8.2579, 8.2248, 8.2540, 6.8518, 6.8547, 6.8831, 6.9137, 6.8984,
6.8888, 8.5189, 8.5308, 8.5184, 8.5222, 8.5705, 8.5353, 8.5213,
8.3158, 8.1995, 8.2283, 8.1857, 8.2738, 8.2131, 8.2613, 8.2315,
8.2078, 8.2996, 8.3026, 8.0995, 8.2990, 9.6753, 9.6687, 9.5704,
9.5435, 9.6780, 9.7668, 9.7827, 9.7844, 9.7011, 9.8006, 9.7610,
9.7813, 7.3073, 7.2572, 7.4686, 7.3659, 7.3587, 7.3132, 7.3542,
7.2339, 7.4375, 7.4022, 10.7914, 10.6554, 10.7359, 10.7583,
10.7735, 10.7907, 10.6465, 10.6994, 10.7756, 10.7402, 10.6800,
10.7000, 10.8160, 10.6921, 10.8677, 12.3495, 12.4424, 12.4303,
12.5086, 12.4513, 12.4625, 16.2290, 16.2781, 16.2082, 16.2715,
16.2464, 16.1626, 16.1568]
)
self.scale1 = 4.08
self.scale2 = 0.417
def fun(self, x):
xi = np.vstack(
(self.xi[0] + x[8:8 + self.n0],
self.xi[1] + x[8 + self.n0:])
)
z1 = x[0] + x[1] * xi[0] + x[2] * xi[1] + x[3] * xi[0] * xi[1]
z2 = x[4] + x[5] * xi[0] + x[6] * xi[1] + x[7] * xi[0] * xi[1]
return np.hstack(
(z1 - self.y[:self.n0],
z2 - self.y[self.n0:],
self.scale1 * x[8:8 + self.n0],
self.scale2 * x[8 + self.n0:])
)
def jac(self, x):
J = np.zeros((self.m, self.n))
ind = np.arange(self.n0)
xi = np.vstack(
(self.xi[0] + x[8:8 + self.n0],
self.xi[1] + x[8 + self.n0:])
)
J[:self.n0, 0] = 1
J[:self.n0, 1] = xi[0]
J[:self.n0, 2] = xi[1]
J[:self.n0, 3] = xi[0] * xi[1]
J[ind, ind + 8] = x[1] + x[3] * xi[1]
J[ind, ind + 8 + self.n0] = x[2] + x[3] * xi[0]
J[self.n0:2 * self.n0, 4] = 1
J[self.n0:2 * self.n0, 5] = xi[0]
J[self.n0:2 * self.n0, 6] = xi[1]
J[self.n0:2 * self.n0, 7] = xi[0] * xi[1]
J[ind + self.n0, ind + 8] = x[5] + x[7] * xi[1]
J[ind + self.n0, ind + 8 + self.n0] = x[6] + x[7] * xi[0]
J[ind + 2 * self.n0, ind + 8] = self.scale1
J[ind + 3 * self.n0, ind + 8 + self.n0] = self.scale2
return J
| CoatingThickness |
python | pytorch__pytorch | test/distributed/elastic/multiprocessing/errors/api_test.py | {
"start": 370,
"end": 1305
} | class ____(Exception):
# exists so that we can validate that
# the correct error is raised and propagated
pass
@record
def raise_exception_fn():
raise SentinelError("foobar")
@record
def raise_system_exit_exception_fn(exit_code: int = 1):
exp = SystemExit()
exp.code = exit_code
raise exp
@record
def good_fn():
print("hello world")
@record
def raise_child_failure_error_fn(name, child_error_file=""):
if child_error_file:
with mock.patch.dict(os.environ, {"TORCHELASTIC_ERROR_FILE": child_error_file}):
ErrorHandler().record_exception(SentinelError("foobar"))
pf = ProcessFailure(local_rank=0, pid=997, exitcode=1, error_file=child_error_file)
raise ChildFailedError(name, {0: pf})
def read_resource_file(resource_file: str) -> str:
with open(os.path.join(os.path.dirname(__file__), resource_file)) as fp:
return "".join(fp.readlines())
| SentinelError |
python | apache__airflow | airflow-core/tests/unit/callbacks/test_callback_requests.py | {
"start": 6912,
"end": 10690
} | class ____:
def test_dag_callback_request_with_context_from_server(self):
"""Test DagCallbackRequest with context_from_server field"""
current_time = timezone.utcnow()
dag_run_data = DRDataModel(
dag_id="test_dag",
run_id="test_run",
logical_date=current_time,
data_interval_start=current_time,
data_interval_end=current_time,
run_after=current_time,
start_date=current_time,
end_date=None,
run_type="manual",
state="running",
consumed_asset_events=[],
partition_key=None,
)
ti_data = TIDataModel(
id=uuid.uuid4(),
dag_id="test_dag",
task_id="test_task",
run_id="test_run",
map_index=-1,
try_number=1,
dag_version_id=uuid.uuid4(),
)
context_from_server = DagRunContext(dag_run=dag_run_data, last_ti=ti_data)
request = DagCallbackRequest(
filepath="test.py",
dag_id="test_dag",
run_id="test_run",
bundle_name="testing",
bundle_version=None,
context_from_server=context_from_server,
is_failure_callback=True,
msg="test_failure",
)
assert request.context_from_server is not None
assert request.context_from_server.dag_run.dag_id == "test_dag"
assert request.context_from_server.last_ti.task_id == "test_task"
def test_dag_callback_request_without_context_from_server(self):
"""Test DagCallbackRequest without context_from_server field"""
request = DagCallbackRequest(
filepath="test.py",
dag_id="test_dag",
run_id="test_run",
bundle_name="testing",
bundle_version=None,
is_failure_callback=True,
msg="test_failure",
)
assert request.context_from_server is None
def test_dag_callback_request_serialization_with_context(self):
"""Test DagCallbackRequest can be serialized and deserialized with context_from_server"""
current_time = timezone.utcnow()
dag_run_data = DRDataModel(
dag_id="test_dag",
run_id="test_run",
logical_date=current_time,
data_interval_start=current_time,
data_interval_end=current_time,
run_after=current_time,
start_date=current_time,
end_date=None,
run_type="manual",
state="running",
consumed_asset_events=[],
partition_key=None,
)
ti_data = TIDataModel(
id=uuid.uuid4(),
dag_id="test_dag",
task_id="test_task",
run_id="test_run",
map_index=-1,
try_number=1,
dag_version_id=uuid.uuid4(),
)
context_from_server = DagRunContext(dag_run=dag_run_data, last_ti=ti_data)
request = DagCallbackRequest(
filepath="test.py",
dag_id="test_dag",
run_id="test_run",
bundle_name="testing",
bundle_version=None,
context_from_server=context_from_server,
is_failure_callback=True,
msg="test_failure",
)
# Test serialization
json_str = request.to_json()
# Test deserialization
result = DagCallbackRequest.from_json(json_str)
assert result == request
assert result.context_from_server is not None
assert result.context_from_server.dag_run.dag_id == "test_dag"
assert result.context_from_server.last_ti.task_id == "test_task"
| TestDagCallbackRequestWithContext |
python | sqlalchemy__sqlalchemy | test/sql/test_metadata.py | {
"start": 164247,
"end": 168068
} | class ____(fixtures.RemovesEvents, fixtures.TestBase):
def test_all_events(self):
canary = []
def before_attach(obj, parent):
canary.append(
"%s->%s" % (obj.__class__.__name__, parent.__class__.__name__)
)
def after_attach(obj, parent):
canary.append("%s->%s" % (obj.__class__.__name__, parent))
self.event_listen(
schema.SchemaItem, "before_parent_attach", before_attach
)
self.event_listen(
schema.SchemaItem, "after_parent_attach", after_attach
)
m = MetaData()
Table(
"t1",
m,
Column("id", Integer, Sequence("foo_id"), primary_key=True),
Column("bar", String, ForeignKey("t2.id")),
)
Table("t2", m, Column("id", Integer, primary_key=True))
eq_(
canary,
[
"Sequence->Column",
"Sequence->id",
"ForeignKey->Column",
"ForeignKey->bar",
"Table->MetaData",
"PrimaryKeyConstraint->Table",
"PrimaryKeyConstraint->t1",
"Column->Table",
"Column->t1",
"Column->Table",
"Column->t1",
"ForeignKeyConstraint->Table",
"ForeignKeyConstraint->t1",
"Table->MetaData()",
"Table->MetaData",
"PrimaryKeyConstraint->Table",
"PrimaryKeyConstraint->t2",
"Column->Table",
"Column->t2",
"Table->MetaData()",
],
)
def test_events_per_constraint(self):
canary = []
def evt(target):
def before_attach(obj, parent):
canary.append(
"%s->%s" % (target.__name__, parent.__class__.__name__)
)
def after_attach(obj, parent):
assert hasattr(obj, "name") # so we can change it
canary.append("%s->%s" % (target.__name__, parent))
self.event_listen(target, "before_parent_attach", before_attach)
self.event_listen(target, "after_parent_attach", after_attach)
for target in [
schema.ForeignKeyConstraint,
schema.PrimaryKeyConstraint,
schema.UniqueConstraint,
schema.CheckConstraint,
schema.Index,
]:
evt(target)
m = MetaData()
Table(
"t1",
m,
Column("id", Integer, Sequence("foo_id"), primary_key=True),
Column("bar", String, ForeignKey("t2.id"), index=True),
Column("bat", Integer, unique=True),
)
Table(
"t2",
m,
Column("id", Integer, primary_key=True),
Column("bar", Integer),
Column("bat", Integer),
CheckConstraint("bar>5"),
UniqueConstraint("bar", "bat"),
Index(None, "bar", "bat"),
)
eq_(
canary,
[
"PrimaryKeyConstraint->Table",
"PrimaryKeyConstraint->t1",
"Index->Table",
"Index->t1",
"ForeignKeyConstraint->Table",
"ForeignKeyConstraint->t1",
"UniqueConstraint->Table",
"UniqueConstraint->t1",
"PrimaryKeyConstraint->Table",
"PrimaryKeyConstraint->t2",
"CheckConstraint->Table",
"CheckConstraint->t2",
"UniqueConstraint->Table",
"UniqueConstraint->t2",
"Index->Table",
"Index->t2",
],
)
| CatchAllEventsTest |
python | huggingface__transformers | src/transformers/models/helium/modeling_helium.py | {
"start": 15353,
"end": 18486
} | class ____(HeliumPreTrainedModel):
def __init__(self, config: HeliumConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[HeliumDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = HeliumRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = HeliumRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_embeddings=position_embeddings,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
@auto_docstring
| HeliumModel |
python | google__jax | tests/pallas/mosaic_gpu_test.py | {
"start": 3696,
"end": 5608
} | class ____(jtu.JaxTestCase, metaclass=PallasTestMetaclass):
LOWERING_SEMANTICS: ClassVar[plgpu.LoweringSemantics]
def setUp(self):
if not jtu.is_cuda_compute_capability_at_least("9.0"):
self.skipTest("Only works on a GPU with capability >= sm90")
self.enter_context(pallas_call._PALLAS_USE_MOSAIC_GPU(True))
super().setUp()
def skip_if_wg_semantics(self):
if self.LOWERING_SEMANTICS == plgpu.LoweringSemantics.Warpgroup:
self.skipTest("Not supported under WG semantics")
def kernel(self, *args, **kwargs):
compiler_params = dataclasses.replace(
kwargs.pop("compiler_params", plgpu.CompilerParams()),
lowering_semantics=self.LOWERING_SEMANTICS,
)
return plgpu.kernel(*args, compiler_params=compiler_params, **kwargs)
def pallas_call(self, *args, **kwargs):
compiler_params = dataclasses.replace(
kwargs.pop("compiler_params", plgpu.CompilerParams()),
lowering_semantics=self.LOWERING_SEMANTICS,
)
return pl.pallas_call(*args, compiler_params=compiler_params, **kwargs)
@contextlib.contextmanager
def capture_stdout(self):
if "pytest" in sys.modules:
self.skipTest("pytest interacts badly with GPU stdout capture")
if mosaic_gpu_lib is None:
raise ValueError("Running tests but missing Mosaic GPU extension")
with jtu.capture_stdout() as stdout:
yield stdout
# We need to cudaDeviceSynchronize to make sure printfs are flushed.
mosaic_gpu_lib._mosaic_gpu_ext._sync_all_devices()
def default_transforms(
self, *, swizzle: int = 128, dtype: jnp.dtype
) -> Sequence[plgpu.MemoryRefTransform]:
if self.LOWERING_SEMANTICS == plgpu.LoweringSemantics.Warpgroup:
return ()
swizzle_elems = 8 * swizzle // dtypes.itemsize_bits(dtype)
return (
plgpu.TilingTransform((8, swizzle_elems)),
plgpu.SwizzleTransform(swizzle),
)
| PallasTest |
python | huggingface__transformers | src/transformers/models/albert/modeling_albert.py | {
"start": 14032,
"end": 16772
} | class ____(AlbertPreTrainedModel):
config_class = AlbertConfig
base_model_prefix = "albert"
def __init__(self, config: AlbertConfig, add_pooling_layer: bool = True):
r"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = AlbertEmbeddings(config)
self.encoder = AlbertTransformer(config)
if add_pooling_layer:
self.pooler = nn.Linear(config.hidden_size, config.hidden_size)
self.pooler_activation = nn.Tanh()
else:
self.pooler = None
self.pooler_activation = None
self.attn_implementation = config._attn_implementation
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> nn.Embedding:
return self.embeddings.word_embeddings
def set_input_embeddings(self, value: nn.Embedding) -> None:
self.embeddings.word_embeddings = value
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[BaseModelOutputWithPooling, tuple]:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
embedding_output = self.embeddings(
input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=embedding_output,
attention_mask=attention_mask,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask,
position_ids=position_ids,
**kwargs,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0])) if self.pooler is not None else None
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
)
@auto_docstring(
custom_intro="""
Albert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
`sentence order prediction (classification)` head.
"""
)
| AlbertModel |
python | PyCQA__pylint | tests/functional/s/superfluous_parens.py | {
"start": 1704,
"end": 2491
} | class ____:
keys = []
def __iter__(self):
return ((k, getattr(self, k)) for k in self.keys)
if (A == 2) is not (B == 2):
pass
K = ("Test " + "String") # [superfluous-parens]
M = A is not (A <= H)
M = True is not (M == K)
M = True is not (True is not False) # pylint: disable=comparison-of-constants
Z = "TestString"
X = ("Test " + "String") # [superfluous-parens]
Y = ("Test " + "String") in Z # [superfluous-parens]
assert ("Test " + "String") in "hello" # [superfluous-parens]
assert ("Version " + "String") in ("Version " + "String") # [superfluous-parens]
hi = ("CONST") # [superfluous-parens]
hi = ("CONST",)
#TODO: maybe get this line to report [superfluous-parens] without causing other false positives.
assert "" + ("Version " + "String") in Z
| ClassA |
python | openai__openai-python | src/openai/types/evals/run_cancel_response.py | {
"start": 12962,
"end": 14527
} | class ____(BaseModel):
id: str
"""Unique identifier for the evaluation run."""
created_at: int
"""Unix timestamp (in seconds) when the evaluation run was created."""
data_source: DataSource
"""Information about the run's data source."""
error: EvalAPIError
"""An object representing an error response from the Eval API."""
eval_id: str
"""The identifier of the associated evaluation."""
metadata: Optional[Metadata] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
structured format, and querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
"""
model: str
"""The model that is evaluated, if applicable."""
name: str
"""The name of the evaluation run."""
object: Literal["eval.run"]
"""The type of the object. Always "eval.run"."""
per_model_usage: List[PerModelUsage]
"""Usage statistics for each model during the evaluation run."""
per_testing_criteria_results: List[PerTestingCriteriaResult]
"""Results per testing criteria applied during the evaluation run."""
report_url: str
"""The URL to the rendered evaluation run report on the UI dashboard."""
result_counts: ResultCounts
"""Counters summarizing the outcomes of the evaluation run."""
status: str
"""The status of the evaluation run."""
| RunCancelResponse |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/strategies.py | {
"start": 100555,
"end": 120052
} | class ____(_PostLoader, util.MemoizedSlots):
__slots__ = (
"join_depth",
"omit_join",
"_parent_alias",
"_query_info",
"_fallback_query_info",
)
query_info = collections.namedtuple(
"queryinfo",
[
"load_only_child",
"load_with_join",
"in_expr",
"pk_cols",
"zero_idx",
"child_lookup_cols",
],
)
_chunksize = 500
def __init__(self, parent, strategy_key):
super().__init__(parent, strategy_key)
self.join_depth = self.parent_property.join_depth
is_m2o = self.parent_property.direction is interfaces.MANYTOONE
if self.parent_property.omit_join is not None:
self.omit_join = self.parent_property.omit_join
else:
lazyloader = self.parent_property._get_strategy(
(("lazy", "select"),)
)
if is_m2o:
self.omit_join = lazyloader.use_get
else:
self.omit_join = self.parent._get_clause[0].compare(
lazyloader._rev_lazywhere,
use_proxies=True,
compare_keys=False,
equivalents=self.parent._equivalent_columns,
)
if self.omit_join:
if is_m2o:
self._query_info = self._init_for_omit_join_m2o()
self._fallback_query_info = self._init_for_join()
else:
self._query_info = self._init_for_omit_join()
else:
self._query_info = self._init_for_join()
def _init_for_omit_join(self):
pk_to_fk = dict(
self.parent_property._join_condition.local_remote_pairs
)
pk_to_fk.update(
(equiv, pk_to_fk[k])
for k in list(pk_to_fk)
for equiv in self.parent._equivalent_columns.get(k, ())
)
pk_cols = fk_cols = [
pk_to_fk[col] for col in self.parent.primary_key if col in pk_to_fk
]
if len(fk_cols) > 1:
in_expr = sql.tuple_(*fk_cols)
zero_idx = False
else:
in_expr = fk_cols[0]
zero_idx = True
return self.query_info(False, False, in_expr, pk_cols, zero_idx, None)
def _init_for_omit_join_m2o(self):
pk_cols = self.mapper.primary_key
if len(pk_cols) > 1:
in_expr = sql.tuple_(*pk_cols)
zero_idx = False
else:
in_expr = pk_cols[0]
zero_idx = True
lazyloader = self.parent_property._get_strategy((("lazy", "select"),))
lookup_cols = [lazyloader._equated_columns[pk] for pk in pk_cols]
return self.query_info(
True, False, in_expr, pk_cols, zero_idx, lookup_cols
)
def _init_for_join(self):
self._parent_alias = AliasedClass(self.parent.class_)
pa_insp = inspect(self._parent_alias)
pk_cols = [
pa_insp._adapt_element(col) for col in self.parent.primary_key
]
if len(pk_cols) > 1:
in_expr = sql.tuple_(*pk_cols)
zero_idx = False
else:
in_expr = pk_cols[0]
zero_idx = True
return self.query_info(False, True, in_expr, pk_cols, zero_idx, None)
def init_class_attribute(self, mapper):
self.parent_property._get_strategy(
(("lazy", "select"),)
).init_class_attribute(mapper)
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
if context.refresh_state:
return self._immediateload_create_row_processor(
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
)
(
effective_path,
run_loader,
execution_options,
recursion_depth,
) = self._setup_for_recursion(
context, path, loadopt, join_depth=self.join_depth
)
if not run_loader:
return
if not context.compile_state.compile_options._enable_eagerloads:
return
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." % self
)
# a little dance here as the "path" is still something that only
# semi-tracks the exact series of things we are loading, still not
# telling us about with_polymorphic() and stuff like that when it's at
# the root.. the initial MapperEntity is more accurate for this case.
if len(path) == 1:
if not orm_util._entity_isa(query_entity.entity_zero, self.parent):
return
elif not orm_util._entity_isa(path[-1], self.parent):
return
selectin_path = effective_path
path_w_prop = path[self.parent_property]
# build up a path indicating the path from the leftmost
# entity to the thing we're subquery loading.
with_poly_entity = path_w_prop.get(
context.attributes, "path_with_polymorphic", None
)
if with_poly_entity is not None:
effective_entity = inspect(with_poly_entity)
else:
effective_entity = self.entity
loading._PostLoad.callable_for_path(
context,
selectin_path,
self.parent,
self.parent_property,
self._load_for_path,
effective_entity,
loadopt,
recursion_depth,
execution_options,
)
def _load_for_path(
self,
context,
path,
states,
load_only,
effective_entity,
loadopt,
recursion_depth,
execution_options,
):
if load_only and self.key not in load_only:
return
query_info = self._query_info
if query_info.load_only_child:
our_states = collections.defaultdict(list)
none_states = []
mapper = self.parent
for state, overwrite in states:
state_dict = state.dict
related_ident = tuple(
mapper._get_state_attr_by_column(
state,
state_dict,
lk,
passive=attributes.PASSIVE_NO_FETCH,
)
for lk in query_info.child_lookup_cols
)
# if the loaded parent objects do not have the foreign key
# to the related item loaded, then degrade into the joined
# version of selectinload
if LoaderCallableStatus.PASSIVE_NO_RESULT in related_ident:
query_info = self._fallback_query_info
break
# organize states into lists keyed to particular foreign
# key values.
if None not in related_ident:
our_states[related_ident].append(
(state, state_dict, overwrite)
)
else:
# For FK values that have None, add them to a
# separate collection that will be populated separately
none_states.append((state, state_dict, overwrite))
# note the above conditional may have changed query_info
if not query_info.load_only_child:
our_states = [
(state.key[1], state, state.dict, overwrite)
for state, overwrite in states
]
pk_cols = query_info.pk_cols
in_expr = query_info.in_expr
if not query_info.load_with_join:
# in "omit join" mode, the primary key column and the
# "in" expression are in terms of the related entity. So
# if the related entity is polymorphic or otherwise aliased,
# we need to adapt our "pk_cols" and "in_expr" to that
# entity. in non-"omit join" mode, these are against the
# parent entity and do not need adaption.
if effective_entity.is_aliased_class:
pk_cols = [
effective_entity._adapt_element(col) for col in pk_cols
]
in_expr = effective_entity._adapt_element(in_expr)
bundle_ent = orm_util.Bundle("pk", *pk_cols)
bundle_sql = bundle_ent.__clause_element__()
entity_sql = effective_entity.__clause_element__()
q = Select._create_raw_select(
_raw_columns=[bundle_sql, entity_sql],
_compile_options=_ORMCompileState.default_compile_options,
_propagate_attrs={
"compile_state_plugin": "orm",
"plugin_subject": effective_entity,
},
)
if not query_info.load_with_join:
# the Bundle we have in the "omit_join" case is against raw, non
# annotated columns, so to ensure the Query knows its primary
# entity, we add it explicitly. If we made the Bundle against
# annotated columns, we hit a performance issue in this specific
# case, which is detailed in issue #4347.
q = q.select_from(effective_entity)
else:
# in the non-omit_join case, the Bundle is against the annotated/
# mapped column of the parent entity, but the #4347 issue does not
# occur in this case.
q = q.select_from(self._parent_alias).join(
getattr(self._parent_alias, self.parent_property.key).of_type(
effective_entity
)
)
q = q.filter(in_expr.in_(sql.bindparam("primary_keys")))
# a test which exercises what these comments talk about is
# test_selectin_relations.py -> test_twolevel_selectin_w_polymorphic
#
# effective_entity above is given to us in terms of the cached
# statement, namely this one:
orig_query = context.compile_state.select_statement
# the actual statement that was requested is this one:
# context_query = context.user_passed_query
#
# that's not the cached one, however. So while it is of the identical
# structure, if it has entities like AliasedInsp, which we get from
# aliased() or with_polymorphic(), the AliasedInsp will likely be a
# different object identity each time, and will not match up
# hashing-wise to the corresponding AliasedInsp that's in the
# cached query, meaning it won't match on paths and loader lookups
# and loaders like this one will be skipped if it is used in options.
#
# as it turns out, standard loader options like selectinload(),
# lazyload() that have a path need
# to come from the cached query so that the AliasedInsp etc. objects
# that are in the query line up with the object that's in the path
# of the strategy object. however other options like
# with_loader_criteria() that doesn't have a path (has a fixed entity)
# and needs to have access to the latest closure state in order to
# be correct, we need to use the uncached one.
#
# as of #8399 we let the loader option itself figure out what it
# wants to do given cached and uncached version of itself.
effective_path = path[self.parent_property]
if orig_query is context.user_passed_query:
new_options = orig_query._with_options
else:
cached_options = orig_query._with_options
uncached_options = context.user_passed_query._with_options
# propagate compile state options from the original query,
# updating their "extra_criteria" as necessary.
# note this will create a different cache key than
# "orig" options if extra_criteria is present, because the copy
# of extra_criteria will have different boundparam than that of
# the QueryableAttribute in the path
new_options = [
orig_opt._adapt_cached_option_to_uncached_option(
context, uncached_opt
)
for orig_opt, uncached_opt in zip(
cached_options, uncached_options
)
]
if loadopt and loadopt._extra_criteria:
new_options += (
orm_util.LoaderCriteriaOption(
effective_entity,
loadopt._generate_extra_criteria(context),
),
)
if recursion_depth is not None:
effective_path = effective_path._truncate_recursive()
q = q.options(*new_options)
q = q._update_compile_options({"_current_path": effective_path})
if context.populate_existing:
q = q.execution_options(populate_existing=True)
if self.parent_property.order_by:
if not query_info.load_with_join:
eager_order_by = self.parent_property.order_by
if effective_entity.is_aliased_class:
eager_order_by = [
effective_entity._adapt_element(elem)
for elem in eager_order_by
]
q = q.order_by(*eager_order_by)
else:
def _setup_outermost_orderby(compile_context):
compile_context.eager_order_by += tuple(
util.to_list(self.parent_property.order_by)
)
q = q._add_compile_state_func(
_setup_outermost_orderby, self.parent_property
)
if query_info.load_only_child:
self._load_via_child(
our_states,
none_states,
query_info,
q,
context,
execution_options,
)
else:
self._load_via_parent(
our_states, query_info, q, context, execution_options
)
def _load_via_child(
self,
our_states,
none_states,
query_info,
q,
context,
execution_options,
):
uselist = self.uselist
# this sort is really for the benefit of the unit tests
our_keys = sorted(our_states)
while our_keys:
chunk = our_keys[0 : self._chunksize]
our_keys = our_keys[self._chunksize :]
data = {
k: v
for k, v in context.session.execute(
q,
params={
"primary_keys": [
key[0] if query_info.zero_idx else key
for key in chunk
]
},
execution_options=execution_options,
).unique()
}
for key in chunk:
# for a real foreign key and no concurrent changes to the
# DB while running this method, "key" is always present in
# data. However, for primaryjoins without real foreign keys
# a non-None primaryjoin condition may still refer to no
# related object.
related_obj = data.get(key, None)
for state, dict_, overwrite in our_states[key]:
if not overwrite and self.key in dict_:
continue
state.get_impl(self.key).set_committed_value(
state,
dict_,
related_obj if not uselist else [related_obj],
)
# populate none states with empty value / collection
for state, dict_, overwrite in none_states:
if not overwrite and self.key in dict_:
continue
# note it's OK if this is a uselist=True attribute, the empty
# collection will be populated
state.get_impl(self.key).set_committed_value(state, dict_, None)
def _load_via_parent(
self, our_states, query_info, q, context, execution_options
):
uselist = self.uselist
_empty_result = () if uselist else None
while our_states:
chunk = our_states[0 : self._chunksize]
our_states = our_states[self._chunksize :]
primary_keys = [
key[0] if query_info.zero_idx else key
for key, state, state_dict, overwrite in chunk
]
data = collections.defaultdict(list)
for k, v in itertools.groupby(
context.session.execute(
q,
params={"primary_keys": primary_keys},
execution_options=execution_options,
).unique(),
lambda x: x[0],
):
data[k].extend(vv[1] for vv in v)
for key, state, state_dict, overwrite in chunk:
if not overwrite and self.key in state_dict:
continue
collection = data.get(key, _empty_result)
if not uselist and collection:
if len(collection) > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded "
"attribute '%s' " % self
)
state.get_impl(self.key).set_committed_value(
state, state_dict, collection[0]
)
else:
# note that empty tuple set on uselist=False sets the
# value to None
state.get_impl(self.key).set_committed_value(
state, state_dict, collection
)
def _single_parent_validator(desc, prop):
def _do_check(state, value, oldvalue, initiator):
if value is not None and initiator.key == prop.key:
hasparent = initiator.hasparent(attributes.instance_state(value))
if hasparent and oldvalue is not value:
raise sa_exc.InvalidRequestError(
"Instance %s is already associated with an instance "
"of %s via its %s attribute, and is only allowed a "
"single parent."
% (orm_util.instance_str(value), state.class_, prop),
code="bbf1",
)
return value
def append(state, value, initiator):
return _do_check(state, value, None, initiator)
def set_(state, value, oldvalue, initiator):
return _do_check(state, value, oldvalue, initiator)
event.listen(
desc, "append", append, raw=True, retval=True, active_history=True
)
event.listen(desc, "set", set_, raw=True, retval=True, active_history=True)
| _SelectInLoader |
python | apache__airflow | providers/fab/src/airflow/providers/fab/auth_manager/schemas/role_and_permission_schema.py | {
"start": 1473,
"end": 1637
} | class ____(Schema):
"""Permissions list schema."""
actions = fields.List(fields.Nested(ActionSchema))
total_entries = fields.Int()
| ActionCollectionSchema |
python | pytorch__pytorch | torch/ao/quantization/observer.py | {
"start": 63521,
"end": 63906
} | class ____(Enum):
"""
Placeholder for dtypes that do not exist in PyTorch core yet.
"""
# torch.int1 to torch.int7 will be added to PyTorch 2.6
# These will remain here for BC with older PyTorch versions
INT1 = auto()
INT2 = auto()
INT3 = auto()
INT4 = auto()
INT5 = auto()
INT6 = auto()
INT7 = auto()
@dataclass(frozen=True)
| TorchAODType |
python | pallets__jinja | src/jinja2/lexer.py | {
"start": 8503,
"end": 9013
} | class ____:
"""The iterator for tokenstreams. Iterate over the stream
until the eof token is reached.
"""
def __init__(self, stream: "TokenStream") -> None:
self.stream = stream
def __iter__(self) -> "TokenStreamIterator":
return self
def __next__(self) -> Token:
token = self.stream.current
if token.type is TOKEN_EOF:
self.stream.close()
raise StopIteration
next(self.stream)
return token
| TokenStreamIterator |
python | tensorflow__tensorflow | tensorflow/compiler/tests/image_ops_test.py | {
"start": 1531,
"end": 4249
} | class ____(xla_test.XLATestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in self.float_types:
inp = _generate_numpy_random_rgb(shape).astype(nptype)
# Convert to HSV and back, as a batch and individually
with self.session() as sess:
batch0 = array_ops.placeholder(nptype, shape=shape)
with self.test_scope():
batch1 = image_ops.rgb_to_hsv(batch0)
batch2 = image_ops.hsv_to_rgb(batch1)
split0 = array_ops_stack.unstack(batch0)
with self.test_scope():
split1 = list(map(image_ops.rgb_to_hsv, split0))
split2 = list(map(image_ops.hsv_to_rgb, split1))
join1 = array_ops_stack.stack(split1)
join2 = array_ops_stack.stack(split2)
batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2],
{batch0: inp})
# Verify that processing batch elements together is the same as separate
self.assertAllCloseAccordingToType(batch1, join1, half_rtol=0.000002)
self.assertAllCloseAccordingToType(batch2, join2, half_rtol=0.000002)
self.assertAllCloseAccordingToType(
batch2, inp, bfloat16_atol=0.03, half_rtol=0.02)
def testRGBToHSVRoundTrip(self):
data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
for nptype in self.float_types:
rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.
with self.session():
placeholder = array_ops.placeholder(nptype)
with self.test_scope():
hsv = image_ops.rgb_to_hsv(placeholder)
rgb = image_ops.hsv_to_rgb(hsv)
rgb_tf = rgb.eval(feed_dict={placeholder: rgb_np})
self.assertAllCloseAccordingToType(rgb_tf, rgb_np, bfloat16_atol=0.03)
def testRGBToHSVNumpy(self):
"""Tests the RGB to HSV conversion matches a reference implementation."""
for nptype in self.float_types:
rgb_flat = _generate_numpy_random_rgb((64, 3)).astype(nptype)
rgb_np = rgb_flat.reshape(4, 4, 4, 3) # pylint: disable=too-many-function-args
hsv_np = np.array([
colorsys.rgb_to_hsv(
r.astype(np.float64), g.astype(np.float64), b.astype(np.float64))
for r, g, b in rgb_flat
])
hsv_np = hsv_np.reshape(4, 4, 4, 3) # pylint: disable=too-many-function-args
with self.session():
placeholder = array_ops.placeholder(nptype)
with self.test_scope():
hsv_op = image_ops.rgb_to_hsv(placeholder)
hsv_tf = hsv_op.eval(feed_dict={placeholder: rgb_np})
self.assertAllCloseAccordingToType(hsv_tf, hsv_np)
| RGBToHSVTest |
python | google__jax | jax/_src/pallas/mosaic/pipeline.py | {
"start": 9117,
"end": 13990
} | class ____:
"""Abstract interface for BufferedRefs."""
@property
def spec(self) -> pl.BlockSpec:
raise NotImplementedError()
@property
def buffer_type(self) -> BufferType:
raise NotImplementedError()
@property
def is_buffered(self) -> bool:
return False
@property
def is_input(self):
return self.buffer_type.is_input
@property
def is_output(self):
return self.buffer_type.is_output
@property
def is_accumulator(self):
return self.buffer_type == BufferType.ACCUMULATOR
@property
def is_input_output(self):
return self.buffer_type == BufferType.INPUT_OUTPUT
@property
def is_manual(self):
return self.buffer_type == BufferType.MANUAL
def init_slots(self):
"""Initialize slot indices."""
raise NotImplementedError()
def advance_copy_in_slot(self, predicate: bool = True) -> BufferedRefBase:
"""Advance the copy in slot."""
raise NotImplementedError()
def advance_wait_in_slot(self, predicate: bool = True) -> BufferedRefBase:
"""Advance the wait in slot."""
raise NotImplementedError()
def advance_copy_out_slot(self, predicate: bool = True) -> BufferedRefBase:
"""Advance the copy out slot."""
raise NotImplementedError()
def advance_wait_out_slot(self, predicate: bool = True) -> BufferedRefBase:
"""Advance the wait out slot."""
raise NotImplementedError()
def load_slots(self, predicate: bool | jax.Array = True) -> BufferedRefBase:
"""Load slot information into registers."""
raise NotImplementedError()
def save_slots(self, predicate: bool | jax.Array = True):
"""Save slot information from registers."""
raise NotImplementedError()
@property
def block_shape(self) -> Sequence[pl.BlockDim | int | None] | None:
return self.spec.block_shape
@property
def compute_index(self):
return self.spec.index_map
def get_dma_slice(self, src_shape, src_dtype, grid_indices):
# We need to handle blocks that might go OOB in the src array. An in bounds
# block looks like this (for array shape (600, 600) and block shape
# (256, 256)):
#
# +--------------+------------------|
# | Block (0,0) | |
# | (256, 256) | |
# +--------------+ |
# | A (600, 600) |
# | |
# +---------------------------------+
#
# For in-bounds blocks, we don't need to do anything special.
# An out-of-bounds block looks like this:
#
# +--------------+------------------|
# | |
# | |
# + |
# | A (600, 600) |
# +--------------+ |
# | Block (2,0) | |
# + --------------------------------|
# | XXXXXXXXXX |
# +--------------+
# where the X's indicate where the block is out of bounds.
#
# When we have an out of bounds block like this, we need to truncate it to
# a tile boundary (tiles are (8, 128) along the two minormost dimensions).
# In this case, we'll have a block that is indexing the
# 512:768 elements of A along the first dimension. We need to convert 768
# into 600 (600 % 8 == 0), so our indexing will look like this:
# +--------------+------------------|
# | |
# | |
# + |
# | A (600, 600) |
# +--------------+ |
# | Block (2,0) | |
# + --------------------------------|
# where it is now a (88, 256) sized block.
#
# Suppose A is now (601, 600), instead of picking a (88, 256)-sized block
# for the last iteration on that dimension, we will pick the next highest
# tile multiple, i.e. (96, 256).
if len(src_shape) < 2:
raise NotImplementedError("Must use >1D values.")
tiling = _make_tiling(src_shape, src_dtype)
block_indices = self.compute_index(*grid_indices)
return tuple(
_make_block_slice(bi, bs, ss, t)
for bi, bs, ss, t in zip(
block_indices, self.block_shape, src_shape, tiling, strict=True
)
)
def bind_existing_ref(self, window_ref, indices):
"""For handling VMEM references, the pipeline aliases the existing ref."""
del window_ref, indices
return self
def unbind_refs(self):
return self
def with_spec(self, spec: pl.BlockSpec) -> BufferedRefBase:
"""Returns a new BufferedRefBase with the given block spec."""
raise NotImplementedError()
# TODO(justinfu): Refactor and rename slot fields to reflect cumulative values
# instead of slot index.
@tree_util.register_pytree_node_class
@dataclasses.dataclass(frozen=True)
| BufferedRefBase |
python | Farama-Foundation__Gymnasium | gymnasium/error.py | {
"start": 44,
"end": 120
} | class ____(Exception):
"""Error superclass."""
# Registration errors
| Error |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/metaclass5.py | {
"start": 137,
"end": 293
} | class ____(type):
def __eq__(self, a: "type[ClassA]") -> str:
return "hi"
def __add__(self, a: "type[ClassA]") -> int:
return 0
| MetaA |
python | rapidsai__cudf | python/cudf/cudf/core/udf/masked_typing.py | {
"start": 13529,
"end": 14016
} | class ____(AbstractTemplate):
"""
Typing for int(Masked)
returns the result of calling "int" on the input
TODO: retains the validity of the input rather than
raising as in int(pd.NA)
"""
def generic(self, args, kws):
if isinstance(args[0], MaskedType):
# following numpy convention np.dtype(int) -> dtype('int64')
return nb_signature(MaskedType(types.int64), args[0])
@cuda_decl_registry.register_global(abs)
| MaskedScalarIntCast |
python | GoogleCloudPlatform__python-docs-samples | datastore/cloud-ndb/flask_app.py | {
"start": 978,
"end": 1174
} | class ____(ndb.Model):
title = ndb.StringProperty()
@app.route("/")
def list_books():
books = Book.query()
return str([book.to_dict() for book in books])
# [END datastore_ndb_flask]
| Book |
python | PrefectHQ__prefect | src/integrations/prefect-dask/prefect_dask/client.py | {
"start": 374,
"end": 5281
} | class ____(Client):
def submit(
self,
func,
*args,
key=None,
workers=None,
resources=None,
retries=None,
priority=0,
fifo_timeout="100 ms",
allow_other_workers=False,
actor=False,
actors=False,
pure=True,
**kwargs,
):
if isinstance(func, Task):
run_task_kwargs = {}
run_task_kwargs["task"] = func
task_run_id = uuid4()
run_task_kwargs["task_run_id"] = task_run_id
passed_dependencies = kwargs.pop("dependencies", None)
run_task_kwargs["wait_for"] = kwargs.pop("wait_for", None)
run_task_kwargs["return_type"] = kwargs.pop("return_type", "result")
if (parameters := kwargs.get("parameters")) is None:
# If parameters are not provided, we need to extract them from the function.
# This case is when the PrefectDistributedClient is used directly without
# the DaskTaskRunner.
parameters = get_call_parameters(func, args, kwargs)
run_task_kwargs["parameters"] = parameters
dependencies = {
k: collect_task_run_inputs_sync(v, future_cls=Future)
for k, v in parameters.items()
}
if passed_dependencies:
dependencies = {
k: v.union(passed_dependencies.get(k, set()))
for k, v in dependencies.items()
}
run_task_kwargs["dependencies"] = dependencies
context = serialize_context(
asset_ctx_kwargs={
"task": func,
"task_run_id": task_run_id,
"task_inputs": dependencies,
"copy_to_child_ctx": True,
}
)
run_task_kwargs["context"] = context
@wraps(func)
def wrapper_func(*args, **kwargs):
if func.isasync:
return asyncio.run(run_task_async(*args, **kwargs))
else:
return run_task_sync(*args, **kwargs)
future = super().submit(
wrapper_func,
key=key,
workers=workers,
resources=resources,
retries=retries,
priority=priority,
fifo_timeout=fifo_timeout,
allow_other_workers=allow_other_workers,
actor=actor,
actors=actors,
pure=pure,
**run_task_kwargs,
)
future.task_run_id = run_task_kwargs["task_run_id"]
return future
else:
return super().submit(
func,
*args,
key=key,
workers=workers,
resources=resources,
retries=retries,
priority=priority,
fifo_timeout=fifo_timeout,
allow_other_workers=allow_other_workers,
actor=actor,
actors=actors,
pure=pure,
**kwargs,
)
def map(
self,
func,
*iterables,
key=None,
workers=None,
retries=None,
resources=None,
priority=0,
allow_other_workers=False,
fifo_timeout="100 ms",
actor=False,
actors=False,
pure=True,
batch_size=None,
**kwargs,
):
if isinstance(func, Task):
args_list = zip(*iterables)
futures = []
for args in args_list:
futures.append(
self.submit(
func,
*args,
key=key,
workers=workers,
resources=resources,
retries=retries,
priority=priority,
fifo_timeout=fifo_timeout,
allow_other_workers=allow_other_workers,
actor=actor,
actors=actors,
pure=pure,
**kwargs,
)
)
return futures
else:
return super().map(
func,
*iterables,
key=key,
workers=workers,
retries=retries,
resources=resources,
priority=priority,
allow_other_workers=allow_other_workers,
fifo_timeout=fifo_timeout,
actor=actor,
actors=actors,
pure=pure,
batch_size=batch_size,
**kwargs,
)
| PrefectDaskClient |
python | doocs__leetcode | solution/0200-0299/0253.Meeting Rooms II/Solution2.py | {
"start": 0,
"end": 313
} | class ____:
def minMeetingRooms(self, intervals: List[List[int]]) -> int:
d = defaultdict(int)
for l, r in intervals:
d[l] += 1
d[r] -= 1
ans = s = 0
for _, v in sorted(d.items()):
s += v
ans = max(ans, s)
return ans
| Solution |
python | numba__numba | numba/core/typing/npdatetime.py | {
"start": 5660,
"end": 6462
} | class ____(AbstractTemplate):
key = operator.add
def generic(self, args, kws):
if len(args) == 1:
# Guard against unary +
return
left, right = args
if isinstance(right, types.NPTimedelta):
dt = left
td = right
elif isinstance(left, types.NPTimedelta):
dt = right
td = left
else:
return
if isinstance(dt, types.NPDatetime):
unit = npdatetime_helpers.combine_datetime_timedelta_units(dt.unit,
td.unit)
if unit is not None:
return signature(types.NPDatetime(unit), left, right)
@infer_global(operator.sub)
@infer_global(operator.isub)
| DatetimePlusTimedelta |
python | langchain-ai__langchain | libs/core/langchain_core/runnables/utils.py | {
"start": 14792,
"end": 15832
} | class ____(Protocol[_T_contra, _T_co]):
"""Protocol for objects that support addition."""
def __add__(self, x: _T_contra, /) -> _T_co:
"""Add the object to another object."""
Addable = TypeVar("Addable", bound=SupportsAdd[Any, Any])
def add(addables: Iterable[Addable]) -> Addable | None:
"""Add a sequence of addable objects together.
Args:
addables: The addable objects to add.
Returns:
The result of adding the addable objects.
"""
final: Addable | None = None
for chunk in addables:
final = chunk if final is None else final + chunk
return final
async def aadd(addables: AsyncIterable[Addable]) -> Addable | None:
"""Asynchronously add a sequence of addable objects together.
Args:
addables: The addable objects to add.
Returns:
The result of adding the addable objects.
"""
final: Addable | None = None
async for chunk in addables:
final = chunk if final is None else final + chunk
return final
| SupportsAdd |
python | huggingface__transformers | src/transformers/models/swin2sr/modeling_swin2sr.py | {
"start": 1406,
"end": 3389
} | class ____(ModelOutput):
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
# Copied from transformers.models.swin.modeling_swin.window_partition
def window_partition(input_feature, window_size):
"""
Partitions the given input into windows.
"""
batch_size, height, width, num_channels = input_feature.shape
input_feature = input_feature.view(
batch_size, height // window_size, window_size, width // window_size, window_size, num_channels
)
windows = input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)
return windows
# Copied from transformers.models.swin.modeling_swin.window_reverse
def window_reverse(windows, window_size, height, width):
"""
Merges windows to produce higher resolution features.
"""
num_channels = windows.shape[-1]
windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels)
windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels)
return windows
# Copied from transformers.models.beit.modeling_beit.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
random_tensor.floor_() # binarize
output = input.div(keep_prob) * random_tensor
return output
# Copied from transformers.models.swin.modeling_swin.SwinDropPath with Swin->Swin2SR
| Swin2SREncoderOutput |
python | huggingface__transformers | tests/models/layoutlmv3/test_processing_layoutlmv3.py | {
"start": 2855,
"end": 19171
} | class ____(unittest.TestCase):
@cached_property
def get_images(self):
# we verify our implementation on 2 document images from the DocVQA dataset
from datasets import load_dataset
ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
return ds[0]["image"].convert("RGB"), ds[1]["image"].convert("RGB")
@cached_property
def get_tokenizers(self):
slow_tokenizer = LayoutLMv3Tokenizer.from_pretrained("microsoft/layoutlmv3-base", add_visual_labels=False)
fast_tokenizer = LayoutLMv3TokenizerFast.from_pretrained("microsoft/layoutlmv3-base", add_visual_labels=False)
return [slow_tokenizer, fast_tokenizer]
@slow
def test_processor_case_1(self):
# case 1: document image classification (training, inference) + token classification (inference), apply_ocr = True
image_processor = LayoutLMv3ImageProcessor()
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv3Processor(image_processor=image_processor, tokenizer=tokenizer)
# not batched
input_image_proc = image_processor(images[0], return_tensors="pt")
input_processor = processor(images[0], return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "input_ids", "pixel_values"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify image
self.assertAlmostEqual(
input_image_proc["pixel_values"].sum(), input_processor["pixel_values"].sum(), delta=1e-2
)
# verify input_ids
# this was obtained with Tesseract 4.1.1
expected_decoding = "<s> 11:14 to 11:39 a.m 11:39 to 11:44 a.m. 11:44 a.m. to 12:25 p.m. 12:25 to 12:58 p.m. 12:58 to 4:00 p.m. 2:00 to 5:00 p.m. Coffee Break Coffee will be served for men and women in the lobby adjacent to exhibit area. Please move into exhibit area. (Exhibits Open) TRRF GENERAL SESSION (PART |) Presiding: Lee A. Waller TRRF Vice President “Introductory Remarks” Lee A. Waller, TRRF Vice Presi- dent Individual Interviews with TRRF Public Board Members and Sci- entific Advisory Council Mem- bers Conducted by TRRF Treasurer Philip G. Kuehn to get answers which the public refrigerated warehousing industry is looking for. Plus questions from the floor. Dr. Emil M. Mrak, University of Cal- ifornia, Chairman, TRRF Board; Sam R. Cecil, University of Georgia College of Agriculture; Dr. Stanley Charm, Tufts University School of Medicine; Dr. Robert H. Cotton, ITT Continental Baking Company; Dr. Owen Fennema, University of Wis- consin; Dr. Robert E. Hardenburg, USDA. Questions and Answers Exhibits Open Capt. Jack Stoney Room TRRF Scientific Advisory Council Meeting Ballroom Foyer</s>" # fmt: skip
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# batched
input_image_proc = image_processor(images, return_tensors="pt")
input_processor = processor(images, padding=True, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "input_ids", "pixel_values"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify images
self.assertAlmostEqual(
input_image_proc["pixel_values"].sum(), input_processor["pixel_values"].sum(), delta=1e-2
)
# verify input_ids
# this was obtained with Tesseract 4.1.1
expected_decoding = "<s> 7 ITC Limited REPORT AND ACCOUNTS 2013 ITC’s Brands: An Asset for the Nation The consumer needs and aspirations they fulfil, the benefit they generate for millions across ITC’s value chains, the future-ready capabilities that support them, and the value that they create for the country, have made ITC’s brands national assets, adding to India’s competitiveness. It is ITC’s aspiration to be the No 1 FMCG player in the country, driven by its new FMCG businesses. A recent Nielsen report has highlighted that ITC's new FMCG businesses are the fastest growing among the top consumer goods companies operating in India. ITC takes justifiable pride that, along with generating economic value, these celebrated Indian brands also drive the creation of larger societal capital through the virtuous cycle of sustainable and inclusive growth. DI WILLS * ; LOVE DELIGHTFULLY SOFT SKIN? aia Ans Source: https://www.industrydocuments.ucsf.edu/docs/snbx0223</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>" # fmt: skip
decoding = processor.decode(input_processor.input_ids[1].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
@slow
def test_processor_case_2(self):
# case 2: document image classification (training, inference) + token classification (inference), apply_ocr=False
image_processor = LayoutLMv3ImageProcessor(apply_ocr=False)
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv3Processor(image_processor=image_processor, tokenizer=tokenizer)
# not batched
words = ["hello", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
input_processor = processor(images[0], words, boxes=boxes, return_tensors="pt")
# verify keys
expected_keys = ["input_ids", "bbox", "attention_mask", "pixel_values"]
actual_keys = list(input_processor.keys())
for key in expected_keys:
self.assertIn(key, actual_keys)
# verify input_ids
expected_decoding = "<s> hello world</s>"
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# batched
words = [["hello", "world"], ["my", "name", "is", "niels"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
input_processor = processor(images, words, boxes=boxes, padding=True, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "input_ids", "pixel_values"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "<s> hello world</s><pad><pad><pad>"
decoding = processor.decode(input_processor.input_ids[0].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify bbox
expected_bbox = [
[0, 0, 0, 0],
[3, 2, 5, 1],
[6, 7, 4, 2],
[3, 9, 2, 4],
[1, 1, 2, 3],
[1, 1, 2, 3],
[0, 0, 0, 0],
]
self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)
@slow
def test_processor_case_3(self):
# case 3: token classification (training), apply_ocr=False
image_processor = LayoutLMv3ImageProcessor(apply_ocr=False)
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv3Processor(image_processor=image_processor, tokenizer=tokenizer)
# not batched
words = ["weirdly", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
word_labels = [1, 2]
input_processor = processor(images[0], words, boxes=boxes, word_labels=word_labels, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "input_ids", "labels", "pixel_values"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "<s> weirdly world</s>"
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify labels
expected_labels = [-100, 1, -100, 2, -100]
self.assertListEqual(input_processor.labels.squeeze().tolist(), expected_labels)
# batched
words = [["hello", "world"], ["my", "name", "is", "niels"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
word_labels = [[1, 2], [6, 3, 10, 2]]
input_processor = processor(
images, words, boxes=boxes, word_labels=word_labels, padding=True, return_tensors="pt"
)
# verify keys
expected_keys = ["attention_mask", "bbox", "input_ids", "labels", "pixel_values"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "<s> my name is niels</s>"
decoding = processor.decode(input_processor.input_ids[1].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify bbox
expected_bbox = [
[0, 0, 0, 0],
[3, 2, 5, 1],
[6, 7, 4, 2],
[3, 9, 2, 4],
[1, 1, 2, 3],
[1, 1, 2, 3],
[0, 0, 0, 0],
]
self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)
# verify labels
expected_labels = [-100, 6, 3, 10, 2, -100, -100]
self.assertListEqual(input_processor.labels[1].tolist(), expected_labels)
@slow
def test_processor_case_4(self):
# case 4: visual question answering (inference), apply_ocr=True
image_processor = LayoutLMv3ImageProcessor()
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv3Processor(image_processor=image_processor, tokenizer=tokenizer)
# not batched
question = "What's his name?"
input_processor = processor(images[0], question, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "input_ids", "pixel_values"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
# this was obtained with Tesseract 4.1.1
expected_decoding = "<s> What's his name?</s></s> 11:14 to 11:39 a.m 11:39 to 11:44 a.m. 11:44 a.m. to 12:25 p.m. 12:25 to 12:58 p.m. 12:58 to 4:00 p.m. 2:00 to 5:00 p.m. Coffee Break Coffee will be served for men and women in the lobby adjacent to exhibit area. Please move into exhibit area. (Exhibits Open) TRRF GENERAL SESSION (PART |) Presiding: Lee A. Waller TRRF Vice President “Introductory Remarks” Lee A. Waller, TRRF Vice Presi- dent Individual Interviews with TRRF Public Board Members and Sci- entific Advisory Council Mem- bers Conducted by TRRF Treasurer Philip G. Kuehn to get answers which the public refrigerated warehousing industry is looking for. Plus questions from the floor. Dr. Emil M. Mrak, University of Cal- ifornia, Chairman, TRRF Board; Sam R. Cecil, University of Georgia College of Agriculture; Dr. Stanley Charm, Tufts University School of Medicine; Dr. Robert H. Cotton, ITT Continental Baking Company; Dr. Owen Fennema, University of Wis- consin; Dr. Robert E. Hardenburg, USDA. Questions and Answers Exhibits Open Capt. Jack Stoney Room TRRF Scientific Advisory Council Meeting Ballroom Foyer</s>" # fmt: skip
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# batched
questions = ["How old is he?", "what's the time"]
input_processor = processor(
images, questions, padding="max_length", max_length=20, truncation=True, return_tensors="pt"
)
# verify keys
expected_keys = ["attention_mask", "bbox", "input_ids", "pixel_values"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
# this was obtained with Tesseract 4.1.1
expected_decoding = "<s> what's the time</s></s> 7 ITC Limited REPORT AND ACCOUNTS 2013 ITC</s>"
decoding = processor.decode(input_processor.input_ids[1].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify bbox
expected_bbox = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 45, 67, 80], [72, 56, 109, 67], [72, 56, 109, 67], [116, 56, 189, 67], [198, 59, 253, 66], [257, 59, 285, 66], [289, 59, 365, 66], [289, 59, 365, 66], [289, 59, 365, 66], [372, 59, 407, 66], [74, 136, 161, 158], [74, 136, 161, 158], [0, 0, 0, 0]] # fmt: skip
self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)
@slow
def test_processor_case_5(self):
# case 5: visual question answering (inference), apply_ocr=False
image_processor = LayoutLMv3ImageProcessor(apply_ocr=False)
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv3Processor(image_processor=image_processor, tokenizer=tokenizer)
# not batched
question = "What's his name?"
words = ["hello", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
input_processor = processor(images[0], question, words, boxes, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "input_ids", "pixel_values"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "<s> What's his name?</s></s> hello world</s>"
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# batched
questions = ["How old is he?", "what's the time"]
words = [["hello", "world"], ["my", "name", "is", "niels"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
input_processor = processor(images, questions, words, boxes, padding=True, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "input_ids", "pixel_values"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "<s> How old is he?</s></s> hello world</s><pad><pad>"
decoding = processor.decode(input_processor.input_ids[0].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
expected_decoding = "<s> what's the time</s></s> my name is niels</s>"
decoding = processor.decode(input_processor.input_ids[1].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify bbox
expected_bbox = [[6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3], [1, 1, 2, 3], [0, 0, 0, 0]]
self.assertListEqual(input_processor.bbox[1].tolist()[-5:], expected_bbox)
| LayoutLMv3ProcessorIntegrationTests |
python | kamyu104__LeetCode-Solutions | Python/design-hashmap.py | {
"start": 29,
"end": 185
} | class ____(object):
def __init__(self, key, val):
self.val = val
self.key = key
self.next = None
self.prev = None
| ListNode |
python | wandb__wandb | wandb/vendor/pygments/lexers/robotframework.py | {
"start": 11673,
"end": 12207
} | class ____(_Table):
_tokenizer_class = Setting
def __init__(self, template_setter, prev_tokenizer=None):
_Table.__init__(self, prev_tokenizer)
self._template_setter = template_setter
def _tokenize(self, value, index):
if index == 0 and normalize(value) == 'testtemplate':
self._tokenizer = Setting(self._template_setter)
return _Table._tokenize(self, value, index)
def end_row(self):
self.__init__(self._template_setter, prev_tokenizer=self._tokenizer)
| SettingTable |
python | PrefectHQ__prefect | src/integrations/prefect-kubernetes/tests/test_worker.py | {
"start": 53474,
"end": 63236
} | class ____:
@pytest.fixture
def flow_run(self):
return FlowRun(flow_id=uuid.uuid4(), name="my-flow-run-name")
@pytest.fixture
def deployment(self):
return DeploymentResponse(name="my-deployment-name", flow_id=uuid.uuid4())
@pytest.fixture
def work_pool(self):
return WorkPool(name="my-work-pool-name", type="kubernetes")
@pytest.fixture
def flow(self):
return Flow(name="my-flow-name")
@pytest.mark.parametrize(
"template,values,expected_after_template,expected_after_preparation",
from_template_and_values_cases,
ids=[
"default base template with no values",
"default base template with custom env",
"default base template with values",
"custom template with values",
],
)
async def test_job_configuration_preparation(
self,
template,
values,
expected_after_template,
expected_after_preparation,
flow_run,
deployment,
flow,
work_pool,
):
"""Tests that the job configuration is correctly templated and prepared."""
result = await KubernetesWorkerJobConfiguration.from_template_and_values(
base_job_template=template,
values=values,
)
# comparing dictionaries produces cleaner diffs
assert result.model_dump() == expected_after_template.model_dump()
result.prepare_for_flow_run(
flow_run=flow_run,
deployment=deployment,
flow=flow,
work_pool=work_pool,
worker_name="test-worker",
)
assert (
result.model_dump()
== expected_after_preparation(
flow_run=flow_run,
deployment=deployment,
flow=flow,
work_pool=work_pool,
worker_name="test-worker",
).model_dump()
)
async def test_validates_against_an_empty_job(self):
"""We should give a human-friendly error when the user provides an empty custom
Job manifest"""
template = KubernetesWorker.get_default_base_job_template()
template["job_configuration"]["job_manifest"] = {}
with pytest.raises(ValidationError) as excinfo:
await KubernetesWorkerJobConfiguration.from_template_and_values(
template, {}
)
assert len(errs := excinfo.value.errors()) == 1
assert "Job is missing required attributes" in errs[0]["msg"]
assert "/apiVersion" in errs[0]["msg"]
assert "/kind" in errs[0]["msg"]
assert "/spec" in errs[0]["msg"]
async def test_validates_for_a_job_missing_deeper_attributes(self):
"""We should give a human-friendly error when the user provides an incomplete
custom Job manifest"""
template = KubernetesWorker.get_default_base_job_template()
template["job_configuration"]["job_manifest"] = {
"apiVersion": "batch/v1",
"kind": "Job",
"metadata": {},
"spec": {"template": {"spec": {}}},
}
with pytest.raises(ValidationError) as excinfo:
await KubernetesWorkerJobConfiguration.from_template_and_values(
template, {}
)
assert len(errs := excinfo.value.errors()) == 1
assert "Job is missing required attributes" in errs[0]["msg"]
assert "/spec/template/spec/completions" in errs[0]["msg"]
assert "/spec/template/spec/containers" in errs[0]["msg"]
assert "/spec/template/spec/parallelism" in errs[0]["msg"]
assert "/spec/template/spec/restartPolicy" in errs[0]["msg"]
async def test_validates_for_a_job_with_incompatible_values(self):
"""We should give a human-friendly error when the user provides a custom Job
manifest that is attempting to change required values."""
template = KubernetesWorker.get_default_base_job_template()
template["job_configuration"]["job_manifest"] = {
"apiVersion": "v1",
"kind": "JobbledyJunk",
"metadata": {"labels": {}},
"spec": {
"template": {
"spec": {
"parallelism": 1,
"completions": 1,
"restartPolicy": "Never",
"containers": [
{
"name": "prefect-job",
"env": [],
}
],
}
}
},
}
with pytest.raises(ValidationError) as excinfo:
await KubernetesWorkerJobConfiguration.from_template_and_values(
template, {}
)
assert len(errs := excinfo.value.errors()) == 1
assert "Job has incompatible values" in errs[0]["msg"]
assert "/apiVersion must have value 'batch/v1'" in errs[0]["msg"]
assert "/kind must have value 'Job'" in errs[0]["msg"]
async def test_user_supplied_base_job_with_labels(self, flow_run):
"""The user can supply a custom base job with labels and they will be
included in the final manifest"""
template = KubernetesWorker.get_default_base_job_template()
template["job_configuration"]["job_manifest"] = {
"apiVersion": "batch/v1",
"kind": "Job",
"metadata": {"labels": {"my-custom-label": "sweet"}},
"spec": {
"template": {
"spec": {
"parallelism": 1,
"completions": 1,
"restartPolicy": "Never",
"containers": [
{
"name": "prefect-job",
"env": [],
}
],
}
}
},
}
configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
template, {}
)
assert configuration.job_manifest["metadata"]["labels"] == {
# the labels provided in the user's job base
"my-custom-label": "sweet",
}
configuration.prepare_for_flow_run(flow_run)
assert (
configuration.job_manifest["metadata"]["labels"]["my-custom-label"]
== "sweet"
)
async def test_user_can_supply_a_sidecar_container_and_volume(self, flow_run):
"""The user can supply a custom base job that includes more complex
modifications, like a sidecar container and volumes"""
template = KubernetesWorker.get_default_base_job_template()
template["job_configuration"]["job_manifest"] = {
"apiVersion": "batch/v1",
"kind": "Job",
"metadata": {"labels": {}},
"spec": {
"template": {
"spec": {
"parallelism": 1,
"completions": 1,
"restartPolicy": "Never",
"containers": [
{
"name": "prefect-job",
"env": [],
},
{
"name": "my-sidecar",
"image": "cool-peeps/cool-code:latest",
"volumeMounts": [
{"name": "data-volume", "mountPath": "/data/"}
],
},
],
"volumes": [
{"name": "data-volume", "hostPath": "/all/the/data/"}
],
}
}
},
}
configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
template, {}
)
configuration.prepare_for_flow_run(flow_run)
pod = configuration.job_manifest["spec"]["template"]["spec"]
assert pod["volumes"] == [{"name": "data-volume", "hostPath": "/all/the/data/"}]
# the prefect-job container is still populated
assert pod["containers"][0]["name"] == "prefect-job"
assert pod["containers"][0]["args"] == ["prefect", "flow-run", "execute"]
assert pod["containers"][1] == {
"name": "my-sidecar",
"image": "cool-peeps/cool-code:latest",
"volumeMounts": [{"name": "data-volume", "mountPath": "/data/"}],
}
def test_env_can_be_a_list(self):
job_manifest = {
"apiVersion": "batch/v1",
"kind": "Job",
"metadata": {"labels": {"my-custom-label": "sweet"}},
"spec": {
"template": {
"spec": {
"parallelism": 1,
"completions": 1,
"restartPolicy": "Never",
"containers": [
{
"name": "prefect-job",
"env": [],
}
],
}
}
},
}
KubernetesWorkerJobConfiguration(
job_manifest=job_manifest,
env=[
{
"name": "TEST_ENV",
"value": "test",
}
],
)
| TestKubernetesWorkerJobConfiguration |
python | bokeh__bokeh | tests/unit/bokeh/colors/test_util__colors.py | {
"start": 1432,
"end": 3889
} | class ____:
def test_init(self) -> None:
c = bcu.NamedColor("aliceblue", 240, 248, 255)
assert c.name == "aliceblue"
def test_repr(self) -> None:
c = bcu.NamedColor("aliceblue", 240, 248, 255)
assert repr(c) == c.to_css()
def test_to_css(self) -> None:
c = bcu.NamedColor("aliceblue", 240, 248, 255)
assert c.to_css() == "aliceblue"
def test_find(self) -> None:
c = bcu.NamedColor.find("cornflowerblue")
assert c.name == "cornflowerblue"
assert bcu.NamedColor.find("bluey") is None
def test_from_string(self) -> None:
# Name
c = bcu.NamedColor.from_string("blue")
assert c.name == "blue"
# '#rrggbb'
c = bcu.NamedColor.from_string("#A3B20F")
assert (c.r, c.g, c.b, c.a) == (163, 178, 15, 1.0)
c = bcu.NamedColor.from_string("#a3b20f")
assert (c.r, c.g, c.b, c.a) == (163, 178, 15, 1.0)
# '#rrggbbaa'
c = bcu.NamedColor.from_string("#A3B20FC0")
assert (c.r, c.g, c.b, c.a) == (163, 178, 15, 192/255.0)
c = bcu.NamedColor.from_string("#a3b20fc0")
assert (c.r, c.g, c.b, c.a) == (163, 178, 15, 192/255.0)
# '#rgb'
c = bcu.NamedColor.from_string("#7A3")
assert (c.r, c.g, c.b, c.a) == (119, 170, 51, 1.0)
c = bcu.NamedColor.from_string("#7a3")
assert (c.r, c.g, c.b, c.a) == (119, 170, 51, 1.0)
# '#rgba'
c = bcu.NamedColor.from_string("#7A3B")
assert (c.r, c.g, c.b, c.a) == (119, 170, 51, 187/255.0)
c = bcu.NamedColor.from_string("#7a3b")
assert (c.r, c.g, c.b, c.a) == (119, 170, 51, 187/255.0)
# Invalid name
with pytest.raises(ValueError):
bcu.NamedColor.from_string("bluey")
# Invalid hex string
with pytest.raises(ValueError):
bcu.NamedColor.from_string("#")
with pytest.raises(ValueError):
bcu.NamedColor.from_string("#1")
with pytest.raises(ValueError):
bcu.NamedColor.from_string("#12")
with pytest.raises(ValueError):
bcu.NamedColor.from_string("#12345")
with pytest.raises(ValueError):
bcu.NamedColor.from_string("#1234567")
with pytest.raises(ValueError):
bcu.NamedColor.from_string("#123456789")
with pytest.raises(ValueError):
bcu.NamedColor.from_string(" #abc")
| Test_NamedColor |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI059.py | {
"start": 1349,
"end": 1516
} | class ____(Generic[T]): # Only one generic
pass
# syntax errors with starred and keyword arguments from
# https://github.com/astral-sh/ruff/issues/18602
| SomeGeneric |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py | {
"start": 16254,
"end": 19919
} | class ____(LayerRNNCell):
"""The most basic RNN cell.
Note that this cell is not optimized for performance. Please use
`tf.contrib.cudnn_rnn.CudnnRNNTanh` for better performance on GPU.
Args:
num_units: int, The number of units in the RNN cell.
activation: Nonlinearity to use. Default: `tanh`. It could also be string
that is within Keras activation function names.
reuse: (optional) Python boolean describing whether to reuse variables in an
existing scope. If not `True`, and the existing scope already has the
given variables, an error is raised.
name: String, the name of the layer. Layers with the same name will share
weights, but to avoid mistakes we require reuse=True in such cases.
dtype: Default dtype of the layer (default of `None` means use the type of
the first input). Required when `build` is called before `call`.
**kwargs: Dict, keyword named properties for common layer attributes, like
`trainable` etc when constructing the cell from configs of get_config().
"""
def __init__(self,
num_units,
activation=None,
reuse=None,
name=None,
dtype=None,
**kwargs):
warnings.warn("`tf.nn.rnn_cell.BasicRNNCell` is deprecated and will be "
"removed in a future version. This class "
"is equivalent as `tf.keras.layers.SimpleRNNCell`, "
"and will be replaced by that in Tensorflow 2.0.")
super(BasicRNNCell, self).__init__(
_reuse=reuse, name=name, dtype=dtype, **kwargs)
_check_supported_dtypes(self.dtype)
if context.executing_eagerly() and tf_config.list_logical_devices("GPU"):
logging.warning(
"%s: Note that this cell is not optimized for performance. "
"Please use tf.contrib.cudnn_rnn.CudnnRNNTanh for better "
"performance on GPU.", self)
# Inputs must be 2-dimensional.
self.input_spec = input_spec.InputSpec(ndim=2)
self._num_units = num_units
if activation:
self._activation = activations.get(activation)
else:
self._activation = math_ops.tanh
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
@tf_utils.shape_type_conversion
def build(self, inputs_shape):
if inputs_shape[-1] is None:
raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s" %
str(inputs_shape))
_check_supported_dtypes(self.dtype)
input_depth = inputs_shape[-1]
self._kernel = self.add_variable(
_WEIGHTS_VARIABLE_NAME,
shape=[input_depth + self._num_units, self._num_units])
self._bias = self.add_variable(
_BIAS_VARIABLE_NAME,
shape=[self._num_units],
initializer=init_ops.zeros_initializer(dtype=self.dtype))
self.built = True
def call(self, inputs, state):
"""Most basic RNN: output = new_state = act(W * input + U * state + B)."""
_check_rnn_cell_input_dtypes([inputs, state])
gate_inputs = math_ops.matmul(
array_ops.concat([inputs, state], 1), self._kernel)
gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)
output = self._activation(gate_inputs)
return output, output
def get_config(self):
config = {
"num_units": self._num_units,
"activation": activations.serialize(self._activation),
"reuse": self._reuse,
}
base_config = super(BasicRNNCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export(v1=["nn.rnn_cell.GRUCell"])
| BasicRNNCell |
python | PyCQA__pycodestyle | testing/data/E30not.py | {
"start": 979,
"end": 1459
} | class ____():
"""Class Foo"""
def b():
pass
# comment
def c():
pass
# comment
def d():
pass
# This is a
# ... multi-line comment
# And this one is
# ... a second paragraph
# ... which spans on 3 lines
# Function `e` is below
# NOTE: Hey this is a testcase
def e():
pass
def a():
print
# comment
print
print
# Comment 1
# Comment 2
# Comment 3
def b():
pass
#: Okay
def foo():
pass
def bar():
pass
| Foo |
python | ray-project__ray | python/ray/serve/_private/deployment_info.py | {
"start": 335,
"end": 6232
} | class ____:
def __init__(
self,
deployment_config: DeploymentConfig,
replica_config: ReplicaConfig,
start_time_ms: int,
deployer_job_id: str,
actor_name: Optional[str] = None,
version: Optional[str] = None,
end_time_ms: Optional[int] = None,
route_prefix: str = None,
ingress: bool = False,
target_capacity: Optional[float] = None,
target_capacity_direction: Optional[TargetCapacityDirection] = None,
):
self.deployment_config = deployment_config
self.replica_config = replica_config
# The time when .deploy() was first called for this deployment.
self.start_time_ms = start_time_ms
self.actor_name = actor_name
self.version = version
self.deployer_job_id = deployer_job_id
# The time when this deployment was deleted.
self.end_time_ms = end_time_ms
# ephermal state
self._cached_actor_def = None
self.route_prefix = route_prefix
self.ingress = ingress
self.target_capacity = target_capacity
self.target_capacity_direction = target_capacity_direction
def __getstate__(self) -> Dict[Any, Any]:
clean_dict = self.__dict__.copy()
del clean_dict["_cached_actor_def"]
return clean_dict
def __setstate__(self, d: Dict[Any, Any]) -> None:
self.__dict__ = d
self._cached_actor_def = None
def update(
self,
deployment_config: DeploymentConfig = None,
replica_config: ReplicaConfig = None,
version: str = None,
route_prefix: str = None,
) -> "DeploymentInfo":
return DeploymentInfo(
deployment_config=deployment_config or self.deployment_config,
replica_config=replica_config or self.replica_config,
start_time_ms=self.start_time_ms,
deployer_job_id=self.deployer_job_id,
actor_name=self.actor_name,
version=version or self.version,
end_time_ms=self.end_time_ms,
route_prefix=route_prefix or self.route_prefix,
ingress=self.ingress,
target_capacity=self.target_capacity,
target_capacity_direction=self.target_capacity_direction,
)
def set_target_capacity(
self,
new_target_capacity: Optional[float],
new_target_capacity_direction: Optional[TargetCapacityDirection],
):
self.target_capacity = new_target_capacity
self.target_capacity_direction = new_target_capacity_direction
def config_changed(self, other) -> bool:
return (
self.deployment_config != other.deployment_config
or self.replica_config.ray_actor_options
!= other.replica_config.ray_actor_options
or other.version is None
or self.version != other.version
)
@property
def actor_def(self):
if self._cached_actor_def is None:
assert self.actor_name is not None
# Break circular import :(.
from ray.serve._private.replica import ReplicaActor
# Dynamically create a new class with custom name here so Ray picks it up
# correctly in actor metadata table and observability stack.
self._cached_actor_def = ray.remote(
type(
self.actor_name,
(ReplicaActor,),
dict(ReplicaActor.__dict__),
)
)
return self._cached_actor_def
@classmethod
def from_proto(cls, proto: DeploymentInfoProto):
deployment_config = (
DeploymentConfig.from_proto(proto.deployment_config)
if proto.deployment_config
else None
)
target_capacity = proto.target_capacity if proto.target_capacity != -1 else None
target_capacity_direction = TargetCapacityDirectionProto.Name(
proto.target_capacity_direction
)
if target_capacity_direction == "UNSET":
target_capacity_direction = None
else:
target_capacity_direction = TargetCapacityDirection(
target_capacity_direction
)
data = {
"deployment_config": deployment_config,
"replica_config": ReplicaConfig.from_proto(
proto.replica_config,
deployment_config.needs_pickle() if deployment_config else True,
),
"start_time_ms": proto.start_time_ms,
"actor_name": proto.actor_name if proto.actor_name != "" else None,
"version": proto.version if proto.version != "" else None,
"end_time_ms": proto.end_time_ms if proto.end_time_ms != 0 else None,
"deployer_job_id": ray.get_runtime_context().get_job_id(),
"target_capacity": target_capacity,
"target_capacity_direction": target_capacity_direction,
}
return cls(**data)
def to_proto(self):
data = {
"start_time_ms": self.start_time_ms,
"actor_name": self.actor_name,
"version": self.version,
"end_time_ms": self.end_time_ms,
}
if self.deployment_config:
data["deployment_config"] = self.deployment_config.to_proto()
if self.replica_config:
data["replica_config"] = self.replica_config.to_proto()
if self.target_capacity is None:
data["target_capacity"] = -1
else:
data["target_capacity"] = self.target_capacity
if self.target_capacity_direction is None:
data["target_capacity_direction"] = TargetCapacityDirectionProto.UNSET
else:
data["target_capacity_direction"] = self.target_capacity_direction.name
return DeploymentInfoProto(**data)
| DeploymentInfo |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 160792,
"end": 162517
} | class ____(Response):
"""
Response of tasks.close endpoint.
:param updated: Number of tasks updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
"""
_service = "tasks"
_action = "close"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"updated": {
"description": "Number of tasks updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(self, updated=None, fields=None, **kwargs):
super(CloseResponse, self).__init__(**kwargs)
self.updated = updated
self.fields = fields
@schema_property("updated")
def updated(self):
return self._property_updated
@updated.setter
def updated(self, value):
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("fields")
def fields(self):
return self._property_fields
@fields.setter
def fields(self, value):
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
| CloseResponse |
python | automl__auto-sklearn | autosklearn/pipeline/implementations/CategoryShift.py | {
"start": 141,
"end": 1516
} | class ____(BaseEstimator, TransformerMixin):
"""Add 3 to every category."""
def __init__(self, random_state=None):
self.random_state = random_state
def _convert_and_check_X(self, X):
X_data = X.data if sparse.issparse(X) else X
# Check if data is numeric and positive
if X_data.dtype.kind not in set("buif") or np.nanmin(X_data) < 0:
raise ValueError(
"Categories should be non-negative numbers. "
"NOTE: floats will be casted to integers."
)
# Use check_array to make sure we are using the right kind of sparse array
# Notice that we cannot convert the array to integer right now. That would get
# rid of the np.nans and we need them later on for the imputation.
X = check_array(X, accept_sparse="csc", force_all_finite=False, copy=True)
return X
def fit(self, X, y=None):
self._convert_and_check_X(X)
return self
def transform(self, X):
X = self._convert_and_check_X(X)
# Increment everything by three to account for the fact that
# np.NaN will get an index of two, and coalesced values will get index of
# one, index of zero is not assigned to also work with sparse data
X_data = X.data if sparse.issparse(X) else X
X_data += 3
return X
| CategoryShift |
python | mlflow__mlflow | mlflow/types/llm.py | {
"start": 21347,
"end": 22656
} | class ____(_BaseDataclass):
"""
Message content token with log probability information.
Args:
token: The token.
logprob: The log probability of this token, if it is within the top
20 most likely tokens. Otherwise, the value -9999.0 is used to
signify that the token is very unlikely.
bytes: A list of integers representing the UTF-8 bytes representation
of the token. Useful in instances where characters are represented
by multiple tokens and their byte representations must be combined
to generate the correct text representation. Can be null if there
is no bytes representation for the token.
top_logprobs: List of the most likely tokens and their log probability,
at this token position. In rare cases, there may be fewer than the
number of requested top_logprobs returned.
"""
token: str
logprob: float
top_logprobs: list[TopTokenLogProb]
bytes: list[int] | None = None
def __post_init__(self):
self._validate_field("token", str, True)
self._validate_field("logprob", float, True)
self._convert_dataclass_list("top_logprobs", TopTokenLogProb)
self._validate_list("bytes", int, False)
@dataclass
| TokenLogProb |
python | lxml__lxml | src/lxml/tests/test_threading.py | {
"start": 252,
"end": 13541
} | class ____(HelperTestCase):
"""Threading tests"""
etree = etree
def _run_thread(self, func):
thread = threading.Thread(target=func)
thread.start()
thread.join()
def _run_threads(self, count, func, main_func=None):
sync = threading.Event()
lock = threading.Lock()
counter = dict(started=0, finished=0, failed=0)
def sync_start(func):
with lock:
started = counter['started'] + 1
counter['started'] = started
if started < count + (main_func is not None):
sync.wait(4) # wait until the other threads have started up
assert sync.is_set()
sync.set() # all waiting => go!
try:
func()
except:
with lock:
counter['failed'] += 1
raise
else:
with lock:
counter['finished'] += 1
threads = [threading.Thread(target=sync_start, args=(func,)) for _ in range(count)]
for thread in threads:
thread.start()
if main_func is not None:
sync_start(main_func)
for thread in threads:
thread.join()
self.assertEqual(0, counter['failed'])
self.assertEqual(counter['finished'], counter['started'])
def test_subtree_copy_thread(self):
tostring = self.etree.tostring
XML = self.etree.XML
xml = b"<root><threadtag/></root>"
main_root = XML(b"<root/>")
def run_thread():
thread_root = XML(xml)
main_root.append(thread_root[0])
del thread_root
self._run_thread(run_thread)
self.assertEqual(xml, tostring(main_root))
def test_main_xslt_in_thread(self):
XML = self.etree.XML
style = XML(b'''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*">
<foo><xsl:copy><xsl:value-of select="/a/b/text()" /></xsl:copy></foo>
</xsl:template>
</xsl:stylesheet>''')
st = etree.XSLT(style)
result = []
def run_thread():
root = XML(b'<a><b>B</b><c>C</c></a>')
result.append( st(root) )
self._run_thread(run_thread)
self.assertEqual('''\
<?xml version="1.0"?>
<foo><a>B</a></foo>
''',
str(result[0]))
def test_thread_xslt(self):
XML = self.etree.XML
tostring = self.etree.tostring
root = XML(b'<a><b>B</b><c>C</c></a>')
def run_thread():
style = XML(b'''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*">
<foo><xsl:copy><xsl:value-of select="/a/b/text()" /></xsl:copy></foo>
</xsl:template>
</xsl:stylesheet>''')
st = etree.XSLT(style)
root.append( st(root).getroot() )
self._run_thread(run_thread)
self.assertEqual(b'<a><b>B</b><c>C</c><foo><a>B</a></foo></a>',
tostring(root))
def test_thread_xslt_parsing_error_log(self):
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="tag" />
<!-- extend time for parsing + transform -->
''' + '\n'.join('<xsl:template match="tag%x" />' % i for i in range(200)) + '''
<xsl:UnExpectedElement />
</xsl:stylesheet>''')
self.assertRaises(etree.XSLTParseError,
etree.XSLT, style)
error_logs = []
def run_thread():
try:
etree.XSLT(style)
except etree.XSLTParseError as e:
error_logs.append(e.error_log)
else:
self.assertFalse(True, "XSLT parsing should have failed but didn't")
self._run_threads(16, run_thread)
self.assertEqual(16, len(error_logs))
last_log = None
for log in error_logs:
self.assertTrue(len(log))
if last_log is not None:
self.assertEqual(len(last_log), len(log))
self.assertTrue(len(log) >= 2, len(log))
for error in log:
self.assertTrue(':ERROR:XSLT:' in str(error), str(error))
self.assertTrue(any('UnExpectedElement' in str(error) for error in log), log)
last_log = log
def test_thread_xslt_apply_error_log(self):
tree = self.parse('<tagFF/>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template name="tag0">
<xsl:message terminate="yes">FAIL</xsl:message>
</xsl:template>
<!-- extend time for parsing + transform -->
''' + '\n'.join('<xsl:template match="tag%X" name="tag%x"> <xsl:call-template name="tag%x" /> </xsl:template>' % (i, i, i-1)
for i in range(1, 256)) + '''
</xsl:stylesheet>''')
self.assertRaises(etree.XSLTApplyError,
etree.XSLT(style), tree)
error_logs = []
def run_thread():
transform = etree.XSLT(style)
try:
transform(tree)
except etree.XSLTApplyError:
error_logs.append(transform.error_log)
else:
self.assertFalse(True, "XSLT parsing should have failed but didn't")
self._run_threads(16, run_thread)
self.assertEqual(16, len(error_logs))
last_log = None
for log in error_logs:
self.assertTrue(len(log))
if last_log is not None:
self.assertEqual(len(last_log), len(log))
self.assertEqual(1, len(log))
for error in log:
self.assertTrue(':ERROR:XSLT:' in str(error))
last_log = log
def test_thread_xslt_attr_replace(self):
# this is the only case in XSLT where the result tree can be
# modified in-place
XML = self.etree.XML
tostring = self.etree.tostring
style = self.etree.XSLT(XML(b'''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*">
<root class="abc">
<xsl:copy-of select="@class" />
<xsl:attribute name="class">xyz</xsl:attribute>
</root>
</xsl:template>
</xsl:stylesheet>'''))
result = []
def run_thread():
root = XML(b'<ROOT class="ABC" />')
result.append( style(root).getroot() )
self._run_thread(run_thread)
self.assertEqual(b'<root class="xyz"/>',
tostring(result[0]))
def test_thread_create_xslt(self):
XML = self.etree.XML
tostring = self.etree.tostring
root = XML(b'<a><b>B</b><c>C</c></a>')
stylesheets = []
def run_thread():
style = XML(b'''\
<xsl:stylesheet
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0">
<xsl:output method="xml" />
<xsl:template match="/">
<div id="test">
<xsl:apply-templates/>
</div>
</xsl:template>
</xsl:stylesheet>''')
stylesheets.append( etree.XSLT(style) )
self._run_thread(run_thread)
st = stylesheets[0]
result = tostring( st(root) )
self.assertEqual(b'<div id="test">BC</div>',
result)
def test_thread_error_log(self):
XML = self.etree.XML
expected_error = [self.etree.ErrorTypes.ERR_TAG_NAME_MISMATCH]
children = "<a>test</a>" * 100
def parse_error_test(thread_no):
tag = "tag%d" % thread_no
xml = "<%s>%s</%s>" % (tag, children, tag.upper())
parser = self.etree.XMLParser()
for _ in range(10):
errors = None
try:
XML(xml, parser)
except self.etree.ParseError:
e = sys.exc_info()[1]
errors = e.error_log.filter_types(expected_error)
self.assertTrue(errors, "Expected error not found")
for error in errors:
self.assertTrue(
tag in error.message and tag.upper() in error.message,
"%s and %s not found in '%s'" % (
tag, tag.upper(), error.message))
self.etree.clear_error_log()
threads = []
for thread_no in range(1, 10):
t = threading.Thread(target=parse_error_test,
args=(thread_no,))
threads.append(t)
t.start()
parse_error_test(0)
for t in threads:
t.join()
def test_thread_mix(self):
XML = self.etree.XML
Element = self.etree.Element
SubElement = self.etree.SubElement
tostring = self.etree.tostring
xml = b'<a><b>B</b><c xmlns="test">C</c></a>'
root = XML(xml)
fragment = XML(b"<other><tags/></other>")
result = self.etree.Element("{myns}root", att = "someval")
def run_XML():
thread_root = XML(xml)
result.append(thread_root[0])
result.append(thread_root[-1])
def run_parse():
thread_root = self.etree.parse(BytesIO(xml)).getroot()
result.append(thread_root[0])
result.append(thread_root[-1])
def run_move_main():
result.append(fragment[0])
def run_build():
result.append(
Element("{myns}foo", attrib={'{test}attr':'val'}))
SubElement(result, "{otherns}tasty")
def run_xslt():
style = XML(b'''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*">
<xsl:copy><foo><xsl:value-of select="/a/b/text()" /></foo></xsl:copy>
</xsl:template>
</xsl:stylesheet>''')
st = etree.XSLT(style)
result.append( st(root).getroot() )
for test in (run_XML, run_parse, run_move_main, run_xslt, run_build):
tostring(result)
self._run_thread(test)
self.assertEqual(
b'<ns0:root xmlns:ns0="myns" att="someval"><b>B</b>'
b'<c xmlns="test">C</c><b>B</b><c xmlns="test">C</c><tags/>'
b'<a><foo>B</foo></a>'
b'<ns0:foo xmlns:ns1="test" ns1:attr="val"/>'
b'<ns1:tasty xmlns:ns1="otherns"/></ns0:root>',
tostring(result))
def strip_first():
root = Element("newroot")
root.append(result[0])
while len(result):
self._run_thread(strip_first)
self.assertEqual(
b'<ns0:root xmlns:ns0="myns" att="someval"/>',
tostring(result))
def test_concurrent_attribute_names_in_dicts(self):
SubElement = self.etree.SubElement
names = list('abcdefghijklmnop')
runs_per_name = range(50)
result_matches = re.compile(
br'<thread_root>'
br'(?:<[a-p]{5} thread_attr_[a-p]="value" thread_attr2_[a-p]="value2"\s?/>)+'
br'</thread_root>').match
def testrun():
for _ in range(3):
root = self.etree.Element('thread_root')
for name in names:
tag_name = name * 5
new = []
for _ in runs_per_name:
el = SubElement(root, tag_name, {'thread_attr_' + name: 'value'})
new.append(el)
for el in new:
el.set('thread_attr2_' + name, 'value2')
s = etree.tostring(root)
self.assertTrue(result_matches(s))
# first, run only in sub-threads
self._run_threads(10, testrun)
# then, additionally include the main thread (and its parent dict)
self._run_threads(10, testrun, main_func=testrun)
def test_concurrent_proxies(self):
XML = self.etree.XML
root = XML(b'<root><a>A</a><b xmlns="test">B</b><c/></root>')
child_count = len(root)
def testrun():
for i in range(10000):
el = root[i%child_count]
del el
self._run_threads(10, testrun)
def test_concurrent_class_lookup(self):
XML = self.etree.XML
class TestElement(etree.ElementBase):
pass
class MyLookup(etree.CustomElementClassLookup):
repeat = range(100)
def lookup(self, t, d, ns, name):
count = 0
for i in self.repeat:
# allow other threads to run
count += 1
return TestElement
parser = self.etree.XMLParser()
parser.set_element_class_lookup(MyLookup())
root = XML(b'<root><a>A</a><b xmlns="test">B</b><c/></root>',
parser)
child_count = len(root)
def testrun():
for i in range(1000):
el = root[i%child_count]
del el
self._run_threads(10, testrun)
| ThreadingTestCase |
python | django__django | django/contrib/postgres/lookups.py | {
"start": 1034,
"end": 1125
} | class ____(HasKeys):
lookup_name = "has_any_keys"
postgres_operator = "?|"
| HasAnyKeys |
python | huggingface__transformers | src/transformers/models/bros/modeling_bros.py | {
"start": 28225,
"end": 32626
} | class ____(BrosPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bros = BrosModel(config)
classifier_dropout = (
config.classifier_dropout if hasattr(config, "classifier_dropout") else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
bbox: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
bbox_first_token_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
r"""
bbox ('torch.FloatTensor' of shape '(batch_size, num_boxes, 4)'):
Bounding box coordinates for each token in the input sequence. Each bounding box is a list of four values
(x1, y1, x2, y2), where (x1, y1) is the top left corner, and (x2, y2) is the bottom right corner of the
bounding box.
bbox_first_token_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to indicate the first token of each bounding box. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
Examples:
```python
>>> import torch
>>> from transformers import BrosProcessor, BrosForTokenClassification
>>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
>>> model = BrosForTokenClassification.from_pretrained("jinho8345/bros-base-uncased")
>>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
>>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
>>> encoding["bbox"] = bbox
>>> outputs = model(**encoding)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bros(
input_ids,
bbox=bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
if bbox_first_token_mask is not None:
bbox_first_token_mask = bbox_first_token_mask.view(-1)
loss = loss_fct(
logits.view(-1, self.num_labels)[bbox_first_token_mask], labels.view(-1)[bbox_first_token_mask]
)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring(
custom_intro="""
Bros Model with a token classification head on top (initial_token_layers and subsequent_token_layer on top of the
hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. The initial_token_classifier is used to
predict the first token of each entity, and the subsequent_token_classifier is used to predict the subsequent
tokens within an entity. Compared to BrosForTokenClassification, this model is more robust to serialization errors
since it predicts next token from one token.
"""
)
| BrosForTokenClassification |
python | getsentry__sentry | src/sentry/notifications/platform/target.py | {
"start": 534,
"end": 730
} | class ____(Exception):
pass
INTEGRATION_PROVIDER_KEYS = [
NotificationProviderKey.SLACK,
NotificationProviderKey.DISCORD,
NotificationProviderKey.MSTEAMS,
]
| NotificationTargetError |
python | aio-libs__aiohttp | aiohttp/compression_utils.py | {
"start": 814,
"end": 962
} | class ____(Protocol):
def compress(self, data: Buffer) -> bytes: ...
def flush(self, mode: int = ..., /) -> bytes: ...
| ZLibCompressObjProtocol |
python | getsentry__sentry | tests/sentry/integrations/vsts/test_provider.py | {
"start": 10827,
"end": 12541
} | class ____(TestCase):
client_secret = "12345678"
def setUp(self) -> None:
self.identity_provider_model = self.create_identity_provider(type="vsts")
self.identity = Identity.objects.create(
idp=self.identity_provider_model,
user=self.user,
external_id="vsts_id",
data={
"access_token": "123456789",
"token_type": "token_type",
"expires": 12345678,
"refresh_token": "n354678",
},
)
self.provider = VSTSIdentityProvider()
@pytest.fixture(autouse=True)
def patch_get_oauth_client_secret(self) -> Generator[None]:
with patch.object(
VSTSIdentityProvider, "get_oauth_client_secret", return_value=self.client_secret
):
yield
@responses.activate
def test_refresh_identity(self) -> None:
refresh_data = {
"access_token": "access token for this user",
"token_type": "type of token",
"expires": 1234567,
"refresh_token": "new refresh token to use when the token has timed out",
}
responses.add(
responses.POST, "https://app.vssps.visualstudio.com/oauth2/token", json=refresh_data
)
self.provider.refresh_identity(self.identity, redirect_url="redirect_url")
assert len(responses.calls) == 1
new_identity = Identity.objects.get(id=self.identity.id)
assert new_identity.data["access_token"] == refresh_data["access_token"]
assert new_identity.data["token_type"] == refresh_data["token_type"]
assert new_identity.data["expires"] <= int(time())
| VstsIdentityProviderTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methodOverride1.py | {
"start": 11871,
"end": 11953
} | class ____:
def case(self, value: Any) -> Iterable[Any]:
return []
| Base3 |
python | pyca__cryptography | src/cryptography/x509/extensions.py | {
"start": 34776,
"end": 39109
} | class ____(ExtensionType):
oid = ExtensionOID.KEY_USAGE
def __init__(
self,
digital_signature: bool,
content_commitment: bool,
key_encipherment: bool,
data_encipherment: bool,
key_agreement: bool,
key_cert_sign: bool,
crl_sign: bool,
encipher_only: bool,
decipher_only: bool,
) -> None:
if not key_agreement and (encipher_only or decipher_only):
raise ValueError(
"encipher_only and decipher_only can only be true when "
"key_agreement is true"
)
self._digital_signature = digital_signature
self._content_commitment = content_commitment
self._key_encipherment = key_encipherment
self._data_encipherment = data_encipherment
self._key_agreement = key_agreement
self._key_cert_sign = key_cert_sign
self._crl_sign = crl_sign
self._encipher_only = encipher_only
self._decipher_only = decipher_only
@property
def digital_signature(self) -> bool:
return self._digital_signature
@property
def content_commitment(self) -> bool:
return self._content_commitment
@property
def key_encipherment(self) -> bool:
return self._key_encipherment
@property
def data_encipherment(self) -> bool:
return self._data_encipherment
@property
def key_agreement(self) -> bool:
return self._key_agreement
@property
def key_cert_sign(self) -> bool:
return self._key_cert_sign
@property
def crl_sign(self) -> bool:
return self._crl_sign
@property
def encipher_only(self) -> bool:
if not self.key_agreement:
raise ValueError(
"encipher_only is undefined unless key_agreement is true"
)
else:
return self._encipher_only
@property
def decipher_only(self) -> bool:
if not self.key_agreement:
raise ValueError(
"decipher_only is undefined unless key_agreement is true"
)
else:
return self._decipher_only
def __repr__(self) -> str:
try:
encipher_only = self.encipher_only
decipher_only = self.decipher_only
except ValueError:
# Users found None confusing because even though encipher/decipher
# have no meaning unless key_agreement is true, to construct an
# instance of the class you still need to pass False.
encipher_only = False
decipher_only = False
return (
f"<KeyUsage(digital_signature={self.digital_signature}, "
f"content_commitment={self.content_commitment}, "
f"key_encipherment={self.key_encipherment}, "
f"data_encipherment={self.data_encipherment}, "
f"key_agreement={self.key_agreement}, "
f"key_cert_sign={self.key_cert_sign}, crl_sign={self.crl_sign}, "
f"encipher_only={encipher_only}, decipher_only={decipher_only})>"
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, KeyUsage):
return NotImplemented
return (
self.digital_signature == other.digital_signature
and self.content_commitment == other.content_commitment
and self.key_encipherment == other.key_encipherment
and self.data_encipherment == other.data_encipherment
and self.key_agreement == other.key_agreement
and self.key_cert_sign == other.key_cert_sign
and self.crl_sign == other.crl_sign
and self._encipher_only == other._encipher_only
and self._decipher_only == other._decipher_only
)
def __hash__(self) -> int:
return hash(
(
self.digital_signature,
self.content_commitment,
self.key_encipherment,
self.data_encipherment,
self.key_agreement,
self.key_cert_sign,
self.crl_sign,
self._encipher_only,
self._decipher_only,
)
)
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
| KeyUsage |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.