language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | networkx__networkx | networkx/algorithms/tests/test_cycles.py | {
"start": 25929,
"end": 30906
} | class ____:
@classmethod
def setup_class(cls):
cls.nodes = [0, 1, 2, 3]
cls.edges = [(-1, 0), (0, 1), (1, 0), (1, 0), (2, 1), (3, 1)]
def test_graph_nocycle(self):
G = nx.Graph(self.edges)
pytest.raises(nx.exception.NetworkXNoCycle, nx.find_cycle, G, self.nodes)
def test_graph_cycle(self):
G = nx.Graph(self.edges)
G.add_edge(2, 0)
x = list(nx.find_cycle(G, self.nodes))
x_ = [(0, 1), (1, 2), (2, 0)]
assert x == x_
def test_graph_orientation_none(self):
G = nx.Graph(self.edges)
G.add_edge(2, 0)
x = list(nx.find_cycle(G, self.nodes, orientation=None))
x_ = [(0, 1), (1, 2), (2, 0)]
assert x == x_
def test_graph_orientation_original(self):
G = nx.Graph(self.edges)
G.add_edge(2, 0)
x = list(nx.find_cycle(G, self.nodes, orientation="original"))
x_ = [(0, 1, FORWARD), (1, 2, FORWARD), (2, 0, FORWARD)]
assert x == x_
def test_digraph(self):
G = nx.DiGraph(self.edges)
x = list(nx.find_cycle(G, self.nodes))
x_ = [(0, 1), (1, 0)]
assert x == x_
def test_digraph_orientation_none(self):
G = nx.DiGraph(self.edges)
x = list(nx.find_cycle(G, self.nodes, orientation=None))
x_ = [(0, 1), (1, 0)]
assert x == x_
def test_digraph_orientation_original(self):
G = nx.DiGraph(self.edges)
x = list(nx.find_cycle(G, self.nodes, orientation="original"))
x_ = [(0, 1, FORWARD), (1, 0, FORWARD)]
assert x == x_
def test_multigraph(self):
G = nx.MultiGraph(self.edges)
x = list(nx.find_cycle(G, self.nodes))
x_ = [(0, 1, 0), (1, 0, 1)] # or (1, 0, 2)
# Hash randomization...could be any edge.
assert x[0] == x_[0]
assert x[1][:2] == x_[1][:2]
def test_multidigraph(self):
G = nx.MultiDiGraph(self.edges)
x = list(nx.find_cycle(G, self.nodes))
x_ = [(0, 1, 0), (1, 0, 0)] # (1, 0, 1)
assert x[0] == x_[0]
assert x[1][:2] == x_[1][:2]
def test_digraph_ignore(self):
G = nx.DiGraph(self.edges)
x = list(nx.find_cycle(G, self.nodes, orientation="ignore"))
x_ = [(0, 1, FORWARD), (1, 0, FORWARD)]
assert x == x_
def test_digraph_reverse(self):
G = nx.DiGraph(self.edges)
x = list(nx.find_cycle(G, self.nodes, orientation="reverse"))
x_ = [(1, 0, REVERSE), (0, 1, REVERSE)]
assert x == x_
def test_multidigraph_ignore(self):
G = nx.MultiDiGraph(self.edges)
x = list(nx.find_cycle(G, self.nodes, orientation="ignore"))
x_ = [(0, 1, 0, FORWARD), (1, 0, 0, FORWARD)] # or (1, 0, 1, 1)
assert x[0] == x_[0]
assert x[1][:2] == x_[1][:2]
assert x[1][3] == x_[1][3]
def test_multidigraph_ignore2(self):
# Loop traversed an edge while ignoring its orientation.
G = nx.MultiDiGraph([(0, 1), (1, 2), (1, 2)])
x = list(nx.find_cycle(G, [0, 1, 2], orientation="ignore"))
x_ = [(1, 2, 0, FORWARD), (1, 2, 1, REVERSE)]
assert x == x_
def test_multidigraph_original(self):
# Node 2 doesn't need to be searched again from visited from 4.
# The goal here is to cover the case when 2 to be researched from 4,
# when 4 is visited from the first time (so we must make sure that 4
# is not visited from 2, and hence, we respect the edge orientation).
G = nx.MultiDiGraph([(0, 1), (1, 2), (2, 3), (4, 2)])
pytest.raises(
nx.exception.NetworkXNoCycle,
nx.find_cycle,
G,
[0, 1, 2, 3, 4],
orientation="original",
)
def test_dag(self):
G = nx.DiGraph([(0, 1), (0, 2), (1, 2)])
pytest.raises(
nx.exception.NetworkXNoCycle, nx.find_cycle, G, orientation="original"
)
x = list(nx.find_cycle(G, orientation="ignore"))
assert x == [(0, 1, FORWARD), (1, 2, FORWARD), (0, 2, REVERSE)]
def test_prev_explored(self):
# https://github.com/networkx/networkx/issues/2323
G = nx.DiGraph()
G.add_edges_from([(1, 0), (2, 0), (1, 2), (2, 1)])
pytest.raises(nx.NetworkXNoCycle, nx.find_cycle, G, source=0)
x = list(nx.find_cycle(G, 1))
x_ = [(1, 2), (2, 1)]
assert x == x_
x = list(nx.find_cycle(G, 2))
x_ = [(2, 1), (1, 2)]
assert x == x_
x = list(nx.find_cycle(G))
x_ = [(1, 2), (2, 1)]
assert x == x_
def test_no_cycle(self):
# https://github.com/networkx/networkx/issues/2439
G = nx.DiGraph()
G.add_edges_from([(1, 2), (2, 0), (3, 1), (3, 2)])
pytest.raises(nx.NetworkXNoCycle, nx.find_cycle, G, source=0)
pytest.raises(nx.NetworkXNoCycle, nx.find_cycle, G)
def assert_basis_equal(a, b):
assert sorted(a) == sorted(b)
| TestFindCycle |
python | getsentry__sentry | src/sentry/relocation/services/relocation_export/model.py | {
"start": 230,
"end": 500
} | class ____(pydantic.BaseModel):
relocation_uuid: str
requesting_region_name: str
replying_region_name: str
org_slug: str
# encrypted_bytes excluded, as receivers are expected to manually read them from filestore.
| RelocationExportReplyWithExportParameters |
python | walkccc__LeetCode | solutions/3472. Longest Palindromic Subsequence After at Most K Operations/3472.py | {
"start": 0,
"end": 744
} | class ____:
# Similar to 516. Longest Palindromic Subsequence
def longestPalindromicSubsequence(self, s: str, k: int) -> int:
@functools.lru_cache(None)
def dp(i: int, j: int, op: int) -> int:
"""Returns the length of LPS(s[i..j]) with at most `op` operations."""
if i > j:
return 0
if i == j:
return 1
if s[i] == s[j]:
return 2 + dp(i + 1, j - 1, op)
res = max(dp(i + 1, j, op), dp(i, j - 1, op))
cost = self._getCost(s[i], s[j])
if cost <= op:
res = max(res, 2 + dp(i + 1, j - 1, op - cost))
return res
return dp(0, len(s) - 1, k)
def _getCost(self, a: str, b: str) -> int:
dist = abs(ord(a) - ord(b))
return min(dist, 26 - dist)
| Solution |
python | doocs__leetcode | solution/0700-0799/0702.Search in a Sorted Array of Unknown Size/Solution.py | {
"start": 182,
"end": 570
} | class ____:
def search(self, reader: "ArrayReader", target: int) -> int:
r = 1
while reader.get(r) < target:
r <<= 1
l = r >> 1
while l < r:
mid = (l + r) >> 1
if reader.get(mid) >= target:
r = mid
else:
l = mid + 1
return l if reader.get(l) == target else -1
| Solution |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 11754,
"end": 11857
} | class ____(models.Model):
profile = models.ForeignKey(Profile, on_delete=models.CASCADE)
| AdminProfile |
python | bokeh__bokeh | src/bokeh/server/auth_provider.py | {
"start": 7751,
"end": 9588
} | class ____(AuthProvider):
''' A default no-auth AuthProvider.
All of the properties of this provider return None.
'''
@property
def get_user(self):
return None
@property
def get_user_async(self):
return None
@property
def login_url(self):
return None
@property
def get_login_url(self):
return None
@property
def login_handler(self):
return None
@property
def logout_url(self):
return None
@property
def logout_handler(self):
return None
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def load_auth_module(module_path: PathLike) -> ModuleType:
''' Load a Python source file at a given path as a module.
Arguments:
module_path (str): path to a Python source file
Returns
module
'''
module_name = "bokeh.auth_" + make_globally_unique_id().replace('-', '')
spec = importlib.util.spec_from_file_location(module_name, module_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
def probably_relative_url(url: str) -> bool:
''' Return True if a URL is not one of the common absolute URL formats.
Arguments:
url (str): a URL string
Returns
bool
'''
return not url.startswith(("http://", "https://", "//"))
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| NullAuth |
python | milvus-io__pymilvus | tests/test_decorators.py | {
"start": 261,
"end": 5157
} | class ____:
def mock_failure(self, code: grpc.StatusCode):
if code in IGNORE_RETRY_CODES:
raise MockUnRetriableError(code)
if code == MockUnavailableError().code():
raise MockUnavailableError
if code == MockDeadlineExceededError().code():
raise MockDeadlineExceededError
def mock_milvus_exception(self, code: ErrorCode):
if code == ErrorCode.FORCE_DENY:
raise MockForceDenyError
if code == ErrorCode.RATE_LIMIT:
raise MockRateLimitError
raise MilvusException(ErrorCode.UNEXPECTED_ERROR, "unexpected error")
@pytest.mark.parametrize("times", [0, 1, 2, 3])
def test_retry_decorators_unavailable(self, times):
self.count_test_retry_decorators_Unavailable = 0
@retry_on_rpc_failure(retry_times=times)
def test_api(self, code):
self.count_test_retry_decorators_Unavailable += 1
self.mock_failure(code)
with pytest.raises(MilvusException, match="unavailable"):
test_api(self, grpc.StatusCode.UNAVAILABLE)
# the first execute + retry times
assert self.count_test_retry_decorators_Unavailable == times + 1
def test_retry_decorators_timeout(self):
self.count_test_retry_decorators_timeout = 0
@retry_on_rpc_failure()
def test_api(self, code, timeout=None):
self.count_test_retry_decorators_timeout += 1
time.sleep(1)
self.mock_failure(code)
with pytest.raises(MilvusException):
test_api(self, grpc.StatusCode.UNAVAILABLE, timeout=1)
assert self.count_test_retry_decorators_timeout == 1
@pytest.mark.skip("Do not open this unless you have loads of time, get some coffee and wait")
def test_retry_decorators_default_behaviour(self):
self.test_retry_decorators_default_retry_times = 0
@retry_on_rpc_failure()
def test_api(self, code):
self.test_retry_decorators_default_retry_times += 1
self.mock_failure(code)
with pytest.raises(MilvusException):
test_api(self, grpc.StatusCode.UNAVAILABLE)
assert self.test_retry_decorators_default_retry_times == 7 + 1
def test_retry_decorators_force_deny(self):
self.execute_times = 0
@retry_on_rpc_failure()
def test_api(self, code):
self.execute_times += 1
self.mock_milvus_exception(code)
with pytest.raises(MilvusException, match="force deny"):
test_api(self, ErrorCode.FORCE_DENY)
# the first execute + 0 retry times
assert self.execute_times == 1
def test_retry_decorators_set_retry_times(self):
self.count_retry_times = 0
@retry_on_rpc_failure()
def test_api(self, code, retry_on_rate_limit, **kwargs):
self.count_retry_times += 1
self.mock_milvus_exception(code)
with pytest.raises(MilvusException):
test_api(self, ErrorCode.RATE_LIMIT, retry_on_rate_limit=True, retry_times=3)
# the first execute + 0 retry times
assert self.count_retry_times == 3 + 1
@pytest.mark.parametrize("times", [0, 1, 2, 3])
def test_retry_decorators_rate_limit_without_retry(self, times):
self.count_test_retry_decorators_force_deny = 0
@retry_on_rpc_failure(retry_times=times)
def test_api(self, code, retry_on_rate_limit):
self.count_test_retry_decorators_force_deny += 1
self.mock_milvus_exception(code)
with pytest.raises(MilvusException, match="rate limit"):
test_api(self, ErrorCode.RATE_LIMIT, retry_on_rate_limit=False)
# the first execute + 0 retry times
assert self.count_test_retry_decorators_force_deny == 1
@pytest.mark.parametrize("times", [0, 1, 2, 3])
def test_retry_decorators_rate_limit_with_retry(self, times):
self.count_test_retry_decorators_force_deny = 0
@retry_on_rpc_failure(retry_times=times)
def test_api(self, code, retry_on_rate_limit):
self.count_test_retry_decorators_force_deny += 1
self.mock_milvus_exception(code)
with pytest.raises(MilvusException, match="rate limit"):
test_api(self, ErrorCode.RATE_LIMIT, retry_on_rate_limit=True)
# the first execute + retry times
assert self.count_test_retry_decorators_force_deny == times + 1
@pytest.mark.parametrize("code", IGNORE_RETRY_CODES)
def test_donot_retry_codes(self, code):
self.count_test_donot_retry = 0
@retry_on_rpc_failure()
def test_api(self, code):
self.count_test_donot_retry += 1
self.mock_failure(code)
with pytest.raises(grpc.RpcError):
test_api(self, code)
# no retry
assert self.count_test_donot_retry == 1
| TestDecorators |
python | h5py__h5py | h5py/tests/test_file.py | {
"start": 8974,
"end": 10043
} | class ____(TestCase):
"""
Feature: File mode can be retrieved via file.mode
"""
def test_mode_attr(self):
""" Mode equivalent can be retrieved via property """
fname = self.mktemp()
with File(fname, 'w') as f:
self.assertEqual(f.mode, 'r+')
with File(fname, 'r') as f:
self.assertEqual(f.mode, 'r')
def test_mode_external(self):
""" Mode property works for files opened via external links
Issue 190.
"""
fname1 = self.mktemp()
fname2 = self.mktemp()
f1 = File(fname1, 'w')
f1.close()
f2 = File(fname2, 'w')
try:
f2['External'] = h5py.ExternalLink(fname1, '/')
f3 = f2['External'].file
self.assertEqual(f3.mode, 'r+')
finally:
f2.close()
f3.close()
f2 = File(fname2, 'r')
try:
f3 = f2['External'].file
self.assertEqual(f3.mode, 'r')
finally:
f2.close()
f3.close()
| TestModes |
python | pytorch__pytorch | torch/fx/passes/shape_prop.py | {
"start": 532,
"end": 3294
} | class ____(NamedTuple):
# TensorMetadata is a structure containing pertinent information
# about a tensor within a PyTorch program.
# General Tensor metadata
shape: torch.Size
dtype: torch.dtype
requires_grad: bool
stride: tuple[int, ...]
memory_format: Optional[torch.memory_format]
# Quantization metadata
is_quantized: bool
qparams: dict[str, Any]
# When include_contiguity is True, we will set contiguity when its always true for the tensor.
# Some tensors can represent both contiguous and non-contiguous tensors. e.g: (u0, u1) with (u2, u3).
# In such situation contiguity is not set. We could also make it a tri-state i.e: (def_contiguous,
# def_not_contiguous and unknown).
def _extract_tensor_metadata(
result: torch.Tensor, include_contiguity=True
) -> TensorMetadata:
"""
Extract a TensorMetadata NamedTuple describing `result`.
"""
shape = result.shape
dtype = result.dtype
requires_grad = result.requires_grad
stride = result.stride() if not is_sparse_any(result) else ()
memory_format = None
if include_contiguity and not is_sparse_any(result):
memory_formats = (
torch.contiguous_format,
torch.channels_last,
torch.channels_last_3d,
)
for query_format in memory_formats:
if is_contiguous_for_memory_format_or_false(
result, memory_format=query_format
):
memory_format = query_format
break
is_quantized = result.is_quantized
qparams: dict[str, Any] = {}
if is_quantized:
qscheme = result.qscheme()
qparams["qscheme"] = qscheme
if qscheme in (torch.per_tensor_affine, torch.per_tensor_symmetric):
qparams["scale"] = result.q_scale() # type: ignore[assignment]
qparams["zero_point"] = result.q_zero_point() # type: ignore[assignment]
elif qscheme in (
torch.per_channel_affine,
torch.per_channel_affine_float_qparams,
torch.per_channel_symmetric,
):
# In this branch, scale and zero_point are expected to be tensors,
# we store the values as immutable_list in TensorMetadata for
# easier serialization downstream
qparams["scale"] = result.q_per_channel_scales().tolist() # type: ignore[assignment]
qparams["zero_point"] = result.q_per_channel_zero_points().tolist() # type: ignore[assignment]
qparams["axis"] = result.q_per_channel_axis() # type: ignore[assignment]
return TensorMetadata(
shape, dtype, requires_grad, stride, memory_format, is_quantized, qparams
)
@compatibility(is_backward_compatible=True)
| TensorMetadata |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyflakes/F811_30.py | {
"start": 76,
"end": 209
} | class ____:
"""A."""
def foo(self) -> None:
"""Foo."""
bar = foo
def bar(self) -> None:
"""Bar."""
| A |
python | numba__numba | numba/core/untyped_passes.py | {
"start": 15891,
"end": 17159
} | class ____(FunctionPass):
"""A pass to canonicalize loop exit by splitting it from function exit.
"""
_name = "canonicalize_loop_exit"
def __init__(self):
FunctionPass.__init__(self)
def run_pass(self, state):
fir = state.func_ir
cfg = compute_cfg_from_blocks(fir.blocks)
status = False
for loop in cfg.loops().values():
for exit_label in loop.exits:
if exit_label in cfg.exit_points():
self._split_exit_block(fir, cfg, exit_label)
status = True
fir._reset_analysis_variables()
vlt = postproc.VariableLifetime(fir.blocks)
fir.variable_lifetime = vlt
return status
def _split_exit_block(self, fir, cfg, exit_label):
curblock = fir.blocks[exit_label]
newlabel = exit_label + 1
newlabel = find_max_label(fir.blocks) + 1
fir.blocks[newlabel] = curblock
newblock = ir.Block(scope=curblock.scope, loc=curblock.loc)
newblock.append(ir.Jump(newlabel, loc=curblock.loc))
fir.blocks[exit_label] = newblock
# Rename all labels
fir.blocks = rename_labels(fir.blocks)
@register_pass(mutates_CFG=True, analysis_only=False)
| CanonicalizeLoopExit |
python | pandas-dev__pandas | asv_bench/benchmarks/dtypes.py | {
"start": 3173,
"end": 3559
} | class ____:
def setup(self):
self.ext_dtype = pd.Int64Dtype()
self.np_dtype = np.dtype("int64")
def time_is_extension_array_dtype_true(self):
is_extension_array_dtype(self.ext_dtype)
def time_is_extension_array_dtype_false(self):
is_extension_array_dtype(self.np_dtype)
from .pandas_vb_common import setup # noqa: F401 isort:skip
| CheckDtypes |
python | huggingface__transformers | src/transformers/models/bark/generation_configuration_bark.py | {
"start": 9574,
"end": 11209
} | class ____(GenerationConfig):
model_type = "fine_acoustics"
def __init__(
self,
temperature=1.0,
max_fine_history_length=512,
max_fine_input_length=1024,
n_fine_codebooks=8,
**kwargs,
):
"""Class that holds a generation configuration for [`BarkFineModel`].
[`BarkFineModel`] is an autoencoder model, so should not usually be used for generation. However, under the
hood, it uses `temperature` when used by [`BarkModel`]
This configuration inherit from [`GenerationConfig`] and can be used to control the model generation. Read the
documentation from [`GenerationConfig`] for more information.
Args:
temperature (`float`, *optional*):
The value used to modulate the next token probabilities.
max_fine_history_length (`int`, *optional*, defaults to 512):
Max length of the fine history vector.
max_fine_input_length (`int`, *optional*, defaults to 1024):
Max length of fine input vector.
n_fine_codebooks (`int`, *optional*, defaults to 8):
Number of codebooks used.
"""
super().__init__(temperature=temperature)
self.max_fine_history_length = max_fine_history_length
self.max_fine_input_length = max_fine_input_length
self.n_fine_codebooks = n_fine_codebooks
def validate(self, **kwargs):
"""
Overrides GenerationConfig.validate because BarkFineGenerationConfig don't use any parameters outside
temperature.
"""
| BarkFineGenerationConfig |
python | pytorch__pytorch | test/dynamo/test_ctx_manager.py | {
"start": 1870,
"end": 44532
} | class ____(torch._dynamo.test_case.TestCaseWithNestedGraphBreaks):
def test_no_grad(self):
def fn1(a, b):
x = a + 1
# redundant no_grad should get ignored
with torch.no_grad():
x = x + b
x = x + 2
return x
def fn2(a, b):
x = a + 1
with torch.set_grad_enabled(False):
x = x + b
x = x + 2
return x
def fn3(a, b):
x = a + 1
with torch.enable_grad():
x = x + b
x = x + 2
return x
def fn4(a, b):
x = a + 1
with torch.set_grad_enabled(True):
if torch.is_grad_enabled():
x = x + b
x = x + 2
return x
with torch.no_grad():
torch._dynamo.testing.standard_test(
self, fn=fn1, nargs=2, expected_ops=3
) # coalesced noop
torch._dynamo.testing.standard_test(
self, fn=fn2, nargs=2, expected_ops=3
) # coalesced noop
torch._dynamo.testing.standard_test(self, fn=fn3, nargs=2, expected_ops=5)
torch._dynamo.testing.standard_test(self, fn=fn4, nargs=2, expected_ops=5)
with torch.enable_grad():
torch._dynamo.testing.standard_test(self, fn=fn1, nargs=2, expected_ops=5)
torch._dynamo.testing.standard_test(self, fn=fn2, nargs=2, expected_ops=5)
torch._dynamo.testing.standard_test(
self, fn=fn3, nargs=2, expected_ops=3
) # coalesced noop
torch._dynamo.testing.standard_test(
self, fn=fn4, nargs=2, expected_ops=3
) # coalesced noop
def test_grad_mode_guard(self):
def fn(a, b):
prev_grad = torch.is_grad_enabled()
torch.set_grad_enabled(False)
a = a + 1
a.tolist() # graph break
ret = a + b
torch.set_grad_enabled(prev_grad)
return ret
a = torch.randn([3, 4])
b = torch.randn([3, 4])
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
for _ in range(10):
opt_fn(a, b)
self.assertEqual(cnts.frame_count, 2)
def test_nested_grad_mode_graph_break(self):
def fn(x):
before = torch.is_grad_enabled()
with torch.set_grad_enabled(False):
torch._dynamo.graph_break()
with torch.set_grad_enabled(True):
x = torch.mul(x, 5)
torch._dynamo.graph_break()
x = torch.sqrt(x)
assert torch.is_grad_enabled()
assert not torch.is_grad_enabled()
assert torch.is_grad_enabled() == before
return x
a = torch.randn([3, 4])
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
for _ in range(10):
opt_fn(a)
self.assertEqual(cnts.frame_count, 2)
def test_torch_profiler(self):
# wrap torch.profiler.* as NullContextVariable and do nothing
def fn(x):
y = x**2
with torch.profiler.profile():
y = y + 2
with torch.profiler.record_function("my_function"):
z = y**3
z.tolist() # graph break
z = z + 1
return z
x = torch.randn((2, 2), requires_grad=True)
ref = fn(x)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
res = opt_fn(x)
self.assertTrue(same(ref, res))
self.assertEqual(cnts.frame_count, 2)
def test_autograd_profiler(self):
# wrap torch.autograd.profiler.* as NullContextVariable and do nothing
def fn(x):
y = x**2
with torch.autograd.profiler.profile():
y = y + 2
with torch.autograd.profiler.record_function("my_function"):
z = y**3
z.tolist() # graph break
z = z + 1
return z
x = torch.randn((2, 2), requires_grad=True)
ref = fn(x)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
res = opt_fn(x)
self.assertTrue(same(ref, res))
self.assertEqual(cnts.frame_count, 2)
@unittest.skipIf(not torch.cuda.is_available(), "requires cuda")
def test_cuda_stream_context_manager1(self):
def fn(x):
s = torch.cuda.Stream()
x = torch.mul(x, 5)
x = torch.add(x, 2)
current_stream = torch.cuda.current_stream()
s.wait_stream(current_stream)
with torch.cuda.stream(s):
x = torch.relu(x)
current_stream.wait_stream(s)
x = torch.add(x, 1)
x = torch.cos(x)
return x
x = torch.randn((2, 2), device="cuda")
ref = fn(x)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts, fullgraph=True)
res = opt_fn(x)
self.assertEqual(ref, res)
self.assertEqual(cnts.frame_count, 1)
self.assertExpectedInline(str(cnts.op_count), """9""")
@unittest.expectedFailure # https://github.com/pytorch/pytorch/issues/118204
@unittest.skipIf(not torch.cuda.is_available(), "requires cuda")
def test_cuda_stream_across_graph_break(self):
def fn(x):
s = torch.cuda.Stream()
x = torch.mul(x, 5)
x = torch.add(x, 2)
print("foo")
tcs = torch.cuda.stream(s)
current_stream = torch.cuda.current_stream()
s.wait_stream(current_stream)
with tcs:
x = torch.relu(x)
current_stream.wait_stream(s)
x = torch.add(x, 1)
x = torch.cos(x)
return x
x = torch.randn((2, 2), device="cuda")
ref = fn(x)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
res = opt_fn(x)
self.assertEqual(ref, res)
self.assertEqual(cnts.frame_count, 2)
self.assertEqual(cnts.op_count, 9)
@unittest.expectedFailure # https://github.com/pytorch/pytorch/issues/118204
@unittest.skipIf(not torch.cuda.is_available(), "requires cuda")
def test_cuda_stream_context_manager2(self):
def fn(x, s):
x = torch.mul(x, 5)
x = torch.add(x, 2)
current_stream = torch.cuda.current_stream()
s.wait_stream(current_stream)
with torch.cuda.stream(s):
x = torch.relu(x)
current_stream.wait_stream(s)
with torch.cuda.stream(current_stream):
x = torch.relu(x)
s2 = torch.cuda.Stream()
s2.wait_stream(current_stream)
with torch.cuda.stream(s2):
x = torch.relu(x)
current_stream.wait_stream(s2)
x = torch.add(x, 1)
x = torch.cos(x)
return x
x = torch.randn((2, 2), device="cuda")
s = torch.cuda.Stream()
ref = fn(x, s)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts, fullgraph=True)
res = opt_fn(x, s)
self.assertEqual(ref, res)
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 18)
@unittest.skipIf(not torch.cuda.is_available(), "requires cuda")
def test_cuda_stream_method(self):
def fn(x):
x = torch.mul(x, 1)
x = torch.add(x, 2)
new_stream = torch.cuda.Stream()
cur_stream = torch.cuda.current_stream()
new_stream.wait_stream(cur_stream)
with torch.cuda.stream(new_stream):
x = torch.sin(x)
x = torch.add(x, 3)
cur_stream.wait_stream(new_stream)
x = torch.add(x, 4)
cur_stream.query()
cur_stream.synchronize()
with torch.cuda.stream(new_stream):
x = torch.add(x, 5)
new_stream.synchronize()
x = torch.relu(x)
x = torch.cos(x)
return x
x = torch.randn((2, 2), device="cuda")
ref = fn(x)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts, fullgraph=True)
res = opt_fn(x)
self.assertEqual(ref, res)
self.assertEqual(cnts.frame_count, 1)
self.assertExpectedInline(str(cnts.op_count), """15""")
@unittest.skipIf(not torch.cuda.is_available(), "requires cuda")
def test_cuda_stream_compared_with_constant(self):
def fn(x):
x = torch.mul(x, 1)
x = torch.add(x, 2)
cur_stream = torch.cuda.current_stream()
if cur_stream is not None:
return x + 1
return x - 1
def fn2(x):
x = torch.mul(x, 1)
x = torch.add(x, 2)
cur_stream = torch.cuda.current_stream()
if cur_stream != "const_str":
return x + 1
return x - 1
x = torch.randn((2, 2), device="cuda")
ref = fn(x)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts, fullgraph=True)
opt_fn2 = torch.compile(fn2, backend=cnts, fullgraph=True)
res = opt_fn(x)
res2 = opt_fn2(x)
self.assertEqual(ref, res)
self.assertEqual(ref, res2)
@unittest.skipIf(not torch.cuda.is_available(), "requires cuda")
def test_cuda_stream_compared_with_stream(self):
def fn(x, s0, s1):
if s0 == s1:
return x + 1
else:
return x - 1
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
x = torch.randn(2, 2)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts, fullgraph=True)
ref0 = fn(x, s0, s1)
res0 = opt_fn(x, s0, s1)
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(ref0, res0)
ref1 = fn(x, s1, s1)
res1 = opt_fn(x, s1, s1)
# We have a re-compilation because of changing inputs
self.assertEqual(cnts.frame_count, 2)
self.assertEqual(ref1, res1)
torch._dynamo.reset()
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts, fullgraph=True)
ref1 = fn(x, s1, s1)
res1 = opt_fn(x, s1, s1)
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(ref1, res1)
ref0 = fn(x, s0, s1)
res0 = opt_fn(x, s0, s1)
# We have a re-compilation because of changing inputs
self.assertEqual(cnts.frame_count, 2)
self.assertEqual(ref0, res0)
@unittest.skipIf(not torch.cuda.is_available(), "requires cuda")
@unittest.skip(
"Will not support external events for now: https://github.com/pytorch/pytorch/issues/167257"
)
def test_cuda_event_reconstruct(self):
def fn(x):
e = torch.cuda.Event()
x = torch.mul(x, 5)
x = torch.add(x, 2)
return x, e
x = torch.randn((2, 2), device="cuda")
ref = fn(x)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
res = opt_fn(x)
self.assertEqual(ref[0], res[0])
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 3)
@unittest.skipIf(not torch.cuda.is_available(), "requires cuda")
@unittest.skip(
"Will not support external events for now: https://github.com/pytorch/pytorch/issues/167257"
)
def test_cuda_event_across_graph_break(self):
def fn(x):
e = torch.cuda.Event()
e.record()
x = torch.mul(x, 5)
x = torch.add(x, 2)
print("foo")
torch.cuda.current_stream().wait_event(e)
x = torch.add(x, 1)
x = torch.cos(x)
return x, e
x = torch.randn((2, 2), device="cuda")
ref = fn(x)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
res = opt_fn(x)
self.assertEqual(ref[0], res[0])
self.assertEqual(cnts.frame_count, 2)
self.assertEqual(cnts.op_count, 10)
@unittest.skipIf(not torch.cuda.is_available(), "requires cuda")
@unittest.skip(
"Will not support external events for now: https://github.com/pytorch/pytorch/issues/167257"
)
def test_cuda_event_created_outside_of_graph(self):
user_stream = torch.cuda.Stream()
event = torch.cuda.Event()
foo = torch.empty((2, 2), device="cuda")
def func(foo):
event.wait()
return foo + 1, event
x = torch.randn((1024, 1024), device="cuda")
cnts = torch._dynamo.testing.CompileCounter()
def run_iters(fn, compile=False):
if compile:
fn = torch.compile(fn, backend=cnts)
for _ in range(10):
with torch.cuda.stream(user_stream):
torch.mm(x, x, out=foo)
event.record()
out = fn(foo)
# let `fn` finish reading `foo` before writing to it in the next
# iteration or `run_iters` call.
torch.cuda.current_stream().synchronize()
return out
ref = run_iters(func, compile=False)
res = run_iters(func, compile=True)
self.assertEqual(ref, res)
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(cnts.op_count, 4)
@unittest.skipIf(not torch.cuda.is_available(), "requires cuda")
@unittest.skip(
"Will not support external events for now: https://github.com/pytorch/pytorch/issues/167257"
)
def test_cuda_event_method_create_stream_outside_of_compile(self):
def fn(x, cur_stream, new_stream):
x = torch.mul(x, 1)
x = torch.add(x, 2)
x = torch.add(x, 3)
event = cur_stream.record_event()
event.query()
new_stream.wait_event(event)
with torch.cuda.stream(new_stream):
x = torch.add(x, 4)
new_event = torch.cuda.Event()
new_event.record(new_stream)
new_event.wait(cur_stream)
x = torch.add(x, 5)
# use new event to sync
new_event.synchronize()
x = torch.relu(x)
x = torch.cos(x)
return x
x = torch.randn((2, 2), device="cuda")
cur_stream = torch.cuda.current_stream()
new_stream = torch.cuda.Stream()
ref = fn(x, cur_stream, new_stream)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts, fullgraph=True)
res = opt_fn(x, cur_stream, new_stream)
self.assertEqual(ref, res)
self.assertEqual(cnts.frame_count, 1)
self.assertExpectedInline(str(cnts.op_count), """16""")
@unittest.skipIf(not torch.cuda.is_available(), "requires cuda")
def test_cuda_event_method(self):
def fn(x):
x = torch.mul(x, 1)
x = torch.add(x, 2)
cur_stream = torch.cuda.current_stream()
new_stream = torch.cuda.Stream()
x = torch.add(x, 3)
event = cur_stream.record_event()
event.query()
new_stream.wait_event(event)
with torch.cuda.stream(new_stream):
x = torch.add(x, 4)
new_event = torch.Event()
new_event.record(new_stream)
new_event.wait(cur_stream)
x = torch.add(x, 5)
# use new event to sync
new_event.synchronize()
x = torch.relu(x)
x = torch.cos(x)
return x
x = torch.randn((2, 2), device="cuda")
ref = fn(x)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts, fullgraph=True)
res = opt_fn(x)
self.assertEqual(ref, res)
self.assertEqual(cnts.frame_count, 1)
self.assertExpectedInline(str(cnts.op_count), """16""")
@unittest.skipIf(not torch.cuda.is_available(), "requires cuda")
def test_cuda_device(self):
def fn(x):
with torch.cuda.device(x.device.index - 1):
x = torch.sin(x + 1)
return x
x = torch.randn((2, 2), device="cuda")
ref = fn(x)
opt_fn = torch.compile(backend="eager", fullgraph=True)(fn)
res = opt_fn(x)
self.assertEqual(ref, res)
def test_autograd_profiler_enabled(self):
def fn(x):
if torch.autograd._profiler_enabled():
return x + 1
else:
return x - 1
x = torch.randn((2, 2), requires_grad=True)
cnts = torch._dynamo.testing.CompileCounter()
opt_fn = torch.compile(fn, backend=cnts)
if torch.autograd._profiler_enabled():
torch.autograd._disable_profiler()
assert not torch.autograd._profiler_enabled()
ref = fn(x)
res = opt_fn(x)
self.assertTrue(same(ref, res))
with torch.autograd.profiler.profile():
assert torch.autograd._profiler_enabled()
ref = fn(x)
res = opt_fn(x)
self.assertTrue(same(ref, res))
    @unittest.skipIf(not torch.cuda.is_available(), "requires cuda")
    def test_autocast(self):
        """CUDA autocast(bfloat16) traced through torch._dynamo.export must
        yield the same device/dtype as eager: cuda:0 / bfloat16."""
        if not torch.cuda.is_bf16_supported():
            raise unittest.SkipTest("requires bf16")
        class MyModule(torch.nn.Module):
            def forward(self, x):
                # Note: forward ignores x; inputs are generated internally.
                a_float32 = torch.rand((8, 8), device="cuda")
                b_float32 = torch.rand((8, 8), device="cuda")
                d_float32 = torch.rand((8, 8), device="cuda")
                with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
                    e_float16 = torch.mm(a_float32, b_float32)
                    f_float16 = torch.mm(d_float32, e_float16)
                return f_float16
        module = MyModule()
        real = module(torch.tensor([0.5]))
        real_device = real.device
        real_dtype = real.dtype
        graph, _ = torch._dynamo.export(module)(torch.tensor([[0.0, 0], [0, 0]]))
        exported = graph(torch.tensor([0.5]))
        self.assertEqual(exported.device, real_device)
        self.assertEqual(exported.dtype, real_dtype)
        self.assertEqual(exported.device.type, "cuda")
        self.assertEqual(exported.device.index, 0)
        self.assertEqual(exported.dtype, torch.bfloat16)
    @unittest.skipIf(not torch.cuda.is_available(), "requires cuda")
    def test_cuda_amp_autocast(self):
        """CUDA autocast with a non-AMP dtype (float64) traced through export
        must still apply: result is float64 on cuda:0, matching eager."""
        class MyModule(torch.nn.Module):
            def forward(self, x):
                a_float32 = torch.rand((8, 8), device="cuda")
                b_float32 = torch.rand((8, 8), device="cuda")
                with torch.autocast(device_type="cuda", dtype=torch.float64):
                    c_float64 = torch.mm(a_float32, b_float32)
                return c_float64
        module = MyModule()
        real = module(torch.tensor([0.5]))
        real_device = real.device
        real_dtype = real.dtype
        graph, _ = torch._dynamo.export(module)(torch.tensor([[0.0, 0], [0, 0]]))
        exported = graph(torch.tensor([0.5]))
        self.assertEqual(exported.device, real_device)
        self.assertEqual(exported.dtype, real_dtype)
        self.assertEqual(exported.device.type, "cuda")
        self.assertEqual(exported.device.index, 0)
        self.assertEqual(exported.dtype, torch.float64)
    def test_is_autocast_cpu_enabled(self):
        """``torch.is_autocast_cpu_enabled()`` queried inside an autocast
        region must be traced (fullgraph) and agree with eager execution."""
        def fn(a_float32, b_float32):
            with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
                c_float16 = torch.mm(a_float32, b_float32)
                # Inside the region this is True, so the branch is taken.
                if torch.is_autocast_cpu_enabled():
                    c_float16 = c_float16 + 1
            return c_float16
        a = torch.rand((8, 8))
        b = torch.rand((8, 8))
        ref = fn(a, b)
        opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
        res = opt_fn(a, b)
        self.assertTrue(same(ref, res))
    @unittest.skipIf(
        not PLATFORM_SUPPORTS_FLASH_ATTENTION,
        "Can't run fused SDPA on this platform",
    )
    def test_autocast_sdpa(self):
        """Nested autocast contexts (cpu outer, cuda inner with float32)
        around scaled_dot_product_attention must compile with inductor and
        preserve the eager device/dtype (cuda:0 / float32)."""
        class MyModule(torch.nn.Module):
            def forward(self, query, key, value):
                with torch.autocast("cpu"):
                    with torch.autocast("cuda", dtype=torch.float32):
                        out = F.scaled_dot_product_attention(
                            query, key, value, None, 0.0, True
                        )
                return out
        dtype = torch.float32
        seq_len_q = 1
        seq_len_k = 1
        head_dim = 8
        query = torch.ones(
            1, 8, seq_len_q, head_dim, device="cuda", dtype=dtype, requires_grad=True
        )
        key = torch.ones(
            1, 8, seq_len_k, head_dim, device="cuda", dtype=dtype, requires_grad=True
        )
        value = torch.ones(
            1, 8, seq_len_k, head_dim, device="cuda", dtype=dtype, requires_grad=True
        )
        module = MyModule()
        real = module(query, key, value)
        real_device = real.device
        real_dtype = real.dtype
        opt_mod = torch.compile(module, backend="inductor")
        compiled = opt_mod(query, key, value)
        self.assertEqual(compiled.device, real_device)
        self.assertEqual(compiled.dtype, real_dtype)
        self.assertEqual(compiled.device.type, "cuda")
        self.assertEqual(compiled.device.index, 0)
        self.assertEqual(compiled.dtype, torch.float32)
    def test_autocast_cpu(self):
        """CPU autocast(bfloat16) traced through torch._dynamo.export must
        yield the same device/dtype as eager: cpu / bfloat16."""
        class MyModule(torch.nn.Module):
            def forward(self, x):
                a_float32 = torch.rand((8, 8), device="cpu")
                b_float32 = torch.rand((8, 8), device="cpu")
                d_float32 = torch.rand((8, 8), device="cpu")
                with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
                    e_float16 = torch.mm(a_float32, b_float32)
                    f_float16 = torch.mm(d_float32, e_float16)
                return f_float16
        module = MyModule()
        real = module(torch.tensor([0.5]))
        real_device = real.device
        real_dtype = real.dtype
        graph, _ = torch._dynamo.export(module)(torch.tensor([[0.0, 0], [0, 0]]))
        exported = graph(torch.tensor([0.5]))
        self.assertEqual(exported.device, real_device)
        self.assertEqual(exported.dtype, real_dtype)
        self.assertEqual(exported.device.type, "cpu")
        self.assertEqual(exported.dtype, torch.bfloat16)
    def test_autocast_cpu_graph_break(self):
        """Graph breaks both before and inside a CPU autocast region must not
        lose the autocast state: compiled result stays cpu / bfloat16."""
        class MyModule(torch.nn.Module):
            def forward(self, x):
                a_float32 = torch.rand((8, 8), device="cpu")
                b_float32 = torch.rand((8, 8), device="cpu")
                torch._dynamo.graph_break()
                d_float32 = torch.rand((8, 8), device="cpu")
                with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
                    e_float16 = torch.mm(a_float32, b_float32)
                    # Break inside the region: autocast must be re-entered
                    # correctly in the resumed frame.
                    torch._dynamo.graph_break()
                    f_float16 = torch.mm(d_float32, e_float16)
                return f_float16
        module = MyModule()
        real = module(torch.tensor([0.5]))
        real_device = real.device
        real_dtype = real.dtype
        opt = torch.compile(module, backend="eager")
        res = opt(torch.tensor([0.5]))
        self.assertEqual(res.device, real_device)
        self.assertEqual(res.dtype, real_dtype)
        self.assertEqual(res.device.type, "cpu")
        self.assertEqual(res.dtype, torch.bfloat16)
    def test_autocast_cpu_graph_break_2(self):
        # Regression for: https://github.com/pytorch/pytorch/issues/93890
        """A graph break between two ops inside an autocast region must keep
        the bfloat16 dtype for the op after the break (relu of an mm)."""
        def fn(x):
            with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
                x = torch.mm(x, x)
                torch._dynamo.graph_break()
                x = torch.relu(x)
            return x
        x = torch.rand([4, 4])
        self.assertEqual(x.dtype, torch.float32)
        res = fn(x)
        opt_fn = torch.compile(fn, backend="eager")
        opt_res = opt_fn(x)
        self.assertTrue(torch.allclose(res, opt_res))
        self.assertEqual(res.dtype, torch.bfloat16)
        self.assertEqual(opt_res.dtype, torch.bfloat16)
    def test_autocast_cpu_graph_break_inner_fn(self):
        """Nested autocast regions combined with graph breaks inside a
        non-inlineable helper: the outer bfloat16 region must be restored
        after exiting the inner (disabled) region and across breaks."""
        class MyModule(torch.nn.Module):
            @staticmethod
            def mm_breaks(x, y):
                # Deliberately not inlineable: contains a graph break.
                torch._dynamo.graph_break()
                return torch.mm(x, y)
            def forward(self, x):
                a_float32 = torch.rand((8, 8), device="cpu")
                b_float32 = torch.rand((8, 8), device="cpu")
                with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
                    torch._dynamo.graph_break()
                    with torch.autocast(
                        device_type="cpu", dtype=torch.bfloat16, enabled=False
                    ):
                        torch._dynamo.graph_break()
                        # Autocast disabled here, so this mm stays float32.
                        g_float32 = torch.mm(a_float32, b_float32)
                        with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
                            # Check that nested with non-inlineable function with graph break
                            torch._dynamo.graph_break()
                            f_float16_1 = self.mm_breaks(a_float32, b_float32)
                    # We remember to exit the inner autocast correctly to outer
                    # even after graph breaks
                    f_float16 = self.mm_breaks(a_float32, b_float32)
                    assert f_float16.dtype == f_float16_1.dtype
                return f_float16, g_float32
        module = MyModule()
        real_16, real_32 = module(torch.tensor([0.5]))
        real_device_16 = real_16.device
        real_dtype_16 = real_16.dtype
        real_device_32 = real_32.device
        real_dtype_32 = real_32.dtype
        graph = torch.compile(module, backend="eager")
        out_16, out_32 = graph(torch.tensor([0.5]))
        self.assertEqual(out_16.device, real_device_16)
        self.assertEqual(out_16.dtype, real_dtype_16)
        self.assertEqual(out_32.device, real_device_32)
        self.assertEqual(out_32.dtype, real_dtype_32)
        self.assertEqual(out_16.device.type, "cpu")
        self.assertEqual(out_16.dtype, torch.bfloat16)
        self.assertEqual(out_32.device.type, "cpu")
        self.assertEqual(out_32.dtype, torch.float32)
    def test_autocast_graph_break_method(self):
        """Like the inner-fn variant, but the graph break lives in a bound
        method that also reads instance state (self.bias); autocast dtypes
        must survive the break (bfloat16 inner, float32 for disabled region)."""
        class MyModule(torch.nn.Module):
            def __init__(self, bias):
                super().__init__()
                self.bias = bias
            def mm_not_break(self, x, y):
                return torch.mm(x, y) + self.bias
            def mm_breaks(self, x, y):
                torch._dynamo.graph_break()
                return torch.mm(x, y) + self.bias
            def forward(self, x):
                a_float32 = torch.rand((8, 8), device="cpu")
                b_float32 = torch.rand((8, 8), device="cpu")
                with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
                    with torch.autocast(
                        device_type="cpu", dtype=torch.bfloat16, enabled=False
                    ):
                        g_float32 = torch.mm(a_float32, b_float32)
                    f_float16 = self.mm_breaks(a_float32, b_float32)
                    assert (
                        f_float16[0][0] == self.mm_not_break(a_float32, b_float32)[0][0]
                    )
                return f_float16, g_float32
        module = MyModule(bias=torch.rand((8, 8), device="cpu", dtype=torch.bfloat16))
        with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
            # Autocast doesn't work on addition, so we need the bias to be `bfloat16`
            res = torch.rand((8, 8), device="cpu", dtype=torch.float32) + torch.rand(
                (8, 8), device="cpu", dtype=torch.bfloat16
            )
            self.assertEqual(res.dtype, torch.float32)
        real_16, real_32 = module(torch.tensor([0.5]))
        real_device_16 = real_16.device
        real_dtype_16 = real_16.dtype
        real_device_32 = real_32.device
        real_dtype_32 = real_32.dtype
        graph = torch.compile(module, backend="eager")
        out_16, out_32 = graph(torch.tensor([0.5]))
        self.assertEqual(out_16.device, real_device_16)
        self.assertEqual(out_16.dtype, real_dtype_16)
        self.assertEqual(out_32.device, real_device_32)
        self.assertEqual(out_32.dtype, real_dtype_32)
        self.assertEqual(out_16.device.type, "cpu")
        self.assertEqual(out_16.dtype, torch.bfloat16)
        self.assertEqual(out_32.device.type, "cpu")
        self.assertEqual(out_32.dtype, torch.float32)
    @unittest.skipIf(not torch.cuda.is_available(), "requires cuda")
    def test_autocast_float64(self):
        """CUDA autocast(float64) across two chained matmuls, traced via
        export: output must be float64 on cuda:0, matching eager."""
        class MyModule(torch.nn.Module):
            def forward(self, x):
                a_float32 = torch.rand((8, 8), device="cuda")
                b_float32 = torch.rand((8, 8), device="cuda")
                d_float32 = torch.rand((8, 8), device="cuda")
                with torch.autocast(device_type="cuda", dtype=torch.float64):
                    e_float64 = torch.mm(a_float32, b_float32)
                    f_float64 = torch.mm(d_float32, e_float64)
                return f_float64
        module = MyModule()
        real = module(torch.tensor([0.5]))
        real_device = real.device
        real_dtype = real.dtype
        graph, _ = torch._dynamo.export(module)(torch.tensor([[0.0, 0], [0, 0]]))
        exported = graph(torch.tensor([0.5]))
        self.assertEqual(exported.device, real_device)
        self.assertEqual(exported.dtype, real_dtype)
        self.assertEqual(exported.device.index, 0)
        self.assertEqual(exported.dtype, torch.float64)
    @unittest.skipIf(not torch.cuda.is_available(), "requires cuda")
    def test_autocast_device(self):
        """``torch.autocast("cuda")`` with no explicit dtype must fall back to
        the CUDA default (float16) when traced via export."""
        class MyModule(torch.nn.Module):
            def forward(self, x):
                a_float32 = torch.rand((8, 8), device="cuda")
                b_float32 = torch.rand((8, 8), device="cuda")
                d_float32 = torch.rand((8, 8), device="cuda")
                with torch.autocast("cuda"):
                    e_float64 = torch.mm(a_float32, b_float32)
                    f_float64 = torch.mm(d_float32, e_float64)
                return f_float64
        module = MyModule()
        real = module(torch.tensor([0.5]))
        real_device = real.device
        real_dtype = real.dtype
        graph, _ = torch._dynamo.export(module)(torch.tensor([[0.0, 0], [0, 0]]))
        exported = graph(torch.tensor([0.5]))
        self.assertEqual(exported.device, real_device)
        self.assertEqual(exported.dtype, real_dtype)
        self.assertEqual(exported.device.index, 0)
        # Default CUDA autocast dtype is float16.
        self.assertEqual(exported.dtype, torch.float16)
    @unittest.skipIf(not torch.cuda.is_available(), "requires cuda")
    def test_autocast_arguments_binding(self):
        """autocast(..., enabled=False) argument binding must trace correctly
        for both cuda and cpu device types."""
        def f1(x):
            with torch.autocast(device_type="cuda", enabled=False):
                x = torch.sin(x + 1)
            return x
        def f2(x):
            with torch.autocast(device_type="cpu", enabled=False):
                x = torch.cos(x + 1)
            return x
        x = torch.rand([2, 3])
        ref1 = f1(x)
        ref2 = f2(x)
        opt_f1 = torch.compile(backend="eager")(f1)
        opt_f2 = torch.compile(backend="eager")(f2)
        res1 = opt_f1(x)
        res2 = opt_f2(x)
        self.assertTrue(same(ref1, res1))
        self.assertTrue(same(ref2, res2))
    @unittest.skipIf(not torch.cuda.is_available(), "requires cuda")
    def test_autocast_decorator(self):
        """autocast used as a decorator (both torch.amp.autocast and
        torch.autocast spellings, cuda and cpu) must compile fullgraph and
        produce float16 results for the cuda-decorated matmuls."""
        def autocast_func(orig_func):
            @torch.amp.autocast(device_type="cuda", dtype=torch.float16)
            def new_fwd(*args, **kwargs):
                return orig_func(*args, **kwargs)
            return new_fwd
        def autocast_func_cuda(orig_func):
            @torch.autocast(device_type="cuda", dtype=torch.float16)
            def new_fwd(*args, **kwargs):
                return orig_func(*args, **kwargs)
            return new_fwd
        def autocast_func_cpu(orig_func):
            @torch.autocast(device_type="cpu", dtype=torch.float16)
            def new_fwd(*args, **kwargs):
                return orig_func(*args, **kwargs)
            return new_fwd
        def mm(a, b):
            return torch.mm(a, b)
        mm_float16 = autocast_func(mm)
        mm_float16_cuda = autocast_func_cuda(mm)
        mm_float16_cpu = autocast_func_cpu(mm)
        def fn(a, b):
            return mm_float16(a, b), mm_float16_cuda(a, b), mm_float16_cpu(a, b)
        a_float32 = torch.rand((8, 8), device="cuda")
        b_float32 = torch.rand((8, 8), device="cuda")
        ref = fn(a_float32, b_float32)
        opt_fn = torch.compile(backend="eager", fullgraph=True)(fn)
        res = opt_fn(a_float32, b_float32)
        self.assertTrue(same(ref, res))
        self.assertTrue(res[0].dtype == torch.float16)
        self.assertTrue(res[1].dtype == torch.float16)
    @parametrize(
        "Ctx",
        [CustomizedCtxManagerWithGraphBreak, customized_ctx_manager_with_graph_break],
        name_fn=lambda x: x.__name__,
    )
    def test_generic_ctx_manager_with_graph_break(self, Ctx):
        """A user context manager that itself graph-breaks forces the body to
        run in eager; grad mode toggled by the manager (False here) must be
        honored and then restored after the call."""
        def fn(x):
            with Ctx(False):
                # body runs on eager
                if torch.is_grad_enabled():
                    z = x + 1000
                else:
                    y = x * 2
                    z = y.sin() + 3
            return z
        self.assertTrue(torch.is_grad_enabled())
        x = torch.randn(2, 3, requires_grad=True)
        expected = fn(x)
        got = torch.compile(backend="eager", fullgraph=False)(fn)(x)
        self.assertEqual(expected, got)
        # Grad mode must be restored on exit.
        self.assertTrue(torch.is_grad_enabled())
        self.assertFalse(got.requires_grad)  # since it was run under torch.no_grad.
    def test_return_context_manager(self):
        """A context manager object created and used inside a compiled region
        can be returned to the caller; its state (mode=False) is preserved."""
        @torch.compile(backend="eager", fullgraph=True)
        def f(x):
            cm = CustomizedCtxManager(False)
            with cm:
                pass
            return cm
        x = torch.randn(2, 3)
        cm = f(x)
        self.assertFalse(cm.mode)
    def test_return_context_manager_with_graph_break(self):
        """Same as test_return_context_manager, but with a graph break between
        creation and use of the manager; it must still round-trip intact."""
        @torch.compile(backend="eager", fullgraph=False)
        def f(x):
            cm = CustomizedCtxManager(False)
            torch._dynamo.graph_break()
            with cm:
                pass
            return cm
        x = torch.randn(2, 3)
        cm = f(x)
        self.assertFalse(cm.mode)
    @torch._dynamo.config.patch(enable_trace_contextlib=True)
    @parametrize(
        "Ctx",
        [CustomizedCtxManager, customized_ctx_manager],
        name_fn=lambda x: x.__name__,
    )
    def test_generic_context_manager(self, Ctx):
        """A generic (class- or contextlib-based) context manager must be
        inlined fullgraph; recompilation is expected when the ambient grad
        mode changes (frame_count 1 -> 2)."""
        def fn(x):
            with Ctx(True):
                x = x + 1
                if torch.is_grad_enabled():
                    x = x * 2
                x = torch.relu(x)
            return x - 1
        x = torch.rand(2, 3)
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch.compile(backend=cnts, fullgraph=True)(fn)
        with torch.no_grad():
            ref = fn(x)
            res = opt_fn(x)
            self.assertTrue(same(ref, res))
            self.assertEqual(cnts.frame_count, 1)
            self.assertEqual(cnts.op_count, 6)
        with torch.enable_grad():
            ref = fn(x)
            res = opt_fn(x)
            self.assertTrue(same(ref, res))
            # Second compile for the grad-enabled guard; op_count accumulates.
            self.assertEqual(cnts.frame_count, 2)
            self.assertEqual(cnts.op_count, 12)
    @torch._dynamo.config.patch(enable_trace_contextlib=True)
    @parametrize(
        "Ctx",
        [CustomizedCtxManager, customized_ctx_manager],
        name_fn=lambda x: x.__name__,
    )
    def test_nested_generic_context_manager(self, Ctx):
        """Nested generic context managers (outer True, inner False) must be
        inlined fullgraph, with one recompile when ambient grad mode flips."""
        def fn(x):
            with Ctx(True):
                x = x + 1
                if torch.is_grad_enabled():
                    x = x * 2
                with Ctx(False):
                    if torch.is_grad_enabled():
                        x = x - 3
                    x = x * 1.5
                x = torch.relu(x)
            return x - 1
        x = torch.rand(2, 3)
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch.compile(backend=cnts, fullgraph=True)(fn)
        with torch.no_grad():
            ref = fn(x)
            res = opt_fn(x)
            self.assertTrue(same(ref, res))
            self.assertEqual(cnts.frame_count, 1)
            self.assertEqual(cnts.op_count, 9)
        with torch.enable_grad():
            ref = fn(x)
            res = opt_fn(x)
            self.assertTrue(same(ref, res))
            self.assertEqual(cnts.frame_count, 2)
            self.assertEqual(cnts.op_count, 18)
    @torch._dynamo.config.patch(enable_trace_contextlib=True)
    @parametrize(
        "Ctx",
        [CustomizedCtxManager, customized_ctx_manager],
        name_fn=lambda x: x.__name__,
    )
    def test_generic_context_manager_with_graph_break(self, Ctx):
        """A graph break inside a generic context manager splits the frame;
        counts are only asserted for the class-based manager (the contextlib
        variant produces different frame accounting)."""
        def fn(x):
            with Ctx(True):
                x = x + 1
                if torch.is_grad_enabled():
                    x = x * 2
                torch._dynamo.graph_break()
                x = torch.relu(x)
            return x - 1
        x = torch.rand(2, 3)
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch.compile(backend=cnts, fullgraph=False)(fn)
        with torch.no_grad():
            ref = fn(x)
            res = opt_fn(x)
            self.assertTrue(same(ref, res))
            if Ctx is CustomizedCtxManager:
                self.assertEqual(cnts.frame_count, 2)
                self.assertEqual(cnts.op_count, 2)
        with torch.enable_grad():
            ref = fn(x)
            res = opt_fn(x)
            self.assertTrue(same(ref, res))
            if Ctx is CustomizedCtxManager:
                self.assertEqual(cnts.frame_count, 4)
                self.assertEqual(cnts.op_count, 4)
    @torch._dynamo.config.patch(enable_trace_contextlib=True)
    @parametrize(
        "Ctx",
        [CustomizedCtxManager, customized_ctx_manager],
        name_fn=lambda x: x.__name__,
    )
    def test_nested_generic_context_manager_with_graph_break(self, Ctx):
        """Graph break inside the inner of two nested generic context
        managers; counters are reset between the no_grad and enable_grad
        phases so each phase is measured from a clean cache."""
        def fn(x):
            with Ctx(True):
                x = x + 1
                if torch.is_grad_enabled():
                    x = x * 2
                with Ctx(False):
                    if torch.is_grad_enabled():
                        x = x - 3
                    torch._dynamo.graph_break()
                    x = x * 1.5
                x = torch.relu(x)
            return x - 1
        x = torch.rand(2, 3)
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch.compile(backend=cnts, fullgraph=False)(fn)
        with torch.no_grad():
            ref = fn(x)
            res = opt_fn(x)
            self.assertTrue(same(ref, res))
            if Ctx is CustomizedCtxManager:
                self.assertEqual(cnts.frame_count, 4)
                self.assertEqual(cnts.op_count, 4)
        # Fresh counter/cache for the grad-enabled measurement.
        torch._dynamo.reset()
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch.compile(fn, backend=cnts, fullgraph=False)
        with torch.enable_grad():
            ref = fn(x)
            res = opt_fn(x)
            self.assertTrue(same(ref, res))
            if Ctx is CustomizedCtxManager:
                self.assertEqual(cnts.frame_count, 4)
                self.assertEqual(cnts.op_count, 4)
    def test_graph_break_inlining_grad(self):
        """A graph break inside torch.no_grad() in an inlined callee must
        split into exactly two frames and still allow backward() through the
        part computed with grad enabled."""
        def gn(z):
            with torch.no_grad():
                torch._dynamo.graph_break()
                return torch.sin(z)
        def fn(x, y, z):
            a = torch.mm(x, y)
            z = gn(z)
            return a
        torch._dynamo.reset()
        cnts = torch._dynamo.testing.CompileCounter()
        opt_fn = torch.compile(fn, backend=cnts, fullgraph=False)
        x = torch.randn(4, 4, requires_grad=True)
        y = torch.randn(4, 4, requires_grad=True)
        z = torch.randn(4)
        opt_fn(x, y, z).sum().backward()
        self.assertEqual(cnts.frame_count, 2)
    def _graph_break_inlining_autocast_test_helper(self, device):
        """Shared body for test_graph_break_inlining_autocast: graph break
        inside an autocast region of an inlined callee, on *device*."""
        def gn(x, y):
            with torch.autocast(device_type=device, dtype=torch.bfloat16):
                z = torch.mm(x, y)
                torch._dynamo.graph_break()
                return torch.sin(z)
        def fn(x, y):
            z = torch.mm(x, y)
            z = z + gn(x, y)
            return z
        x = torch.rand(3, 3).to(device)
        y = torch.rand(3, 3).to(device)
        opt_fn = torch.compile(backend="eager")(fn)
        ref = fn(x, y)
        res = opt_fn(x, y)
        self.assertEqual(ref, res)
    def test_graph_break_inlining_autocast(self):
        """Run the autocast graph-break helper on cpu always, and on cuda
        only when both CUDA and bf16 support are present."""
        for device in ["cuda", "cpu"]:
            if device == "cuda" and not (
                torch.cuda.is_available() and torch.cuda.is_bf16_supported()
            ):
                continue
            self._graph_break_inlining_autocast_test_helper(device)
def test_disable_saved_tensors_hooks(self):
def fn(z):
@torch.autograd.graph.disable_saved_tensors_hooks("This is not supported")
def f(x, y):
return x + y
x, y = (
torch.ones(
1,
),
torch.zeros(
1,
),
)
return f(x, y)
eager = EagerAndRecordGraphs()
torch.compile(fn, backend=eager, fullgraph=True)(torch.randn(()))
graph = eager.graphs[0]
actual = normalize_gm(graph.print_readable(False))
self.assertExpectedInline(
actual,
"""\
| CtxManagerTests |
python | great-expectations__great_expectations | great_expectations/data_context/types/base.py | {
"start": 15938,
"end": 21211
class ____(AbstractConfig):
    """Configuration object for a data connector (per the ``to_json_dict``
    docstring below, this is a DataConnectorConfig).

    The class name ``____`` is a masked placeholder in this dump — the real
    identifier is given by the surrounding dataset row; do not rely on it.

    All constructor arguments except ``class_name`` are optional; each one is
    stored as an instance attribute only when it was explicitly supplied
    (``is not None``), so serialization via ``to_dict()`` only emits keys the
    user actually configured.
    """
    def __init__(  # noqa: C901, PLR0912, PLR0913, PLR0915 # FIXME CoP
        self,
        class_name,
        name: Optional[str] = None,
        id: Optional[str] = None,
        module_name=None,
        credentials=None,
        assets=None,
        base_directory=None,
        glob_directive=None,
        default_regex=None,
        batch_identifiers=None,
        # S3
        boto3_options=None,
        bucket=None,
        max_keys=None,
        # Azure
        azure_options=None,
        container=None,
        name_starts_with=None,
        # GCS
        bucket_or_name=None,
        max_results=None,
        # Both S3/GCS
        prefix=None,
        # Both S3/Azure
        delimiter=None,
        data_asset_name_prefix=None,
        data_asset_name_suffix=None,
        include_schema_name=None,
        partitioner_method=None,
        partitioner_kwargs=None,
        sorters=None,
        sampling_method=None,
        sampling_kwargs=None,
        excluded_tables=None,
        included_tables=None,
        skip_inapplicable_tables=None,
        introspection_directives=None,
        batch_spec_passthrough=None,
        **kwargs,
    ) -> None:
        # class_name/module_name are kept private and exposed via read-only
        # properties below.
        self._class_name = class_name
        self._module_name = module_name
        # Conditionally populate attributes: only values that were supplied
        # become instance attributes (affects what to_dict()/to_json_dict()
        # will serialize).
        if credentials is not None:
            self.credentials = credentials
        if assets is not None:
            self.assets = assets
        if base_directory is not None:
            self.base_directory = base_directory
        if glob_directive is not None:
            self.glob_directive = glob_directive
        if default_regex is not None:
            self.default_regex = default_regex
        if batch_identifiers is not None:
            self.batch_identifiers = batch_identifiers
        if data_asset_name_prefix is not None:
            self.data_asset_name_prefix = data_asset_name_prefix
        if data_asset_name_suffix is not None:
            self.data_asset_name_suffix = data_asset_name_suffix
        if include_schema_name is not None:
            self.include_schema_name = include_schema_name
        if partitioner_method is not None:
            self.partitioner_method = partitioner_method
        if partitioner_kwargs is not None:
            self.partitioner_kwargs = partitioner_kwargs
        if sorters is not None:
            self.sorters = sorters
        if sampling_method is not None:
            self.sampling_method = sampling_method
        if sampling_kwargs is not None:
            self.sampling_kwargs = sampling_kwargs
        if excluded_tables is not None:
            self.excluded_tables = excluded_tables
        if included_tables is not None:
            self.included_tables = included_tables
        if skip_inapplicable_tables is not None:
            self.skip_inapplicable_tables = skip_inapplicable_tables
        if introspection_directives is not None:
            self.introspection_directives = introspection_directives
        if batch_spec_passthrough is not None:
            self.batch_spec_passthrough = batch_spec_passthrough
        # S3
        if boto3_options is not None:
            self.boto3_options = boto3_options
        if bucket is not None:
            self.bucket = bucket
        if max_keys is not None:
            self.max_keys = max_keys
        # Azure
        if azure_options is not None:
            self.azure_options = azure_options
        if container is not None:
            self.container = container
        if name_starts_with is not None:
            self.name_starts_with = name_starts_with
        # GCS
        if bucket_or_name is not None:
            self.bucket_or_name = bucket_or_name
        if max_results is not None:
            self.max_results = max_results
        # Both S3/GCS
        if prefix is not None:
            self.prefix = prefix
        # Both S3/Azure
        if delimiter is not None:
            self.delimiter = delimiter
        super().__init__(id=id, name=name)
        # Note: optional samplers and partitioners are handled by setattr
        for k, v in kwargs.items():
            setattr(self, k, v)
    @property
    def class_name(self):
        # Read-only accessor for the configured connector class name.
        return self._class_name
    @property
    def module_name(self):
        # Read-only accessor; may be None when not explicitly configured.
        return self._module_name
    @override
    def to_json_dict(self) -> Dict[str, JSONValues]:
        """Returns a JSON-serializable dict representation of this DataConnectorConfig.
        Returns:
            A JSON-serializable dict representation of this DataConnectorConfig.
        """
        # # TODO: <Alex>2/4/2022</Alex>
        # This implementation of "SerializableDictDot.to_json_dict() occurs frequently and should ideally serve as the # noqa: E501 # FIXME CoP
        # reference implementation in the "SerializableDictDot" class itself. However, the circular import dependencies, # noqa: E501 # FIXME CoP
        # due to the location of the "great_expectations/types/__init__.py" and "great_expectations/core/util.py" modules # noqa: E501 # FIXME CoP
        # make this refactoring infeasible at the present time.
        dict_obj: dict = self.to_dict()
        serializeable_dict: dict = convert_to_json_serializable(data=dict_obj)
        return serializeable_dict
| DataConnectorConfig |
python | astropy__astropy | astropy/visualization/wcsaxes/tests/test_images.py | {
"start": 48119,
"end": 49960
class ____(BaseLowLevelWCS):
    """Minimal 2-D identity WCS with RA/Dec axes in arcsec.

    The class name ``____`` is a masked placeholder in this dump; the test
    below instantiates it as ``EquatorialArcsecWCS``, so that is presumably
    the real name — verify against the dataset row.

    Pixel and world coordinates are identical (pixel_to_world_values and
    world_to_pixel_values are both the identity), which lets tests exercise
    axis labeling/formatting for arcsec units in isolation.
    """
    @property
    def pixel_n_dim(self):
        # Two pixel axes.
        return 2
    @property
    def world_n_dim(self):
        # Two world axes (RA, Dec).
        return 2
    @property
    def world_axis_physical_types(self):
        return [
            "pos.eq.ra",
            "pos.eq.dec",
        ]
    @property
    def world_axis_units(self):
        return ["arcsec", "arcsec"]
    @property
    def world_axis_names(self):
        return ["RA", "DEC"]
    def pixel_to_world_values(self, *pixel_arrays):
        # Identity transform: world == pixel.
        return pixel_arrays
    def world_to_pixel_values(self, *world_arrays):
        # Identity transform: pixel == world.
        return world_arrays
    @property
    def world_axis_object_components(self):
        # Both axes map into a single SkyCoord ("celestial") object.
        return [
            ("celestial", 0, "spherical.lon.degree"),
            ("celestial", 1, "spherical.lat.degree"),
        ]
    @property
    def world_axis_object_classes(self):
        return {
            "celestial": (SkyCoord, (), {"unit": "deg"}),
        }
@figure_test
def test_equatorial_arcsec():
    """Figure test: axes built from the arcsec identity WCS defined above.

    NOTE(review): this references ``EquatorialArcsecWCS`` by its real name
    while the class in this dump is masked as ``____`` — as dumped, this
    line would raise NameError; the name comes from the dataset target.
    """
    wcs = EquatorialArcsecWCS()
    fig = Figure()
    # Attaching an Agg canvas is required for the figure comparison harness,
    # even though the local variable is otherwise unused.
    canvas = FigureCanvasAgg(fig)
    ax = fig.add_subplot(projection=wcs)
    ax.set_xlim(-0.5, 20 - 0.5)
    ax.set_ylim(-0.5, 30 - 0.5)
    return fig
@figure_test
def test_wcs_preserve_units():
    """Figure test for a WCS constructed with ``preserve_units=True``.

    Builds a simple RA/Dec TAN header with arcsec units and returns the
    resulting figure for image comparison.
    """
    cards = {
        "CTYPE1": "RA---TAN",
        "CTYPE2": "DEC--TAN",
        "CUNIT1": "arcsec",
        "CUNIT2": "arcsec",
        "CRVAL1": 20,
        "CRVAL2": 20,
        "CRPIX1": 1,
        "CRPIX2": 1,
        "CDELT1": -1,
        "CDELT2": 1,
    }
    header = fits.Header()
    for keyword, value in cards.items():
        header[keyword] = value
    wcs = WCS(header, preserve_units=True)
    fig = Figure()
    # The Agg canvas must be attached for the comparison harness.
    canvas = FigureCanvasAgg(fig)
    ax = fig.add_subplot(1, 1, 1, projection=wcs)
    ax.set_xlim(-0.5, 30)
    ax.set_ylim(-0.5, 20)
    return fig
| EquatorialArcsecWCS |
python | Pylons__pyramid | src/pyramid/registry.py | {
"start": 287,
"end": 4303
class ____(Components, dict):
    """A registry object is an :term:`application registry`.
    It is used by the framework itself to perform mappings of URLs to view
    callables, as well as servicing other various framework duties. A registry
    has its own internal API, but this API is rarely used by Pyramid
    application developers (it's usually only used by developers of the
    Pyramid framework and Pyramid addons). But it has a number of attributes
    that may be useful to application developers within application code,
    such as ``settings``, which is a dictionary containing application
    deployment settings.
    For information about the purpose and usage of the application registry,
    see :ref:`zca_chapter`.
    The registry may be used both as an :class:`pyramid.interfaces.IDict` and
    as a Zope component registry.
    These two ways of storing configuration are independent.
    Applications will tend to prefer to store information as key-values
    whereas addons may prefer to use the component registry to avoid naming
    conflicts and to provide more complex lookup mechanisms.
    The application registry is usually accessed as ``request.registry`` in
    application code. By the time a registry is used to handle requests it
    should be considered frozen and read-only. Any changes to its internal
    state should be done with caution and concern for thread-safety.
    """
    # for optimization purposes, if no listeners are listening, don't try
    # to notify them
    has_listeners = False
    _settings = None
    def __init__(self, package_name=CALLER_PACKAGE, *args, **kw):
        # add a registry-instance-specific lock, which is used when the lookup
        # cache is mutated
        self._lock = threading.Lock()
        # add a view lookup cache
        self._clear_view_lookup_cache()
        if package_name is CALLER_PACKAGE:
            package_name = caller_package().__name__
        # Initialize both bases explicitly (Components needs the package
        # name; dict gives the key/value storage).
        Components.__init__(self, package_name, *args, **kw)
        dict.__init__(self)
    def _clear_view_lookup_cache(self):
        # Reset the per-registry view lookup cache to empty.
        self._view_lookup_cache = {}
    def __bool__(self):
        # defeat bool determination via dict.__len__
        return True
    @reify
    def package_name(self):
        # Cached alias of the Components name (computed once via reify).
        return self.__name__
    def registerSubscriptionAdapter(self, *arg, **kw):
        # Delegate to Components, then mark that listeners exist so notify()
        # actually dispatches.
        result = Components.registerSubscriptionAdapter(self, *arg, **kw)
        self.has_listeners = True
        return result
    def registerSelfAdapter(
        self, required=None, provided=None, name='', info='', event=True
    ):
        # registerAdapter analogue which always returns the object itself
        # when required is matched
        return self.registerAdapter(
            lambda x: x,
            required=required,
            provided=provided,
            name=name,
            info=info,
            event=event,
        )
    def queryAdapterOrSelf(self, object, interface, default=None):
        # queryAdapter analogue which returns the object if it implements
        # the interface, otherwise it will return an adaptation to the
        # interface
        if not interface.providedBy(object):
            return self.queryAdapter(object, interface, default=default)
        return object
    def registerHandler(self, *arg, **kw):
        # Delegate to Components, then mark that listeners exist.
        result = Components.registerHandler(self, *arg, **kw)
        self.has_listeners = True
        return result
    def notify(self, *events):
        # Skip subscriber iteration entirely when nothing is registered.
        if self.has_listeners:
            # iterating over subscribers assures they get executed
            [_ for _ in self.subscribers(events, None)]
    # backwards compatibility for code that wants to look up a settings
    # object via ``registry.getUtility(ISettings)``
    def _get_settings(self):
        return self._settings
    def _set_settings(self, settings):
        # Keep the ISettings utility registration and the attribute in sync.
        self.registerUtility(settings, ISettings)
        self._settings = settings
    settings = property(_get_settings, _set_settings)
@implementer(IIntrospector)
| Registry |
python | django__django | django/contrib/auth/apps.py | {
"start": 454,
"end": 1467
class ____(AppConfig):
    """App configuration for ``django.contrib.auth`` (name masked as ``____``
    in this dump). Wires up permission-management signal handlers, the
    last_login updater, and system checks when the app registry is ready."""
    default_auto_field = "django.db.models.AutoField"
    name = "django.contrib.auth"
    verbose_name = _("Authentication and Authorization")
    def ready(self):
        # Create/rename Permission rows around migrations.
        post_migrate.connect(
            create_permissions,
            dispatch_uid="django.contrib.auth.management.create_permissions",
        )
        pre_migrate.connect(
            rename_permissions,
            dispatch_uid="django.contrib.auth.management.rename_permissions",
        )
        last_login_field = getattr(get_user_model(), "last_login", None)
        # Register the handler only if UserModel.last_login is a field.
        if isinstance(last_login_field, DeferredAttribute):
            # Imported lazily: models cannot be imported at module load time.
            from .models import update_last_login
            user_logged_in.connect(update_last_login, dispatch_uid="update_last_login")
        # System checks for custom user models, permissions and middleware.
        checks.register(check_user_model, checks.Tags.models)
        checks.register(check_models_permissions, checks.Tags.models)
        checks.register(check_middleware)
python | tensorflow__tensorflow | tensorflow/python/ops/losses/util_test.py | {
"start": 957,
"end": 1875
class ____(test.TestCase):
    """Tests for losses util (name masked as ``____`` in this dump).

    Exercises ``util.get_regularization_loss`` against the graph-level
    REGULARIZATION_LOSSES collection; the collection is process-global graph
    state, so the assertions below build on each other in order.
    """
    @test_util.run_deprecated_v1
    def testGetRegularizationLoss(self):
        # Empty regularization collection should evaluate to 0.0.
        with self.cached_session():
            self.assertEqual(0.0, util.get_regularization_loss().eval())
        # Loss should sum.
        ops.add_to_collection(
            ops.GraphKeys.REGULARIZATION_LOSSES, constant_op.constant(2.0))
        ops.add_to_collection(
            ops.GraphKeys.REGULARIZATION_LOSSES, constant_op.constant(3.0))
        with self.cached_session():
            self.assertEqual(5.0, util.get_regularization_loss().eval())
        # Check scope capture mechanism.
        with ops.name_scope('scope1'):
            ops.add_to_collection(
                ops.GraphKeys.REGULARIZATION_LOSSES, constant_op.constant(-1.0))
        with self.cached_session():
            # Only the loss added under 'scope1' matches the scope filter.
            self.assertEqual(-1.0, util.get_regularization_loss('scope1').eval())
# Standard TF test entry point: run the test cases when executed directly.
if __name__ == '__main__':
  test.main()
| LossesUtilTest |
python | sqlalchemy__sqlalchemy | test/sql/test_returning.py | {
"start": 24419,
"end": 29439
class ____(fixtures.TablesTest):
    """INSERT ``return_defaults()`` tests (name masked as ``____`` in this
    dump; by the returned-defaults focus this is an insert-return-defaults
    suite).

    Tables are redefined per test (``run_define_tables = "each"``) because
    the ``IncDefault`` server-side-style default uses a fresh shared counter
    per schema definition, so the first insert in each test sees value 0.
    """
    __requires__ = ("insert_returning",)
    run_define_tables = "each"
    __sparse_driver_backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        from sqlalchemy.sql import ColumnElement
        from sqlalchemy.ext.compiler import compiles
        # Shared counter: each compilation of IncDefault renders the next
        # integer, giving deterministic 0, 1, 2, ... default values.
        counter = itertools.count()
        class IncDefault(ColumnElement):
            pass
        @compiles(IncDefault)
        def compile_(element, compiler, **kw):
            return str(next(counter))
        Table(
            "t1",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("data", String(50)),
            Column("insdef", Integer, default=IncDefault()),
            Column("upddef", Integer, onupdate=IncDefault()),
        )
        Table(
            "table_no_addtl_defaults",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("data", String(50)),
        )
        # Type with a result processor, to verify supplemental RETURNING
        # doesn't double-process rows.
        class MyType(TypeDecorator):
            impl = String(50)
            def process_result_value(self, value, dialect):
                return f"PROCESSED! {value}"
        Table(
            "table_datatype_has_result_proc",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("data", MyType()),
        )
    def test_chained_insert_pk(self, connection):
        """return_defaults() chained after values(): PK and requested
        default come back in returned_defaults."""
        t1 = self.tables.t1
        result = connection.execute(
            t1.insert().values(upddef=1).return_defaults(t1.c.insdef)
        )
        eq_(
            [
                result.returned_defaults._mapping[k]
                for k in (t1.c.id, t1.c.insdef)
            ],
            [1, 0],
        )
    def test_arg_insert_pk(self, connection):
        """Same as test_chained_insert_pk but with return_defaults() applied
        before values() — order of chaining must not matter."""
        t1 = self.tables.t1
        result = connection.execute(
            t1.insert().return_defaults(t1.c.insdef).values(upddef=1)
        )
        eq_(
            [
                result.returned_defaults._mapping[k]
                for k in (t1.c.id, t1.c.insdef)
            ],
            [1, 0],
        )
    def test_insert_non_default(self, connection):
        """test that a column not marked at all as a
        default works with this feature."""
        t1 = self.tables.t1
        result = connection.execute(
            t1.insert().values(upddef=1).return_defaults(t1.c.data)
        )
        eq_(
            [
                result.returned_defaults._mapping[k]
                for k in (t1.c.id, t1.c.data)
            ],
            [1, None],
        )
    def test_insert_sql_expr(self, connection):
        """A SQL-expression value for a defaulted column is evaluated
        server-side and reported back via returned_defaults."""
        from sqlalchemy import literal
        t1 = self.tables.t1
        result = connection.execute(
            t1.insert().return_defaults().values(insdef=literal(10) + 5)
        )
        eq_(
            result.returned_defaults._mapping,
            {"id": 1, "data": None, "insdef": 15, "upddef": None},
        )
    def test_insert_non_default_plus_default(self, connection):
        """Mix of a plain column and a defaulted column in return_defaults."""
        t1 = self.tables.t1
        result = connection.execute(
            t1.insert()
            .values(upddef=1)
            .return_defaults(t1.c.data, t1.c.insdef)
        )
        eq_(
            dict(result.returned_defaults._mapping),
            {"id": 1, "data": None, "insdef": 0},
        )
        eq_(result.inserted_primary_key, (1,))
    def test_insert_all(self, connection):
        """return_defaults() with no arguments returns all default-bearing
        columns plus the primary key."""
        t1 = self.tables.t1
        result = connection.execute(
            t1.insert().values(upddef=1).return_defaults()
        )
        eq_(
            dict(result.returned_defaults._mapping),
            {"id": 1, "data": None, "insdef": 0},
        )
        eq_(result.inserted_primary_key, (1,))
    def test_insert_w_defaults_supplemental_cols(self, connection):
        """supplemental_cols makes the full row fetchable via result.all()."""
        t1 = self.tables.t1
        result = connection.execute(
            t1.insert().return_defaults(supplemental_cols=[t1.c.id]),
            {"data": "d1"},
        )
        eq_(result.all(), [(1, 0, None)])
    def test_insert_w_no_defaults_supplemental_cols(self, connection):
        """supplemental_cols on a table with no extra defaults yields only
        the requested column."""
        t1 = self.tables.table_no_addtl_defaults
        result = connection.execute(
            t1.insert().return_defaults(supplemental_cols=[t1.c.id]),
            {"data": "d1"},
        )
        eq_(result.all(), [(1,)])
    def test_insert_w_defaults_supplemental_processor_cols(self, connection):
        """test that the cursor._rewind() used by supplemental RETURNING
        clears out result-row processors as we will have already processed
        the rows.
        """
        t1 = self.tables.table_datatype_has_result_proc
        result = connection.execute(
            t1.insert().return_defaults(
                supplemental_cols=[t1.c.id, t1.c.data]
            ),
            {"data": "d1"},
        )
        # Value processed exactly once by MyType.process_result_value.
        eq_(result.all(), [(1, "PROCESSED! d1")])
python | dask__dask | dask/_expr.py | {
"start": 36556,
"end": 39349
} | class ____(Expr):
"""A sequence of expressions
This is used to be able to optimize multiple collections combined, e.g. when
being computed simultaneously with ``dask.compute((Expr1, Expr2))``.
"""
def __getitem__(self, other):
return self.operands[other]
def _layer(self) -> dict:
return toolz.merge(op._layer() for op in self.operands)
def __dask_keys__(self) -> list:
all_keys = []
for op in self.operands:
all_keys.append(list(op.__dask_keys__()))
return all_keys
def __repr__(self):
return "ExprSequence(" + ", ".join(map(repr, self.operands)) + ")"
__str__ = __repr__
def finalize_compute(self):
return _ExprSequence(
*(op.finalize_compute() for op in self.operands),
)
def __dask_annotations__(self):
annotations_by_type = {}
for op in self.operands:
for k, v in op.__dask_annotations__().items():
annotations_by_type.setdefault(k, {}).update(v)
return annotations_by_type
def __len__(self):
return len(self.operands)
def __iter__(self):
return iter(self.operands)
def _simplify_down(self):
from dask.highlevelgraph import HighLevelGraph
issue_warning = False
hlgs = []
if any(
isinstance(op, (HLGExpr, HLGFinalizeCompute, dict)) for op in self.operands
):
for op in self.operands:
if isinstance(op, (HLGExpr, HLGFinalizeCompute)):
hlgs.append(op)
elif isinstance(op, dict):
hlgs.append(
HLGExpr(
dsk=HighLevelGraph.from_collections(
str(id(op)), op, dependencies=()
)
)
)
else:
issue_warning = True
opt = op.optimize()
hlgs.append(
HLGExpr(
dsk=HighLevelGraph.from_collections(
opt._name, opt.__dask_graph__(), dependencies=()
)
)
)
if issue_warning:
warnings.warn(
"Computing mixed collections that are backed by "
"HighlevelGraphs/dicts and Expressions. "
"This forces Expressions to be materialized. "
"It is recommended to use only one type and separate the dask."
"compute calls if necessary.",
UserWarning,
)
if not hlgs:
return None
return _HLGExprSequence(*hlgs)
| _ExprSequence |
python | python-poetry__poetry | tests/vcs/git/git_fixture.py | {
"start": 133,
"end": 283
} | class ____(typing.NamedTuple):
path: Path
repo: dulwich.repo.Repo
init_commit: str
middle_commit: str
head_commit: str
| TempRepoFixture |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pydoclint/DOC502_google.py | {
"start": 0,
"end": 3685
} | class ____(Exception):
...
# DOC502
def calculate_speed(distance: float, time: float) -> float:
"""Calculate speed as distance divided by time.
Args:
distance: Distance traveled.
time: Time spent traveling.
Returns:
Speed as distance divided by time.
Raises:
FasterThanLightError: If speed is greater than the speed of light.
"""
return distance / time
# DOC502
def calculate_speed(distance: float, time: float) -> float:
"""Calculate speed as distance divided by time.
Args:
distance: Distance traveled.
time: Time spent traveling.
Returns:
Speed as distance divided by time.
Raises:
FasterThanLightError: If speed is greater than the speed of light.
DivisionByZero: Divide by zero.
"""
return distance / time
# DOC502
def calculate_speed(distance: float, time: float) -> float:
"""Calculate speed as distance divided by time.
Args:
distance: Distance traveled.
time: Time spent traveling.
Returns:
Speed as distance divided by time.
Raises:
FasterThanLightError: If speed is greater than the speed of light.
DivisionByZero: Divide by zero.
"""
try:
return distance / time
except ZeroDivisionError as exc:
raise FasterThanLightError from exc
# This is fine
def calculate_speed(distance: float, time: float) -> float:
"""Calculate speed as distance divided by time.
Args:
distance: Distance traveled.
time: Time spent traveling.
Returns:
Speed as distance divided by time.
Raises:
ZeroDivisionError: If you pass `0` for the time
TypeError: if you didn't pass a number for both parameters
"""
try:
return distance / time
except ZeroDivisionError:
print("Oh no, why would you divide something by zero?")
raise
except TypeError:
print("Not a number? Shame on you!")
raise
# DOC502 regression for Sphinx directive after Raises (issue #18959)
def foo():
"""First line.
Raises:
ValueError:
some text
.. versionadded:: 0.7.0
The ``init_kwargs`` argument.
"""
raise ValueError
# DOC502 regression for following section with colons
def example_with_following_section():
"""Summary.
Returns:
str: The resulting expression.
Raises:
ValueError: If the unit is not valid.
Relation to `time_range_lookup`:
- Handles the "start of" modifier.
- Example: "start of month" → `DATETRUNC()`.
"""
raise ValueError
# This should NOT trigger DOC502 because OSError is explicitly re-raised
def f():
"""Do nothing.
Raises:
OSError: If the OS errors.
"""
try:
pass
except OSError as e:
raise e
# This should NOT trigger DOC502 because OSError is explicitly re-raised with from None
def g():
"""Do nothing.
Raises:
OSError: If the OS errors.
"""
try:
pass
except OSError as e:
raise e from None
# This should NOT trigger DOC502 because ValueError is explicitly re-raised from tuple exception
def h():
"""Do nothing.
Raises:
ValueError: If something goes wrong.
"""
try:
pass
except (ValueError, TypeError) as e:
raise e
# This should NOT trigger DOC502 because TypeError is explicitly re-raised from tuple exception
def i():
"""Do nothing.
Raises:
TypeError: If something goes wrong.
"""
try:
pass
except (ValueError, TypeError) as e:
raise e
| FasterThanLightError |
python | lepture__authlib | authlib/jose/errors.py | {
"start": 958,
"end": 1367
} | class ____(JoseError):
error = "invalid_encryption_algorithm_for_ECDH_1PU_with_key_wrapping"
def __init__(self):
description = (
"In key agreement with key wrapping mode ECDH-1PU algorithm "
"only supports AES_CBC_HMAC_SHA2 family encryption algorithms"
)
super().__init__(description=description)
| InvalidEncryptionAlgorithmForECDH1PUWithKeyWrappingError |
python | getsentry__sentry | src/sentry/utils/warnings.py | {
"start": 2286,
"end": 3404
} | class ____:
"""
Add-only set structure for storing unique warnings.
"""
def __init__(self) -> None:
self.__warnings: dict[tuple[object, ...], Warning] = {}
def __contains__(self, value: object) -> bool:
assert isinstance(value, Warning)
return self.__get_key(value) in self.__warnings
def __len__(self) -> int:
return len(self.__warnings)
def __iter__(self) -> Iterator[Warning]:
yield from self.__warnings.values()
def __get_key(self, warning: Warning) -> tuple[object, ...]:
return (type(warning), warning.args if hasattr(warning, "args") else str(warning))
def add(self, warning: Warning, stacklevel: int | None = None) -> None:
self.__warnings[self.__get_key(warning)] = warning
# Maintains all unique warnings seen since system startup.
seen_warnings = WarningSet()
manager = WarningManager(
(
lambda warning, stacklevel=1: warnings.warn(warning, stacklevel=stacklevel + 2),
seen_warnings.add,
)
)
# Make this act like the standard library ``warnings`` module.
warn = manager.warn
| WarningSet |
python | dask__distributed | distributed/core.py | {
"start": 2380,
"end": 3255
} | class ____(IOError):
pass
logger = logging.getLogger(__name__)
def raise_later(exc):
def _raise(*args, **kwargs):
raise exc
return _raise
tick_maximum_delay = parse_timedelta(
dask.config.get("distributed.admin.tick.limit"), default="ms"
)
LOG_PDB = dask.config.get("distributed.admin.pdb-on-err")
@functools.cache
def _expects_comm(func: Callable) -> bool:
sig = inspect.signature(func)
params = list(sig.parameters)
if params and params[0] == "comm":
return True
if params and params[0] == "stream":
warnings.warn(
"Calling the first argument of a RPC handler `stream` is "
"deprecated. Defining this argument is optional. Either remove the "
f"argument or rename it to `comm` in {func}.",
FutureWarning,
)
return True
return False
| RPCClosed |
python | cython__cython | tests/run/function_as_method_py_T494.py | {
"start": 248,
"end": 325
} | class ____(object):
"""
>>> C.plus1(1)
2
"""
plus1 = f_plus
| C |
python | pypa__pip | src/pip/_vendor/rich/errors.py | {
"start": 569,
"end": 642
} | class ____(ConsoleError):
"""Alt screen mode was required."""
| NoAltScreen |
python | django-extensions__django-extensions | tests/testapp/models.py | {
"start": 13954,
"end": 14014
} | class ____(models.Model):
photo = models.FileField()
| Photo |
python | aimacode__aima-python | search.py | {
"start": 17343,
"end": 25944
} | class ____(Problem):
""" The problem of moving the Hybrid Wumpus Agent from one place to other """
def __init__(self, initial, goal, allowed, dimrow):
""" Define goal state and initialize a problem """
super().__init__(initial, goal)
self.dimrow = dimrow
self.goal = goal
self.allowed = allowed
def actions(self, state):
""" Return the actions that can be executed in the given state.
The result would be a list, since there are only three possible actions
in any given state of the environment """
possible_actions = ['Forward', 'TurnLeft', 'TurnRight']
x, y = state.get_location()
orientation = state.get_orientation()
# Prevent Bumps
if x == 1 and orientation == 'LEFT':
if 'Forward' in possible_actions:
possible_actions.remove('Forward')
if y == 1 and orientation == 'DOWN':
if 'Forward' in possible_actions:
possible_actions.remove('Forward')
if x == self.dimrow and orientation == 'RIGHT':
if 'Forward' in possible_actions:
possible_actions.remove('Forward')
if y == self.dimrow and orientation == 'UP':
if 'Forward' in possible_actions:
possible_actions.remove('Forward')
return possible_actions
def result(self, state, action):
""" Given state and action, return a new state that is the result of the action.
Action is assumed to be a valid action in the state """
x, y = state.get_location()
proposed_loc = list()
# Move Forward
if action == 'Forward':
if state.get_orientation() == 'UP':
proposed_loc = [x, y + 1]
elif state.get_orientation() == 'DOWN':
proposed_loc = [x, y - 1]
elif state.get_orientation() == 'LEFT':
proposed_loc = [x - 1, y]
elif state.get_orientation() == 'RIGHT':
proposed_loc = [x + 1, y]
else:
raise Exception('InvalidOrientation')
# Rotate counter-clockwise
elif action == 'TurnLeft':
if state.get_orientation() == 'UP':
state.set_orientation('LEFT')
elif state.get_orientation() == 'DOWN':
state.set_orientation('RIGHT')
elif state.get_orientation() == 'LEFT':
state.set_orientation('DOWN')
elif state.get_orientation() == 'RIGHT':
state.set_orientation('UP')
else:
raise Exception('InvalidOrientation')
# Rotate clockwise
elif action == 'TurnRight':
if state.get_orientation() == 'UP':
state.set_orientation('RIGHT')
elif state.get_orientation() == 'DOWN':
state.set_orientation('LEFT')
elif state.get_orientation() == 'LEFT':
state.set_orientation('UP')
elif state.get_orientation() == 'RIGHT':
state.set_orientation('DOWN')
else:
raise Exception('InvalidOrientation')
if proposed_loc in self.allowed:
state.set_location(proposed_loc[0], [proposed_loc[1]])
return state
def goal_test(self, state):
""" Given a state, return True if state is a goal state or False, otherwise """
return state.get_location() == tuple(self.goal)
def h(self, node):
""" Return the heuristic value for a given state."""
# Manhattan Heuristic Function
x1, y1 = node.state.get_location()
x2, y2 = self.goal
return abs(x2 - x1) + abs(y2 - y1)
# ______________________________________________________________________________
# Other search algorithms
def recursive_best_first_search(problem, h=None):
"""[Figure 3.26]"""
h = memoize(h or problem.h, 'h')
def RBFS(problem, node, flimit):
if problem.goal_test(node.state):
return node, 0 # (The second value is immaterial)
successors = node.expand(problem)
if len(successors) == 0:
return None, np.inf
for s in successors:
s.f = max(s.path_cost + h(s), node.f)
while True:
# Order by lowest f value
successors.sort(key=lambda x: x.f)
best = successors[0]
if best.f > flimit:
return None, best.f
if len(successors) > 1:
alternative = successors[1].f
else:
alternative = np.inf
result, best.f = RBFS(problem, best, min(flimit, alternative))
if result is not None:
return result, best.f
node = Node(problem.initial)
node.f = h(node)
result, bestf = RBFS(problem, node, np.inf)
return result
def hill_climbing(problem):
"""
[Figure 4.2]
From the initial node, keep choosing the neighbor with highest value,
stopping when no neighbor is better.
"""
current = Node(problem.initial)
while True:
neighbors = current.expand(problem)
if not neighbors:
break
neighbor = argmax_random_tie(neighbors, key=lambda node: problem.value(node.state))
if problem.value(neighbor.state) <= problem.value(current.state):
break
current = neighbor
return current.state
def exp_schedule(k=20, lam=0.005, limit=100):
"""One possible schedule function for simulated annealing"""
return lambda t: (k * np.exp(-lam * t) if t < limit else 0)
def simulated_annealing(problem, schedule=exp_schedule()):
"""[Figure 4.5] CAUTION: This differs from the pseudocode as it
returns a state instead of a Node."""
current = Node(problem.initial)
for t in range(sys.maxsize):
T = schedule(t)
if T == 0:
return current.state
neighbors = current.expand(problem)
if not neighbors:
return current.state
next_choice = random.choice(neighbors)
delta_e = problem.value(next_choice.state) - problem.value(current.state)
if delta_e > 0 or probability(np.exp(delta_e / T)):
current = next_choice
def simulated_annealing_full(problem, schedule=exp_schedule()):
""" This version returns all the states encountered in reaching
the goal state."""
states = []
current = Node(problem.initial)
for t in range(sys.maxsize):
states.append(current.state)
T = schedule(t)
if T == 0:
return states
neighbors = current.expand(problem)
if not neighbors:
return current.state
next_choice = random.choice(neighbors)
delta_e = problem.value(next_choice.state) - problem.value(current.state)
if delta_e > 0 or probability(np.exp(delta_e / T)):
current = next_choice
def and_or_graph_search(problem):
"""[Figure 4.11]Used when the environment is nondeterministic and completely observable.
Contains OR nodes where the agent is free to choose any action.
After every action there is an AND node which contains all possible states
the agent may reach due to stochastic nature of environment.
The agent must be able to handle all possible states of the AND node (as it
may end up in any of them).
Returns a conditional plan to reach goal state,
or failure if the former is not possible."""
# functions used by and_or_search
def or_search(state, problem, path):
"""returns a plan as a list of actions"""
if problem.goal_test(state):
return []
if state in path:
return None
for action in problem.actions(state):
plan = and_search(problem.result(state, action),
problem, path + [state, ])
if plan is not None:
return [action, plan]
def and_search(states, problem, path):
"""Returns plan in form of dictionary where we take action plan[s] if we reach state s."""
plan = {}
for s in states:
plan[s] = or_search(s, problem, path)
if plan[s] is None:
return None
return plan
# body of and or search
return or_search(problem.initial, problem, [])
# Pre-defined actions for PeakFindingProblem
directions4 = {'W': (-1, 0), 'N': (0, 1), 'E': (1, 0), 'S': (0, -1)}
directions8 = dict(directions4)
directions8.update({'NW': (-1, 1), 'NE': (1, 1), 'SE': (1, -1), 'SW': (-1, -1)})
| PlanRoute |
python | spack__spack | lib/spack/spack/oci/opener.py | {
"start": 1425,
"end": 1826
} | class ____(spack.tokenize.TokenBase):
AUTH_PARAM = rf"({token}){BWS}={BWS}({token}|{quoted_string})"
# TOKEN68 = r"([A-Za-z0-9\-._~+/]+=*)" # todo... support this?
TOKEN = rf"{tchar}+"
EQUALS = rf"{BWS}={BWS}"
COMMA = rf"{OWS},{OWS}"
SPACE = r" +"
EOF = r"$"
ANY = r"."
WWW_AUTHENTICATE_TOKENIZER = spack.tokenize.Tokenizer(WwwAuthenticateTokens)
| WwwAuthenticateTokens |
python | doocs__leetcode | solution/2800-2899/2847.Smallest Number With Given Digit Product/Solution.py | {
"start": 0,
"end": 338
} | class ____:
def smallestNumber(self, n: int) -> str:
cnt = [0] * 10
for i in range(9, 1, -1):
while n % i == 0:
n //= i
cnt[i] += 1
if n > 1:
return "-1"
ans = "".join(str(i) * cnt[i] for i in range(2, 10))
return ans if ans else "1"
| Solution |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B017_0.py | {
"start": 247,
"end": 1985
} | class ____(unittest.TestCase):
def evil_raises(self) -> None:
with self.assertRaises(Exception):
raise Exception("Evil I say!")
def also_evil_raises(self) -> None:
with self.assertRaises(BaseException):
raise Exception("Evil I say!")
def context_manager_raises(self) -> None:
with self.assertRaises(Exception) as ex:
raise Exception("Context manager is good")
self.assertEqual("Context manager is good", str(ex.exception))
def regex_raises(self) -> None:
with self.assertRaisesRegex(Exception, "Regex is good"):
raise Exception("Regex is good")
def raises_with_absolute_reference(self):
with self.assertRaises(asyncio.CancelledError):
Foo()
def test_pytest_raises():
with pytest.raises(Exception):
raise ValueError("Hello")
with pytest.raises(Exception), pytest.raises(ValueError):
raise ValueError("Hello")
with pytest.raises(Exception, "hello"):
raise ValueError("This is fine")
with pytest.raises(Exception, match="hello"):
raise ValueError("This is also fine")
with contextlib.nullcontext(), pytest.raises(Exception):
raise ValueError("Multiple context managers")
def test_pytest_raises_keyword():
with pytest.raises(expected_exception=Exception):
raise ValueError("Should be flagged")
def test_assert_raises_keyword():
class TestKwargs(unittest.TestCase):
def test_method(self):
with self.assertRaises(exception=Exception):
raise ValueError("Should be flagged")
with self.assertRaises(exception=BaseException):
raise ValueError("Should be flagged")
| Foobar |
python | apache__airflow | providers/standard/src/airflow/providers/standard/example_dags/example_hitl_operator.py | {
"start": 1257,
"end": 6025
} | class ____(BaseNotifier):
"""Simple notifier to demonstrate HITL notification without setup any connection."""
template_fields = ("message",)
def __init__(self, message: str) -> None:
self.message = message
def notify(self, context: Context) -> None:
url = HITLOperator.generate_link_to_ui_from_context(
context=context,
base_url="http://localhost:28080",
)
self.log.info(self.message)
self.log.info("Url to respond %s", url)
hitl_request_callback = LocalLogNotifier(
message="""
[HITL]
Subject: {{ task.subject }}
Body: {{ task.body }}
Options: {{ task.options }}
Is Multiple Option: {{ task.multiple }}
Default Options: {{ task.defaults }}
Params: {{ task.params }}
"""
)
hitl_success_callback = LocalLogNotifier(
message="{% set task_id = task.task_id -%}{{ ti.xcom_pull(task_ids=task_id) }}"
)
hitl_failure_callback = LocalLogNotifier(message="Request to response to '{{ task.subject }}' failed")
# [END hitl_notifier]
with DAG(
dag_id="example_hitl_operator",
start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
catchup=False,
tags=["example", "HITL"],
):
# [START howto_hitl_entry_operator]
wait_for_input = HITLEntryOperator(
task_id="wait_for_input",
subject="Please provide required information: ",
params={"information": Param("", type="string")},
notifiers=[hitl_request_callback],
on_success_callback=hitl_success_callback,
on_failure_callback=hitl_failure_callback,
)
# [END howto_hitl_entry_operator]
# [START howto_hitl_operator]
wait_for_option = HITLOperator(
task_id="wait_for_option",
subject="Please choose one option to proceed: ",
options=["option 1", "option 2", "option 3"],
notifiers=[hitl_request_callback],
on_success_callback=hitl_success_callback,
on_failure_callback=hitl_failure_callback,
)
# [END howto_hitl_operator]
# [START howto_hitl_operator_multiple]
wait_for_multiple_options = HITLOperator(
task_id="wait_for_multiple_options",
subject="Please choose option to proceed: ",
options=["option 4", "option 5", "option 6"],
multiple=True,
notifiers=[hitl_request_callback],
on_success_callback=hitl_success_callback,
on_failure_callback=hitl_failure_callback,
)
# [END howto_hitl_operator_multiple]
# [START howto_hitl_operator_timeout]
wait_for_default_option = HITLOperator(
task_id="wait_for_default_option",
subject="Please choose option to proceed: ",
options=["option 7", "option 8", "option 9"],
defaults=["option 7"],
execution_timeout=datetime.timedelta(seconds=1),
notifiers=[hitl_request_callback],
on_success_callback=hitl_success_callback,
on_failure_callback=hitl_failure_callback,
)
# [END howto_hitl_operator_timeout]
# [START howto_hitl_approval_operator]
valid_input_and_options = ApprovalOperator(
task_id="valid_input_and_options",
subject="Are the following input and options valid?",
body="""
Input: {{ ti.xcom_pull(task_ids='wait_for_input')["params_input"]["information"] }}
Option: {{ ti.xcom_pull(task_ids='wait_for_option')["chosen_options"] }}
Multiple Options: {{ ti.xcom_pull(task_ids='wait_for_multiple_options')["chosen_options"] }}
Timeout Option: {{ ti.xcom_pull(task_ids='wait_for_default_option')["chosen_options"] }}
""",
defaults="Reject",
execution_timeout=datetime.timedelta(minutes=1),
notifiers=[hitl_request_callback],
on_success_callback=hitl_success_callback,
on_failure_callback=hitl_failure_callback,
assigned_users=[{"id": "admin", "name": "admin"}],
)
# [END howto_hitl_approval_operator]
# [START howto_hitl_branch_operator]
choose_a_branch_to_run = HITLBranchOperator(
task_id="choose_a_branch_to_run",
subject="You're now allowed to proceeded. Please choose one task to run: ",
options=["task_1", "task_2", "task_3"],
notifiers=[hitl_request_callback],
on_success_callback=hitl_success_callback,
on_failure_callback=hitl_failure_callback,
)
# [END howto_hitl_branch_operator]
# [START howto_hitl_workflow]
@task
def task_1(): ...
@task
def task_2(): ...
@task
def task_3(): ...
(
[wait_for_input, wait_for_option, wait_for_default_option, wait_for_multiple_options]
>> valid_input_and_options
>> choose_a_branch_to_run
>> [task_1(), task_2(), task_3()]
)
# [END howto_hitl_workflow]
# [END hitl_tutorial]
| LocalLogNotifier |
python | google__jax | tests/dynamic_api_test.py | {
"start": 21801,
"end": 36955
} | class ____(jtu.JaxTestCase):
def test_jvp_broadcast(self):
@jax.jit
def fn(n, x):
return lax.broadcast_in_dim(x, (n,), ())
outer_jaxpr = jax.make_jaxpr(
lambda x, t: jax.jvp(lambda y: fn(3, y), (x,), (t,))
)(3., 4.)
# { lambda ; a:f32[] b:f32[]. let
# c:f32[3] d:f32[3] = pjit[
# jaxpr={ lambda ; e:i32[] f:f32[] g:f32[]. let
# h:f32[e] = broadcast_in_dim[broadcast_dimensions=() shape=(None,)] f e
# i:f32[e] = broadcast_in_dim[broadcast_dimensions=() shape=(None,)] g e
# in (h, i) }
# name=f
# ] 3 a b
# in (c, d) }
self.assertLen(outer_jaxpr.jaxpr.eqns, 1)
eqn, = outer_jaxpr.jaxpr.eqns
self.assertIn('jaxpr', eqn.params)
jaxpr = eqn.params['jaxpr'].jaxpr
self.assertLen(jaxpr.invars, 3)
e, f, g = jaxpr.invars
self.assertEqual(e.aval.shape, ())
self.assertEqual(f.aval.shape, ())
self.assertEqual(g.aval.shape, ())
self.assertLen(jaxpr.outvars, 2)
h, i = jaxpr.outvars
self.assertEqual(h.aval.shape, (e,))
self.assertEqual(i.aval.shape, (e,))
self.assertLen(eqn.outvars, 2)
c, d = eqn.outvars
self.assertEqual(c.aval.shape, (3,))
self.assertEqual(d.aval.shape, (3,))
def test_jvp_basic(self):
@jax.jit(abstracted_axes=('n',))
def foo(x):
return jnp.sin(x)
x = t = jnp.arange(3.)
outer_jaxpr = jax.make_jaxpr(lambda x, t: jax.jvp(foo, (x,), (t,)))(x, t)
# { lambda ; a:f32[3] b:f32[3]. let
# c:f32[3] d:f32[3] = pjit[
# jaxpr={ lambda ; e:i32[] f:f32[e] g:f32[e]. let
# h:f32[e] = sin f
# i:f32[e] = cos f
# j:f32[e] = mul g i
# in (h, j) }
# name=f
# ] 3 a b
# in (c, d) }
self.assertLen(outer_jaxpr.jaxpr.eqns, 1)
eqn, = outer_jaxpr.eqns
self.assertIn('jaxpr', eqn.params)
jaxpr = eqn.params['jaxpr'].jaxpr
self.assertLen(jaxpr.invars, 3)
e, f, g = jaxpr.invars
self.assertEqual(e.aval.shape, ())
self.assertEqual(f.aval.shape, (e,))
self.assertEqual(g.aval.shape, (e,))
self.assertLen(jaxpr.outvars, 2)
self.assertLen(eqn.outvars, 2)
c, d = eqn.outvars
self.assertEqual(c.aval.shape, (3,))
self.assertEqual(d.aval.shape, (3,))
def test_linearize_basic(self):
@jax.jit(abstracted_axes=('n',))
def foo(x):
return jax.lax.sin(x)
x = jnp.arange(3.)
# primal computation
outer_jaxpr = jax.make_jaxpr(lambda x: jax.linearize(foo, x))(x)
# { lambda ; a:f32[3]. let
# b:f32[3] c:f32[3] = pjit[
# jaxpr={ lambda ; d:i32[] e:f32[d]. let
# f:f32[d] = sin e
# g:f32[d] = cos e
# in (f, g) }
# name=foo
# ] 3 a
# in (b, c) }
self.assertLen(outer_jaxpr.jaxpr.eqns, 1)
eqn, = outer_jaxpr.jaxpr.eqns
self.assertIn('jaxpr', eqn.params)
jaxpr = eqn.params['jaxpr'].jaxpr
self.assertLen(jaxpr.invars, 2)
d, e = jaxpr.invars
self.assertEqual(d.aval.shape, ())
self.assertEqual(e.aval.shape, (d,))
self.assertLen(jaxpr.eqns, 2)
self.assertLen(jaxpr.outvars, 2)
f, g = jaxpr.outvars
self.assertEqual(jaxpr.eqns[0].outvars, [f])
self.assertEqual(jaxpr.eqns[1].outvars, [g])
self.assertLen(eqn.outvars, 2)
b, c = eqn.outvars
self.assertEqual(b.aval.shape, (3,))
self.assertEqual(c.aval.shape, (3,))
# primal and tangent computation
outer_jaxpr = jax.make_jaxpr(
lambda x, xdot: jax.linearize(foo, x)[1](xdot))(x, x)
# { lambda ; a:f32[3] b:f32[3]. let
# _:f32[3] c:f32[3] = pjit[
# jaxpr={ lambda ; d:i32[] e:f32[d]. let
# f:f32[d] = sin e
# g:f32[d] = cos e
# in (f, g) }
# name=foo
# ] 3 a
# h:f32[3] = pjit[
# jaxpr={ lambda ; i:i32[] j:f32[i] k:f32[i]. let
# l:f32[i] = mul k j
# in (l,) }
# name=foo
# ] 3 c b
# in (h,) }
self.assertLen(outer_jaxpr.jaxpr.eqns, 2)
_, eqn = outer_jaxpr.jaxpr.eqns
self.assertIn('jaxpr', eqn.params)
jaxpr = eqn.params['jaxpr'].jaxpr
self.assertLen(jaxpr.invars, 3)
i, j, k = jaxpr.invars
self.assertEqual(i.aval.shape, ())
self.assertEqual(j.aval.shape, (i,))
self.assertEqual(k.aval.shape, (i,))
self.assertLen(eqn.outvars, 1)
h, = eqn.outvars
self.assertEqual(h.aval.shape, (3,))
def test_linearize_basic2(self):
@jax.jit(abstracted_axes=('n',))
def foo(x):
return jax.jit(jax.lax.sin)(x)
x = jnp.arange(3.)
outer_jaxpr = jax.make_jaxpr(lambda x: jax.linearize(foo, x))(x)
# { lambda ; a:f32[3]. let
# b:f32[3] c:f32[3] = pjit[
# jaxpr={ lambda ; d:i32[] e:f32[d]. let
# f:f32[d] g:f32[d] = pjit[
# jaxpr={ lambda ; h:i32[] i:f32[h]. let
# j:f32[h] = sin i
# k:f32[h] = cos i
# in (j, k) }
# name=sin
# ] d e
# in (f, g) }
# name=foo
# ] 3 a
# in (b, c) }
self.assertLen(outer_jaxpr.jaxpr.eqns, 1)
eqn, = outer_jaxpr.jaxpr.eqns
self.assertLen(eqn.outvars, 2)
b, c = eqn.outvars
self.assertEqual(b.aval.shape, (3,))
self.assertEqual(c.aval.shape, (3,))
def test_grad_basic(self):
@jax.jit(abstracted_axes=('n',))
def foo(x):
y = jax.lax.sin(x)
return y.sum()
x = jnp.arange(3.)
outer_jaxpr = jax.make_jaxpr(jax.grad(foo))(x)
# { lambda ; a:f32[3]. let
# _:f32[] b:f32[3] = pjit[
# jaxpr={ lambda ; c:i32[] d:f32[c]. let
# e:f32[c] = sin d
# f:f32[c] = cos d
# g:f32[] = reduce_sum[axes=(0,)] e
# in (g, f) }
# name=foo
# ] 3 a
# h:f32[3] = pjit[
# jaxpr={ lambda ; i:i32[] j:f32[i] k:f32[]. let
# l:f32[i] = broadcast_in_dim[broadcast_dimensions=() shape=(None,)] k i
# m:f32[i] = mul l j
# in (m,) }
# name=foo
# ] 3 b 1.0
# in (h,) }
self.assertLen(outer_jaxpr.jaxpr.eqns, 2)
fwd_eqn, bwd_eqn = outer_jaxpr.jaxpr.eqns
self.assertIn('jaxpr', fwd_eqn.params)
fwd_jaxpr = fwd_eqn.params['jaxpr'].jaxpr
self.assertLen(fwd_jaxpr.invars, 2)
c, d = fwd_jaxpr.invars
self.assertEqual(c.aval.shape, ())
self.assertEqual(d.aval.shape, (c,))
self.assertLen(fwd_jaxpr.outvars, 2)
g, f = fwd_jaxpr.outvars
self.assertEqual(g.aval.shape, ())
self.assertEqual(f.aval.shape, (c,))
self.assertLen(fwd_eqn.outvars, 2)
_, b = fwd_eqn.outvars
self.assertEqual(b.aval.shape, (3,))
self.assertIn('jaxpr', bwd_eqn.params)
bwd_jaxpr = bwd_eqn.params['jaxpr'].jaxpr
self.assertLen(bwd_jaxpr.invars, 3)
i, j, k = bwd_jaxpr.invars
self.assertEqual(i.aval.shape, ())
self.assertEqual(j.aval.shape, (i,))
self.assertEqual(k.aval.shape, ())
self.assertLen(bwd_jaxpr.outvars, 1)
m, = bwd_jaxpr.outvars
self.assertEqual(m.aval.shape, (i,))
self.assertLen(bwd_eqn.outvars, 1)
h, = bwd_eqn.outvars
self.assertEqual(h.aval.shape, (3,))
def test_mlp_autodiff_dynamic_batch_toplevel(self):
def predict(params, inputs):
for W, b in params:
outputs = jnp.dot(inputs, W) + b
inputs = jnp.maximum(0, outputs)
return outputs
def loss(params, batch):
inputs, targets = batch
predictions = predict(params, inputs)
return jnp.sum((predictions - targets) ** 2)
batch = (inputs, targets) = (jnp.ones((128, 784)), jnp.ones((128, 10)))
params = [(jnp.ones((784, 256)), jnp.ones(256)),
(jnp.ones((256, 256)), jnp.ones(256)),
(jnp.ones((256, 10)), jnp.ones( 10))]
# jvp
def loss_jvp(params, batch):
return jax.jvp(loss, (params, batch), (params, batch))
jaxpr = jax.make_jaxpr(loss_jvp, abstracted_axes=({}, {0: 'n'}))(params, batch)
core.check_jaxpr(jaxpr.jaxpr)
# linearize
def loss_lin(params, batch):
y, f_lin = jax.linearize(loss, params, batch)
y_dot = f_lin(params, batch)
return y, y_dot
jaxpr = jax.make_jaxpr(loss_lin, abstracted_axes=({}, {0: 'n'}))(params, batch)
core.check_jaxpr(jaxpr.jaxpr)
# grad
jaxpr = jax.make_jaxpr(jax.grad(loss), abstracted_axes=({}, {0: 'n'}))(params, batch)
core.check_jaxpr(jaxpr.jaxpr)
def test_mlp_autodiff_dynamic_batch_inner(self):
# This is like the above 'toplevel' test, but instead of introducing
# abstracted axes on the make_jaxpr call, we do it on a jit.
@jax.jit(abstracted_axes=({}, {0: 'n'}))
def predict(params, inputs):
for W, b in params:
outputs = jnp.dot(inputs, W) + b
inputs = jnp.maximum(0, outputs)
return outputs
def loss(params, batch):
inputs, targets = batch
predictions = predict(params, inputs)
return jnp.sum((predictions - targets) ** 2)
batch = (inputs, targets) = (jnp.ones((128, 784)), jnp.ones((128, 10)))
params = [(jnp.ones((784, 256)), jnp.ones(256)),
(jnp.ones((256, 256)), jnp.ones(256)),
(jnp.ones((256, 10)), jnp.ones( 10))]
# jvp
def loss_jvp(params, batch):
return jax.jvp(loss, (params, batch), (params, batch))
jaxpr = jax.make_jaxpr(loss_jvp)(params, batch)
core.check_jaxpr(jaxpr.jaxpr)
# linearize
def loss_lin(params, batch):
y, f_lin = jax.linearize(loss, params, batch)
y_dot = f_lin(params, batch)
return y, y_dot
jaxpr = jax.make_jaxpr(loss_lin)(params, batch)
core.check_jaxpr(jaxpr.jaxpr)
# grad
jaxpr = jax.make_jaxpr(jax.grad(loss))(params, batch)
core.check_jaxpr(jaxpr.jaxpr)
def test_bint_broadcast(self):
d = lax.convert_element_type(3, core.bint(5))
bint = lambda x, b: lax.convert_element_type(x, core.bint(b))
x = lax.broadcast_in_dim(0, (d,), ()) # doesn't crash
self.assertIsInstance(x, core.DArray)
self.assertAllClose(x._data, np.zeros(5, dtype='int32'), check_dtypes=False)
self.assertEqual(
x._aval, core.DShapedArray((bint(3, 5),), x._data.dtype, True))
def f(n):
return jnp.zeros(n)
x = jax.jit(f)(d)
self.assertIsInstance(x, core.DArray)
self.assertAllClose(x._data, np.zeros(5, dtype='int32'), check_dtypes=False)
self.assertEqual(
x._aval, core.DShapedArray((bint(3, 5),), x._data.dtype, False))
jaxpr = jax.make_jaxpr(f)(d).jaxpr
# { lambda ; a:bint{≤5}[]. let
# b:f32[a] = broadcast_in_dim[...] 0.0 a
# in (b,) }
self.assertLen(jaxpr.invars, 1)
a, = jaxpr.invars
self.assertEqual(a.aval, core.DShapedArray((), core.bint(5)))
self.assertLen(jaxpr.eqns, 1)
eqn, = jaxpr.eqns
self.assertLen(eqn.outvars, 1)
b, = eqn.outvars
self.assertEqual(b.aval.shape, (a,))
def test_vmap_abstracted_axis(self):
def foo(x, y):
z = jax.vmap(jnp.sin)(x) * y
return jax.vmap(jnp.add)(x, z)
x = jnp.arange(3.)
jaxpr = jax.make_jaxpr(foo, abstracted_axes=('n',))(x, x).jaxpr
self.assertLen(jaxpr.invars, 3)
a, b, c = jaxpr.invars
self.assertEqual(a.aval.shape, ())
self.assertEqual(b.aval.shape, (a,))
self.assertEqual(c.aval.shape, (a,))
self.assertLen(jaxpr.eqns, 3)
self.assertLen(jaxpr.outvars, 1)
f, = jaxpr.outvars
self.assertEqual(f.aval.shape, (a,))
def test_vmap_abstracted_axes_2d(self):
def foo(x, y):
z = jax.vmap(jax.vmap(jnp.sin))(x) * y
return jax.vmap(jax.vmap(jnp.add))(x, z)
x = jnp.arange(12.).reshape(3, 4)
jaxpr = jax.make_jaxpr(foo, abstracted_axes=('n', 'm'))(x, x).jaxpr
self.assertLen(jaxpr.invars, 4)
a, b, c, d = jaxpr.invars
self.assertEqual(a.aval.shape, ())
self.assertEqual(b.aval.shape, ())
self.assertEqual(c.aval.shape, (a, b))
self.assertEqual(c.aval.shape, (a, b))
self.assertLen(jaxpr.eqns, 3)
self.assertLen(jaxpr.outvars, 1)
f, = jaxpr.outvars
self.assertEqual(f.aval.shape, (a, b))
def test_vmap_of_indexing_basic(self):
x = jnp.arange(3.)
def f(idxs):
return jax.vmap(lambda i: x[i])(idxs)
idxs = jnp.arange(3)
jaxpr = jax.make_jaxpr(f, abstracted_axes=('n',))(idxs).jaxpr
# { lambda a:f32[3]; b:i32[] c:i32[b]. let
# d:bool[b] = lt c 0
# e:i32[b] = add c 3
# f:i32[b] = select_n d c e
# g:i32[b,1] = broadcast_in_dim[broadcast_dimensions=(0,) shape=(None, 1)] f b
# h:f32[b,1] = gather[
# dimension_numbers=GatherDimensionNumbers(offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,))
# fill_value=None
# indices_are_sorted=False
# mode=GatherScatterMode.PROMISE_IN_BOUNDS
# slice_sizes=(1,)
# unique_indices=False
# ] a g
# i:f32[b] = squeeze[dimensions=(1,)] h
# in (i,) }
b, _ = jaxpr.invars
e, = (e for e in jaxpr.eqns if str(e.primitive) == 'gather')
h, = e.outvars
self.assertEqual(h.aval.shape, (b, 1))
def test_einsum_basic(self):
x = jnp.arange(20.).reshape(4, 5)
def f(x):
return jnp.einsum('ij,kj->ik', x, x)
jaxpr = jax.make_jaxpr(f, abstracted_axes=('n', 'm'))(x).jaxpr
# { lambda ; a:i32[] b:i32[] c:f32[a,b]. let
# d:f32[a,a] = pjit[
# jaxpr={ lambda ; e:i32[] f:i32[] g:f32[e,f] h:f32[e,f]. let
# i:f32[e,e] = dot_general[
# dimension_numbers=(((1,), (1,)), ((), ()))
# precision=None
# preferred_element_type=None
# ] g h
# in (i,) }
# name=_einsum
# ] a b c c
# in (d,) }
self.assertLen(jaxpr.invars, 3)
a, b, c = jaxpr.invars
self.assertEqual(c.aval.shape[0], a)
self.assertLen(jaxpr.eqns, 1)
self.assertLen(jaxpr.eqns[0].outvars, 1)
d, = jaxpr.eqns[0].outvars
self.assertEqual(d.aval.shape, (a, a))
def test_inferring_valid_subjaxpr_type_add(self):
def f(x):
return x + x.shape[0]
jax.make_jaxpr(f, abstracted_axes=('n',))(jnp.arange(3)) # doesn't crash
def test_slicing_basic_jaxpr(self):
def f(x):
return x[0]
jaxpr = jax.make_jaxpr(f, abstracted_axes=(None, 'n'))(jnp.zeros((3, 4)))
# { lambda ; a:i32[] b:f32[3,a]. let
# c:f32[1,a] = dynamic_slice[slice_sizes=(1, None)] b 0 0 a
# d:f32[a] = squeeze[dimensions=(0,)] c
# in (d,) }
self.assertLen(jaxpr.jaxpr.invars, 2)
a, _ = jaxpr.jaxpr.invars
self.assertLen(jaxpr.jaxpr.outvars, 1)
d, = jaxpr.jaxpr.outvars
self.assertLen(d.aval.shape, 1)
self.assertEqual(d.aval.shape, (a,))
def test_shape_tuple_argument_to_zeros(self):
@jax.jit(abstracted_axes=(('n',), ('n',)))
def f(x, y):
zero = jnp.zeros(jnp.shape(x))
return zero * y
x = jnp.arange(3.0)
y = jnp.arange(3.0) + 1
jax.make_jaxpr(f)(x, y) # doesn't crash
@unittest.skip("Test does not work with jax.Array")
@jtu.with_config(jax_dynamic_shapes=True, jax_numpy_rank_promotion="allow")
| DynamicShapeAutodiffTest |
python | python-pillow__Pillow | Tests/test_file_tiff.py | {
"start": 36249,
"end": 36890
} | class ____:
def test_fd_leak(self, tmp_path: Path) -> None:
tmpfile = tmp_path / "temp.tif"
# this is an mmaped file.
with Image.open("Tests/images/uint16_1_4660.tif") as im:
im.save(tmpfile)
im = Image.open(tmpfile)
fp = im.fp
assert not fp.closed
with pytest.raises(OSError):
os.remove(tmpfile)
im.load()
assert fp.closed
# this closes the mmap
im.close()
# this should not fail, as load should have closed the file pointer,
# and close should have closed the mmap
os.remove(tmpfile)
| TestFileTiffW32 |
python | pytorch__pytorch | test/pytest_shard_custom.py | {
"start": 908,
"end": 2307
} | class ____:
def __init__(self, config):
self.config = config
def pytest_report_collectionfinish(self, config, items) -> str:
"""Log how many and which items are tested in this shard."""
msg = f"Running {len(items)} items in this shard"
if config.getoption("print_items"):
msg += ": " + ", ".join([item.nodeid for item in items])
return msg
def sha256hash(self, x: str) -> int:
return int.from_bytes(hashlib.sha256(x.encode()).digest(), "little")
def filter_items_by_shard(self, items, shard_id: int, num_shards: int):
"""Computes `items` that should be tested in `shard_id` out of `num_shards` total shards."""
new_items = [
item
for item in items
if self.sha256hash(item.nodeid) % num_shards == shard_id - 1
]
return new_items
def pytest_collection_modifyitems(self, config, items):
"""Mutate the collection to consist of just items to be tested in this shard."""
shard_id = config.getoption("shard_id")
shard_total = config.getoption("num_shards")
if shard_id < 1 or shard_id > shard_total:
raise ValueError(
f"{shard_id} is not a valid shard ID out of {shard_total} total shards"
)
items[:] = self.filter_items_by_shard(items, shard_id, shard_total)
| PytestShardPlugin |
python | ZoranPandovski__al-go-rithms | data_structures/Tree/Binary-tree/python/BinaryTree.py | {
"start": 38,
"end": 277
} | class ____:
def __init__(self, key):
self.right = None
self.left = None
self.key = key
def addLeftChild(self, node):
self.left = node
def addRightChild(self, node):
self.right = node
| Node |
python | getsentry__sentry | src/sentry_plugins/twilio/plugin.py | {
"start": 3315,
"end": 6531
} | class ____(CorePluginMixin, NotificationPlugin):
version = sentry.VERSION
description = DESCRIPTION
resource_links = [
(
"Documentation",
"https://github.com/getsentry/sentry/blob/master/src/sentry_plugins/twilio/Twilio_Instructions.md",
),
("Report Issue", "https://github.com/getsentry/sentry/issues"),
(
"View Source",
"https://github.com/getsentry/sentry/tree/master/src/sentry_plugins/twilio",
),
("Twilio", "https://www.twilio.com/"),
]
slug = "twilio"
title = _("Twilio (SMS)")
conf_title = title
conf_key = "twilio"
required_field = "account_sid"
project_conf_form = TwilioConfigurationForm
feature_descriptions = [
FeatureDescription(
"""
Set up SMS notifications to be sent to your mobile device via Twilio.
""",
IntegrationFeatures.MOBILE,
),
FeatureDescription(
"""
Configure Sentry rules to trigger notifications based on conditions you set.
""",
IntegrationFeatures.ALERT_RULE,
),
]
def is_configured(self, project) -> bool:
return all(
[
self.get_option(o, project)
for o in ("account_sid", "auth_token", "sms_from", "sms_to")
]
)
def get_send_to(self, *args, **kwargs):
# This doesn't depend on email permission... stuff.
return True
def error_message_from_json(self, data):
code = data.get("code")
message = data.get("message")
more_info = data.get("more_info")
error_message = f"{code} - {message} {more_info}"
if message:
return error_message
return None
def notify_users(self, group, event, triggering_rules) -> None:
if not self.is_configured(group.project):
return
project = group.project
body = b"Sentry [%s] %s: %s" % (
project.name.encode("utf-8"),
event.group.get_level_display().upper().encode("utf-8"),
event.title.encode("utf-8").splitlines()[0],
)
body = body[:MAX_SMS_LENGTH]
client = self.get_client(group.project)
payload = {"From": client.sms_from, "Body": body}
errors = []
for phone in client.sms_to:
if not phone:
continue
try:
# TODO: Use API client with raise_error
phone = clean_phone(phone)
payload = payload.copy()
payload["To"] = phone
client.request(payload)
except Exception as e:
errors.append(e)
if errors:
self.raise_error(errors[0])
def get_client(self, project):
account_sid = self.get_option("account_sid", project)
auth_token = self.get_option("auth_token", project)
sms_from = clean_phone(self.get_option("sms_from", project))
sms_to = self.get_option("sms_to", project)
sms_to = split_sms_to(sms_to)
return TwilioApiClient(account_sid, auth_token, sms_from, sms_to)
| TwilioPlugin |
python | plotly__plotly.py | plotly/graph_objs/contour/_textfont.py | {
"start": 233,
"end": 9936
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "contour"
_path_str = "contour.textfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Textfont object
For this trace it only has an effect if `coloring` is set to
"heatmap". Sets the text font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.contour.Textfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Textfont
"""
super().__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.contour.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.contour.Textfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Textfont |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/serializers/test_data_source_serializer.py | {
"start": 558,
"end": 2081
} | class ____(TestCase):
def test_serialize(self) -> None:
snuba_query = create_snuba_query(
SnubaQuery.Type.ERROR,
Dataset.Events,
"hello",
"count()",
timedelta(minutes=1),
timedelta(minutes=1),
None,
)
type_name = data_source_type_registry.get_key(QuerySubscriptionDataSourceHandler)
subscription = create_snuba_subscription(
self.project, INCIDENTS_SNUBA_SUBSCRIPTION_TYPE, snuba_query
)
data_source = self.create_data_source(
organization=self.organization,
type=type_name,
source_id=str(subscription.id),
)
result = serialize(data_source)
assert result == {
"id": str(data_source.id),
"organizationId": str(self.organization.id),
"type": type_name,
"sourceId": str(subscription.id),
"queryObj": {
"id": str(subscription.id),
"snubaQuery": {
"aggregate": "count()",
"dataset": "events",
"environment": None,
"id": str(snuba_query.id),
"query": "hello",
"timeWindow": 60,
"eventTypes": ["error"],
"extrapolationMode": "unknown",
},
"status": 1,
"subscription": None,
},
}
| TestDataSourceSerializer |
python | python-markdown__markdown | markdown/extensions/sane_lists.py | {
"start": 1729,
"end": 2150
} | class ____(Extension):
""" Add sane lists to Markdown. """
def extendMarkdown(self, md):
""" Override existing Processors. """
md.parser.blockprocessors.register(SaneOListProcessor(md.parser), 'olist', 40)
md.parser.blockprocessors.register(SaneUListProcessor(md.parser), 'ulist', 30)
def makeExtension(**kwargs): # pragma: no cover
return SaneListExtension(**kwargs)
| SaneListExtension |
python | pandas-dev__pandas | pandas/tests/indexes/period/test_constructors.py | {
"start": 25525,
"end": 25997
} | class ____:
def test_constructor_cant_cast_period(self):
msg = "Cannot cast PeriodIndex to dtype float64"
with pytest.raises(TypeError, match=msg):
Series(period_range("2000-01-01", periods=10, freq="D"), dtype=float)
def test_constructor_cast_object(self):
pi = period_range("1/1/2000", periods=10)
ser = Series(pi, dtype=PeriodDtype("D"))
exp = Series(pi)
tm.assert_series_equal(ser, exp)
| TestSeriesPeriod |
python | huggingface__transformers | src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py | {
"start": 8118,
"end": 10379
} | class ____(MobileNetV1PreTrainedModel):
def __init__(self, config: MobileNetV1Config) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.mobilenet_v1 = MobileNetV1Model(config)
last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels
# Classifier head
self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
output_hidden_states: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs.pooler_output if return_dict else outputs[1]
logits = self.classifier(self.dropout(pooled_output))
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
)
__all__ = [
"MobileNetV1ForImageClassification",
"MobileNetV1Model",
"MobileNetV1PreTrainedModel",
]
| MobileNetV1ForImageClassification |
python | redis__redis-py | redis/commands/search/reducers.py | {
"start": 2078,
"end": 2378
} | class ____(Reducer):
"""
Return the value for the nth percentile within the range of values for the
field within the group.
"""
NAME = "QUANTILE"
def __init__(self, field: str, pct: float) -> None:
super().__init__(field, str(pct))
self._field = field
| quantile |
python | realpython__materials | python-class/employee.py | {
"start": 32,
"end": 1021
} | class ____:
company = "Example, Inc."
def __init__(self, name, birth_date):
self.name = name
self.birth_date = birth_date
@property
def birth_date(self):
return self._birth_date
@birth_date.setter
def birth_date(self, value):
self._birth_date = datetime.fromisoformat(value)
def compute_age(self):
today = datetime.today()
age = today.year - self.birth_date.year
birthday = datetime(
today.year, self.birth_date.month, self.birth_date.day
)
if today < birthday:
age -= 1
return age
@classmethod
def from_dict(cls, data_dict):
return cls(**data_dict)
def __str__(self):
return f"{self.name} is {self.compute_age()} years old"
def __repr__(self):
return (
f"{type(self).__name__}("
f"name='{self.name}', "
f"birth_date='{self.birth_date.strftime('%Y-%m-%d')}')"
)
| Employee |
python | getsentry__sentry | tests/sentry/issue_detection/test_m_n_plus_one_db_detector.py | {
"start": 875,
"end": 8320
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self._settings = get_detection_settings()
def find_problems(
self, event: dict[str, Any], settings: dict[DetectorType, Any] | None = None
) -> list[PerformanceProblem]:
detector_settings = settings or self._settings
detector = MNPlusOneDBSpanDetector(detector_settings, event)
run_detector_on_data(detector, event)
return list(detector.stored_problems.values())
def test_detects_parallel_m_n_plus_one(self) -> None:
event = get_event("m-n-plus-one-db/m-n-plus-one-graphql")
problems = self.find_problems(event)
assert problems == [
PerformanceProblem(
fingerprint="1-1011-6807a9d5bedb6fdb175b006448cddf8cdf18fbd8",
op="db",
type=PerformanceNPlusOneGroupType,
desc="SELECT id, name FROM authors INNER JOIN book_authors ON author_id = id WHERE book_id = $1",
parent_span_ids=[],
cause_span_ids=[],
offender_span_ids=[
"9c5049407f37a364",
"ad1453eb469473f5",
"9ac8fee795f25a28",
"aacda642ff6787c0",
"b231fb2367a40bb2",
"9abcfbac864d1b09",
"a4acb0c08f6c5392",
"a1dbea4273c7a8cf",
"b8467be28b0edef0",
"9677584719fa33f9",
"8c6aa95b24d15772",
"be7d04a1731d5d10",
"baa57006cb44092a",
"a383cd625dff4809",
"9c48fda36f28cb0a",
"82253694a3a68c93",
"8831cccebb865893",
"a2339eabb5c4cf07",
"8ea362c64d8b9fd9",
"b8f8a99b783f7b48",
"87a6041001b4e8f6",
"ab99c67643fd85cf",
"a96783f2f544024a",
"8e110c4aa54e4aa0",
],
evidence_data={
"op": "db",
"parent_span_ids": [],
"cause_span_ids": [],
"offender_span_ids": [
"9c5049407f37a364",
"ad1453eb469473f5",
"9ac8fee795f25a28",
"aacda642ff6787c0",
"b231fb2367a40bb2",
"9abcfbac864d1b09",
"a4acb0c08f6c5392",
"a1dbea4273c7a8cf",
"b8467be28b0edef0",
"9677584719fa33f9",
"8c6aa95b24d15772",
"be7d04a1731d5d10",
"baa57006cb44092a",
"a383cd625dff4809",
"9c48fda36f28cb0a",
"82253694a3a68c93",
"8831cccebb865893",
"a2339eabb5c4cf07",
"8ea362c64d8b9fd9",
"b8f8a99b783f7b48",
"87a6041001b4e8f6",
"ab99c67643fd85cf",
"a96783f2f544024a",
"8e110c4aa54e4aa0",
],
},
evidence_display=[],
)
]
assert problems[0].title == "N+1 Query"
def test_does_not_detect_truncated_m_n_plus_one(self) -> None:
event = get_event("m-n-plus-one-db/m-n-plus-one-graphql-truncated")
assert self.find_problems(event) == []
def test_does_not_detect_n_plus_one(self) -> None:
event = get_event("n-plus-one-db/n-plus-one-in-django-index-view")
assert self.find_problems(event) == []
def test_does_not_detect_when_parent_is_transaction(self) -> None:
event = get_event("m-n-plus-one-db/m-n-plus-one-graphql-transaction-parent")
assert self.find_problems(event) == []
def test_m_n_plus_one_detector_enabled(self) -> None:
event = get_event("m-n-plus-one-db/m-n-plus-one-graphql")
sdk_span_mock = Mock()
_detect_performance_problems(event, sdk_span_mock, self.create_project())
sdk_span_mock.containing_transaction.set_tag.assert_has_calls(
[
call("_pi_all_issue_count", 1),
call("_pi_sdk_name", "sentry.javascript.node"),
call("is_standalone_spans", False),
call("_pi_transaction", "3818ae4f54ba4fa6ac6f68c9e32793c4"),
call(
"_pi_m_n_plus_one_db_fp",
"1-1011-6807a9d5bedb6fdb175b006448cddf8cdf18fbd8",
),
call("_pi_m_n_plus_one_db", "9c5049407f37a364"),
]
)
def test_m_n_plus_one_does_not_include_extra_span(self) -> None:
event = get_event("m-n-plus-one-db/m-n-plus-one-off-by-one")
assert self.find_problems(event) == []
def test_m_n_plus_one_ignores_redis(self) -> None:
event = get_event("m-n-plus-one-db/m-n-plus-one-redis")
assert self.find_problems(event) == []
def test_m_n_plus_one_ignores_mostly_not_db(self) -> None:
event = get_event("m-n-plus-one-db/m-n-plus-one-mostly-http")
assert self.find_problems(event) == []
def test_respects_project_option(self) -> None:
project = self.create_project()
event = get_event("m-n-plus-one-db/m-n-plus-one-graphql")
event["project_id"] = project.id
settings = get_detection_settings(project.id)
detector = MNPlusOneDBSpanDetector(settings, event)
assert detector.is_creation_allowed_for_project(project)
ProjectOption.objects.set_value(
project=project,
key="sentry:performance_issue_settings",
value={"n_plus_one_db_queries_detection_enabled": False},
)
settings = get_detection_settings(project.id)
detector = MNPlusOneDBSpanDetector(settings, event)
assert not detector.is_creation_allowed_for_project(project)
def test_respects_n_plus_one_db_duration_threshold(self) -> None:
project = self.create_project()
# Total duration subceeds the threshold
ProjectOption.objects.set_value(
project=project,
key="sentry:performance_issue_settings",
value={"n_plus_one_db_duration_threshold": 500},
)
event = get_event("m-n-plus-one-db/m-n-plus-one-graphql")
event["project_id"] = project.id
settings = get_detection_settings(project_id=project.id)
assert self.find_problems(event, settings) == []
# Total duration exceeds the threshold
ProjectOption.objects.set_value(
project=project,
key="sentry:performance_issue_settings",
value={"n_plus_one_db_duration_threshold": 100},
)
settings = get_detection_settings(project_id=project.id)
assert len(self.find_problems(event, settings)) == 1
# The mocked event has span duration that doesn't make up at least 10% of the total offender spans duration.
def test_db_spans_duration_subceeds_pct(self) -> None:
event = get_event("m-n-plus-one-db/m-n-plus-one-db-spans-duration-suceeds")
assert self.find_problems(event) == []
| MNPlusOneDBDetectorTest |
python | apache__airflow | providers/edge3/src/airflow/providers/edge3/models/edge_worker.py | {
"start": 1942,
"end": 3565
} | class ____(str, Enum):
"""Status of a Edge Worker instance."""
STARTING = "starting"
"""Edge Worker is in initialization."""
RUNNING = "running"
"""Edge Worker is actively running a task."""
IDLE = "idle"
"""Edge Worker is active and waiting for a task."""
SHUTDOWN_REQUEST = "shutdown request"
"""Request to shutdown Edge Worker is issued. It will be picked-up on the next heartbeat, tasks will drain and then worker will terminate."""
TERMINATING = "terminating"
"""Edge Worker is completing work (draining running tasks) and stopping."""
OFFLINE = "offline"
"""Edge Worker was shut down."""
UNKNOWN = "unknown"
"""No heartbeat signal from worker for some time, Edge Worker probably down or got disconnected."""
MAINTENANCE_REQUEST = "maintenance request"
"""Worker was requested to enter maintenance mode. Once worker receives this message it will pause fetching tasks and drain tasks."""
MAINTENANCE_PENDING = "maintenance pending"
"""Edge Worker received the request for maintenance, waiting for tasks to finish. Once tasks are finished will move to 'maintenance mode'."""
MAINTENANCE_MODE = "maintenance mode"
"""Edge Worker is in maintenance mode. It is online but pauses fetching tasks."""
MAINTENANCE_EXIT = "maintenance exit"
"""Request Worker is requested to exit maintenance mode. Once the worker receives this state it will un-pause and fetch new tasks."""
OFFLINE_MAINTENANCE = "offline maintenance"
"""Worker was shut down in maintenance mode. It will be in maintenance mode when restarted."""
| EdgeWorkerState |
python | kubernetes-client__python | kubernetes/base/dynamic/discovery.py | {
"start": 16871,
"end": 16965
} | class ____(json.JSONEncoder):
def default(self, o):
return o.to_dict()
| CacheEncoder |
python | automl__auto-sklearn | autosklearn/metalearning/metafeatures/metafeatures.py | {
"start": 8018,
"end": 8403
} | class ____(MetaFeature):
def _calculate(self, X, y, logger, feat_type):
n_missing = metafeatures.get_value("NumberOfFeaturesWithMissingValues")
n_total = float(metafeatures["NumberOfFeatures"](X, y, logger).value)
return float(n_missing / n_total)
@metafeatures.define("NumberOfMissingValues", dependency="MissingValues")
| PercentageOfFeaturesWithMissingValues |
python | urllib3__urllib3 | src/urllib3/exceptions.py | {
"start": 9492,
"end": 9844
} | class ____(HTTPError):
"""Raised by assert_header_parsing, but we convert it to a log.warning statement."""
def __init__(
self, defects: list[MessageDefect], unparsed_data: bytes | str | None
) -> None:
message = f"{defects or 'Unknown'}, unparsed data: {unparsed_data!r}"
super().__init__(message)
| HeaderParsingError |
python | keras-team__keras | keras/src/metrics/confusion_metrics_test.py | {
"start": 32127,
"end": 35620
} | class ____(testing.TestCase):
def test_config(self):
s_obj = metrics.PrecisionAtRecall(
0.4, num_thresholds=100, class_id=12, name="precision_at_recall_1"
)
self.assertEqual(s_obj.name, "precision_at_recall_1")
self.assertLen(s_obj.variables, 4)
self.assertEqual(s_obj.recall, 0.4)
self.assertEqual(s_obj.num_thresholds, 100)
self.assertEqual(s_obj.class_id, 12)
# Check save and restore config
s_obj2 = metrics.PrecisionAtRecall.from_config(s_obj.get_config())
self.assertEqual(s_obj2.name, "precision_at_recall_1")
self.assertLen(s_obj2.variables, 4)
self.assertEqual(s_obj2.recall, 0.4)
self.assertEqual(s_obj2.num_thresholds, 100)
self.assertEqual(s_obj.class_id, 12)
def test_unweighted_all_correct(self):
s_obj = metrics.PrecisionAtRecall(0.7)
inputs = np.random.randint(0, 2, size=(100, 1))
y_pred = np.array(inputs, dtype="float32")
y_true = np.array(inputs)
self.assertAlmostEqual(1, s_obj(y_true, y_pred))
def test_unweighted_high_recall(self):
s_obj = metrics.PrecisionAtRecall(0.8)
pred_values = [0.0, 0.1, 0.2, 0.5, 0.6, 0.2, 0.5, 0.6, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = np.array(pred_values, dtype="float32")
y_true = np.array(label_values)
# For 0.5 < decision threshold < 0.6.
self.assertAlmostEqual(2.0 / 3, s_obj(y_true, y_pred))
def test_unweighted_low_recall(self):
s_obj = metrics.PrecisionAtRecall(0.6)
pred_values = [0.0, 0.1, 0.2, 0.5, 0.6, 0.2, 0.5, 0.6, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
y_pred = np.array(pred_values, dtype="float32")
y_true = np.array(label_values)
# For 0.2 < decision threshold < 0.5.
self.assertAlmostEqual(0.75, s_obj(y_true, y_pred))
def test_unweighted_class_id(self):
s_obj = metrics.PrecisionAtRecall(0.6, class_id=2)
pred_values = [0.0, 0.1, 0.2, 0.5, 0.6, 0.2, 0.5, 0.6, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 2, 2, 2, 2, 2]
y_pred = ops.transpose(np.array([pred_values] * 3))
y_true = ops.one_hot(np.array(label_values), num_classes=3)
# For 0.2 < decision threshold < 0.5.
self.assertAlmostEqual(0.75, s_obj(y_true, y_pred))
@parameterized.parameters(["bool", "int32", "float32"])
def test_weighted(self, label_dtype):
s_obj = metrics.PrecisionAtRecall(7.0 / 8)
pred_values = [0.0, 0.1, 0.2, 0.5, 0.6, 0.2, 0.5, 0.6, 0.8, 0.9]
label_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weight_values = [2, 1, 2, 1, 2, 1, 2, 2, 1, 2]
y_pred = np.array(pred_values, dtype="float32")
y_true = ops.cast(label_values, dtype=label_dtype)
weights = np.array(weight_values)
result = s_obj(y_true, y_pred, sample_weight=weights)
# For 0.0 < decision threshold < 0.2.
self.assertAlmostEqual(0.7, result)
def test_invalid_sensitivity(self):
with self.assertRaisesRegex(
ValueError, r"`recall` must be in the range \[0, 1\]."
):
metrics.PrecisionAtRecall(-1)
def test_invalid_num_thresholds(self):
with self.assertRaisesRegex(
ValueError, "Argument `num_thresholds` must be an integer > 0"
):
metrics.PrecisionAtRecall(0.4, num_thresholds=-1)
| PrecisionAtRecallTest |
python | walkccc__LeetCode | solutions/1360. Number of Days Between Two Dates/1360.py | {
"start": 0,
"end": 549
} | class ____:
def daysBetweenDates(self, date1: str, date2: str) -> int:
days = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def isLeapYear(year: int) -> bool:
return year % 4 == 0 and year % 100 != 0 or year % 400 == 0
def daysFrom1971(date: str) -> int:
year, month, day = map(int, date.split('-'))
return (365 * (year - 1971) + sum(map(isLeapYear, range(1971, year))) +
sum(days[:month]) + day + (month > 2 and isLeapYear(year)))
return abs(daysFrom1971(date1) - daysFrom1971(date2))
| Solution |
python | kamyu104__LeetCode-Solutions | Python/minimize-the-maximum-difference-of-pairs.py | {
"start": 96,
"end": 751
} | class ____(object):
def minimizeMax(self, nums, p):
"""
:type nums: List[int]
:type p: int
:rtype: int
"""
def check(x):
i = cnt = 0
while i+1 < len(nums) and cnt < p:
if nums[i+1]-nums[i] <= x:
i += 1
cnt += 1
i += 1
return cnt == p
nums.sort()
left, right = 0, nums[-1]-nums[0]
while left <= right:
mid = left + (right-left)//2
if check(mid):
right = mid-1
else:
left = mid+1
return left
| Solution |
python | sympy__sympy | sympy/stats/drv.py | {
"start": 5943,
"end": 6076
} | class ____(DiscreteDomain, SingleDomain):
def as_boolean(self):
return Contains(self.symbol, self.set)
| SingleDiscreteDomain |
python | wntrblm__nox | nox/_options.py | {
"start": 1233,
"end": 21827
} | class ____(str):
__slots__ = ()
ReuseVenvType = Literal["no", "yes", "never", "always"]
options = _option_set.OptionSet(
description="Nox is a Python automation toolkit.", add_help=False
)
options.add_groups(
_option_set.OptionGroup(
"general",
"General options",
"These are general arguments used when invoking Nox.",
),
_option_set.OptionGroup(
"sessions",
"Sessions options",
"These arguments are used to control which Nox session(s) to execute.",
),
_option_set.OptionGroup(
"python",
"Python options",
"These arguments are used to control which Python version(s) to use.",
),
_option_set.OptionGroup(
"environment",
"Environment options",
"These arguments are used to control Nox's creation and usage of virtual"
" environments.",
),
_option_set.OptionGroup(
"execution",
"Execution options",
"These arguments are used to control execution of sessions.",
),
_option_set.OptionGroup(
"reporting",
"Reporting options",
"These arguments are used to control Nox's reporting during execution.",
),
)
def _sessions_merge_func(
key: str, command_args: argparse.Namespace, noxfile_args: NoxOptions
) -> list[str]:
"""Only return the Noxfile value for sessions/keywords if neither sessions,
keywords or tags are specified on the command-line.
Args:
key (str): This function is used for both the "sessions" and "keywords"
options, this allows using ``funtools.partial`` to pass the
same function for both options.
command_args (_option_set.Namespace): The options specified on the
command-line.
noxfile_Args (NoxOptions): The options specified in the
Noxfile."""
if (
not command_args.sessions
and not command_args.keywords
and not command_args.tags
):
return getattr(noxfile_args, key) # type: ignore[no-any-return]
return getattr(command_args, key) # type: ignore[no-any-return]
def _default_venv_backend_merge_func(
command_args: argparse.Namespace, noxfile_args: NoxOptions
) -> str:
"""Merge default_venv_backend from command args and Noxfile. Default is "virtualenv".
Args:
command_args (_option_set.Namespace): The options specified on the
command-line.
noxfile_Args (NoxOptions): The options specified in the
Noxfile.
"""
return (
command_args.default_venv_backend
or noxfile_args.default_venv_backend
or "virtualenv"
)
def _force_venv_backend_merge_func(
command_args: argparse.Namespace, noxfile_args: NoxOptions
) -> str:
"""Merge force_venv_backend from command args and Noxfile. Default is None.
Args:
command_args (_option_set.Namespace): The options specified on the
command-line.
noxfile_Args (NoxOptions): The options specified in the
Noxfile.
"""
if command_args.no_venv:
if (
command_args.force_venv_backend is not None
and command_args.force_venv_backend != "none"
):
msg = "You can not use `--no-venv` with a non-none `--force-venv-backend`"
raise ValueError(msg)
return "none"
return command_args.force_venv_backend or noxfile_args.force_venv_backend # type: ignore[return-value]
def _envdir_merge_func(
command_args: argparse.Namespace, noxfile_args: NoxOptions
) -> str:
"""Ensure that there is always some envdir.
Args:
command_args (_option_set.Namespace): The options specified on the
command-line.
noxfile_Args (NoxOptions): The options specified in the
Noxfile.
"""
return os.fspath(command_args.envdir or noxfile_args.envdir or ".nox")
def _reuse_venv_merge_func(
command_args: argparse.Namespace, noxfile_args: NoxOptions
) -> ReuseVenvType:
"""Merge reuse_venv from command args and Noxfile while maintaining
backwards compatibility with reuse_existing_virtualenvs. Default is "no".
Args:
command_args (_option_set.Namespace): The options specified on the
command-line.
noxfile_Args (NoxOptions): The options specified in the
Noxfile.
"""
# back-compat scenario with no_reuse_existing_virtualenvs/reuse_existing_virtualenvs
if command_args.no_reuse_existing_virtualenvs:
return "no"
if (
command_args.reuse_existing_virtualenvs
or noxfile_args.reuse_existing_virtualenvs
):
return "yes"
# regular option behavior
return command_args.reuse_venv or noxfile_args.reuse_venv or "no"
def default_env_var_list_factory(env_var: str) -> Callable[[], list[str] | None]:
"""Looks at the env var to set the default value for a list of env vars.
Args:
env_var (str): The name of the environment variable to look up.
Returns:
A callback that retrieves a list from a comma-delimited environment variable.
"""
def _default_list() -> list[str] | None:
env_value = os.environ.get(env_var)
return env_value.split(",") if env_value else None
return _default_list
def _color_finalizer(_value: bool, args: argparse.Namespace) -> bool: # noqa: FBT001
"""Figures out the correct value for "color" based on the two color flags.
Args:
value (bool): The current value of the "color" option.
args (_option_set.Namespace): The values for all options.
Returns:
The new value for the "color" option.
"""
if args.forcecolor and args.nocolor:
raise _option_set.ArgumentError(
None, "Can not specify both --no-color and --force-color."
)
if args.forcecolor:
return True
if args.nocolor or "NO_COLOR" in os.environ:
return False
return sys.stdout.isatty()
def _force_pythons_finalizer(
value: Sequence[str], args: argparse.Namespace
) -> Sequence[str]:
"""Propagate ``--force-python`` to ``--python`` and ``--extra-python``."""
if value:
args.pythons = args.extra_pythons = value
return value
def _R_finalizer(value: bool, args: argparse.Namespace) -> bool: # noqa: FBT001
"""Propagate -R to --reuse-existing-virtualenvs and --no-install and --reuse-venv=yes."""
if value:
args.reuse_venv = "yes"
args.reuse_existing_virtualenvs = args.no_install = value
return value
def _reuse_existing_virtualenvs_finalizer(
value: bool, # noqa: FBT001
args: argparse.Namespace,
) -> bool:
"""Propagate --reuse-existing-virtualenvs to --reuse-venv=yes."""
if value:
args.reuse_venv = "yes"
return value
def _posargs_finalizer(
value: Sequence[Any], _args: argparse.Namespace
) -> Sequence[Any] | list[Any]:
"""Removes the leading "--"s in the posargs array (if any) and asserts that
remaining arguments came after a "--".
"""
posargs = value
if not posargs:
return []
if "--" not in posargs:
unexpected_posargs = posargs
raise _option_set.ArgumentError(
None, f"Unknown argument(s) '{' '.join(unexpected_posargs)}'."
)
dash_index = posargs.index("--")
if dash_index != 0:
unexpected_posargs = posargs[0:dash_index]
raise _option_set.ArgumentError(
None, f"Unknown argument(s) '{' '.join(unexpected_posargs)}'."
)
return posargs[dash_index + 1 :]
def _python_completer(
prefix: str, # noqa: ARG001
parsed_args: argparse.Namespace,
**kwargs: Any,
) -> Iterable[str]:
module = load_nox_module(parsed_args)
manifest = discover_manifest(module, parsed_args)
return filter(
None,
(
session.func.python # type:ignore[misc] # str sequences flattened, other non-strs falsey and filtered out
for session, _ in manifest.list_all_sessions()
),
)
def _session_completer(
prefix: str, # noqa: ARG001
parsed_args: argparse.Namespace,
**kwargs: Any,
) -> Iterable[str]:
parsed_args.list_sessions = True
module = load_nox_module(parsed_args)
manifest = discover_manifest(module, parsed_args)
filtered_manifest = filter_manifest(manifest, parsed_args)
if isinstance(filtered_manifest, int):
return []
return (
session.friendly_name for session, _ in filtered_manifest.list_all_sessions()
)
def _tag_completer(
prefix: str, # noqa: ARG001
parsed_args: argparse.Namespace,
**kwargs: Any,
) -> Iterable[str]:
module = load_nox_module(parsed_args)
manifest = discover_manifest(module, parsed_args)
return itertools.chain.from_iterable(
filter(None, (session.tags for session, _ in manifest.list_all_sessions()))
)
options.add_options(
_option_set.Option(
"help",
"-h",
"--help",
group=options.groups["general"],
action="store_true",
help="Show this help message and exit.",
),
_option_set.Option(
"version",
"--version",
group=options.groups["general"],
action="store_true",
help="Show the Nox version and exit.",
),
_option_set.Option(
"script_mode",
"--script-mode",
group=options.groups["general"],
choices=["none", "fresh", "reuse"],
default="reuse",
),
_option_set.Option(
"script_venv_backend",
"--script-venv-backend",
group=options.groups["general"],
),
_option_set.Option(
"list_sessions",
"-l",
"--list-sessions",
"--list",
group=options.groups["sessions"],
action="store_true",
help="List all available sessions and exit.",
),
_option_set.Option(
"json",
"--json",
group=options.groups["sessions"],
action="store_true",
help="JSON output formatting. Requires list-sessions currently.",
),
_option_set.Option(
"sessions",
"-s",
"-e",
"--sessions",
"--session",
group=options.groups["sessions"],
noxfile=True,
merge_func=functools.partial(_sessions_merge_func, "sessions"),
nargs="*",
default=default_env_var_list_factory("NOXSESSION"),
help="Which sessions to run. By default, all sessions will run.",
completer=_session_completer,
),
_option_set.Option(
"pythons",
"-p",
"--pythons",
"--python",
group=options.groups["python"],
noxfile=True,
nargs="*",
default=default_env_var_list_factory("NOXPYTHON"),
help="Only run sessions that use the given python interpreter versions.",
completer=_python_completer,
),
_option_set.Option(
"keywords",
"-k",
"--keywords",
group=options.groups["sessions"],
noxfile=True,
merge_func=functools.partial(_sessions_merge_func, "keywords"),
help="Only run sessions that match the given expression.",
completer=argcomplete.completers.ChoicesCompleter(()), # type: ignore[no-untyped-call]
),
_option_set.Option(
"tags",
"-t",
"--tags",
group=options.groups["sessions"],
noxfile=True,
merge_func=functools.partial(_sessions_merge_func, "tags"),
nargs="*",
help="Only run sessions with the given tags.",
completer=_tag_completer,
),
_option_set.Option(
"posargs",
"posargs",
group=options.groups["general"],
nargs=argparse.REMAINDER,
help="Arguments following ``--`` that are passed through to the session(s).",
finalizer_func=_posargs_finalizer,
),
*_option_set.make_flag_pair(
"verbose",
("-v", "--verbose"),
("--no-verbose",),
group=options.groups["reporting"],
help="Logs the output of all commands run including commands marked silent.",
),
_option_set.Option(
"add_timestamp",
"-ts",
"--add-timestamp",
group=options.groups["reporting"],
action="store_true",
help="Adds a timestamp to logged output.",
),
_option_set.Option(
"default_venv_backend",
"-db",
"--default-venv-backend",
group=options.groups["environment"],
noxfile=True,
default=lambda: os.environ.get("NOX_DEFAULT_VENV_BACKEND"),
merge_func=_default_venv_backend_merge_func,
help=(
"Virtual environment backend to use by default for Nox sessions, this is"
f" ``'virtualenv'`` by default but any of ``{list(ALL_VENVS)!r}`` are accepted."
),
choices=list(ALL_VENVS),
),
_option_set.Option(
"force_venv_backend",
"-fb",
"--force-venv-backend",
group=options.groups["environment"],
noxfile=True,
merge_func=_force_venv_backend_merge_func,
help=(
"Virtual environment backend to force-use for all Nox sessions in this run,"
" overriding any other venv backend declared in the Noxfile and ignoring"
f" the default backend. Any of ``{list(ALL_VENVS)!r}`` are accepted."
),
choices=list(ALL_VENVS),
),
_option_set.Option(
"no_venv",
"--no-venv",
group=options.groups["environment"],
default=False,
action="store_true",
help=(
"Runs the selected sessions directly on the current interpreter, without"
" creating a venv. This is an alias for '--force-venv-backend none'."
),
),
_option_set.Option(
"reuse_venv",
"--reuse-venv",
group=options.groups["environment"],
noxfile=True,
merge_func=_reuse_venv_merge_func,
help=(
"Controls existing virtualenvs recreation. This is ``'no'`` by"
" default, but any of ``('yes', 'no', 'always', 'never')`` are accepted."
),
choices=["yes", "no", "always", "never"],
),
*_option_set.make_flag_pair(
"reuse_existing_virtualenvs",
("-r", "--reuse-existing-virtualenvs"),
(
"-N",
"--no-reuse-existing-virtualenvs",
),
group=options.groups["environment"],
help="This is an alias for '--reuse-venv=yes|no'.",
finalizer_func=_reuse_existing_virtualenvs_finalizer,
),
_option_set.Option(
"R",
"-R",
default=False,
group=options.groups["environment"],
action="store_true",
help=(
"Reuse existing virtualenvs and skip package re-installation."
" This is an alias for '--reuse-existing-virtualenvs --no-install'."
),
finalizer_func=_R_finalizer,
),
_option_set.Option(
"noxfile",
"-f",
"--noxfile",
group=options.groups["general"],
default=DefaultStr("noxfile.py"),
help="Location of the Python file containing Nox sessions.",
),
_option_set.Option(
"envdir",
"--envdir",
noxfile=True,
merge_func=_envdir_merge_func,
group=options.groups["environment"],
help="Directory where Nox will store virtualenvs, this is ``.nox`` by default.",
completer=argcomplete.completers.DirectoriesCompleter(), # type: ignore[no-untyped-call]
),
_option_set.Option(
"download_python",
"--download-python",
"--download-python",
noxfile=True,
group=options.groups["python"],
default=lambda: os.getenv("NOX_DOWNLOAD_PYTHON"),
help=(
"When should nox download python standalone builds to run the sessions,"
" defaults to 'auto' which will download when the version requested can't"
" be found in the running environment."
),
choices=["auto", "never", "always"],
),
_option_set.Option(
"extra_pythons",
"--extra-pythons",
"--extra-python",
group=options.groups["python"],
nargs="*",
default=default_env_var_list_factory("NOXEXTRAPYTHON"),
help="Additionally, run sessions using the given python interpreter versions.",
completer=_python_completer,
),
_option_set.Option(
"force_pythons",
"-P",
"--force-pythons",
"--force-python",
group=options.groups["python"],
nargs="*",
default=default_env_var_list_factory("NOXFORCEPYTHON"),
help=(
"Run sessions with the given interpreters instead of those listed in the"
" Noxfile. This is a shorthand for ``--python=X.Y --extra-python=X.Y``."
" It will also work on sessions that don't have any interpreter parametrized."
),
finalizer_func=_force_pythons_finalizer,
completer=_python_completer,
),
*_option_set.make_flag_pair(
"stop_on_first_error",
("-x", "--stop-on-first-error"),
("--no-stop-on-first-error",),
group=options.groups["execution"],
help="Stop after the first error.",
),
*_option_set.make_flag_pair(
"error_on_missing_interpreters",
("--error-on-missing-interpreters",),
("--no-error-on-missing-interpreters",),
group=options.groups["execution"],
help="Error instead of skipping sessions if an interpreter can not be located.",
default=lambda: "CI" in os.environ,
),
*_option_set.make_flag_pair(
"error_on_external_run",
("--error-on-external-run",),
("--no-error-on-external-run",),
group=options.groups["execution"],
help=(
"Error if run() is used to execute a program that isn't installed in a"
" session's virtualenv."
),
),
_option_set.Option(
"install_only",
"--install-only",
group=options.groups["execution"],
action="store_true",
help="Skip session.run invocations in the Noxfile.",
),
_option_set.Option(
"no_install",
"--no-install",
default=False,
group=options.groups["execution"],
action="store_true",
help=(
"Skip invocations of session methods for installing packages"
" (session.install, session.conda_install, session.run_install)"
" when a virtualenv is being reused."
),
),
_option_set.Option(
"report",
"--report",
group=options.groups["reporting"],
noxfile=True,
help="Output a report of all sessions to the given filename.",
completer=argcomplete.completers.FilesCompleter(("json",)), # type: ignore[no-untyped-call]
),
_option_set.Option(
"non_interactive",
"--non-interactive",
group=options.groups["execution"],
action="store_true",
help=(
"Force session.interactive to always be False, even in interactive"
" sessions."
),
),
_option_set.Option(
"nocolor",
"--nocolor",
"--no-color",
group=options.groups["reporting"],
default=lambda: "NO_COLOR" in os.environ,
action="store_true",
help="Disable all color output.",
),
_option_set.Option(
"forcecolor",
"--forcecolor",
"--force-color",
group=options.groups["reporting"],
default=lambda: "FORCE_COLOR" in os.environ,
action="store_true",
help="Force color output, even if stdout is not an interactive terminal.",
),
_option_set.Option(
"color",
"--color",
group=options.groups["reporting"],
hidden=True,
finalizer_func=_color_finalizer,
),
# Stores the original working directory that Nox was invoked from,
# since it could be different from the Noxfile's directory.
_option_set.Option(
"invoked_from",
group=None,
hidden=True,
default=os.getcwd,
),
)
"""Options that are configurable in the Noxfile.
By setting properties on ``nox.options`` you can specify command line
arguments in your Noxfile. If an argument is specified in both the Noxfile
and on the command line, the command line arguments take precedence.
See :doc:`usage` for more details on these settings and their effect.
"""
noxfile_options = options.noxfile_namespace()
| DefaultStr |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_vectorizers.py | {
"start": 13885,
"end": 14218
} | class ____(_VectorizerConfigCreate):
vectorizer: Union[Vectorizers, _EnumLikeStr] = Field(
default=Vectorizers.TEXT2VEC_VOYAGEAI, frozen=True, exclude=True
)
dimensions: Optional[int]
model: Optional[str]
baseURL: Optional[str]
truncate: Optional[bool]
vectorizeClassName: bool
| _Text2VecVoyageConfig |
python | huggingface__transformers | src/transformers/models/table_transformer/modeling_table_transformer.py | {
"start": 32571,
"end": 37570
} | class ____(TableTransformerPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`TableTransformerEncoderLayer`].
The encoder updates the flattened feature map through multiple self-attention layers.
Small tweak for Table Transformer:
- object_queries are added to the forward pass.
Args:
config: TableTransformerConfig
"""
def __init__(self, config: TableTransformerConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
self.layers = nn.ModuleList([TableTransformerEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layernorm = nn.LayerNorm(config.d_model)
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
inputs_embeds=None,
attention_mask=None,
object_queries=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Flattened feature map (output of the backbone + projection layer) that is passed to the encoder.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`:
- 1 for pixel features that are real (i.e. **not masked**),
- 0 for pixel features that are padding (i.e. **masked**).
[What are attention masks?](../glossary#attention-mask)
object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Position embeddings that are added to the queries and keys in each self-attention layer.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
hidden_states = inputs_embeds
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# expand attention_mask
if attention_mask is not None:
# [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len]
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for encoder_layer in self.layers:
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
to_drop = False
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop: # skip the layer
to_drop = True
if to_drop:
layer_outputs = (None, None)
else:
# we add object_queries as extra input to the encoder_layer
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
object_queries=object_queries,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
hidden_states = self.layernorm(hidden_states)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
# Copied from transformers.models.detr.modeling_detr.DetrDecoder with DETR->TABLE_TRANSFORMER,Detr->TableTransformer
| TableTransformerEncoder |
python | huggingface__transformers | examples/pytorch/token-classification/run_ner.py | {
"start": 3800,
"end": 26658
} | class ____:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
task_name: Optional[str] = field(default="ner", metadata={"help": "The name of the task (ner, pos...)."})
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a csv or JSON file)."}
)
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate on (a csv or JSON file)."},
)
test_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input test data file to predict on (a csv or JSON file)."},
)
text_column_name: Optional[str] = field(
default=None, metadata={"help": "The column name of text to input in the file (a csv or JSON file)."}
)
label_column_name: Optional[str] = field(
default=None, metadata={"help": "The column name of label to input in the file (a csv or JSON file)."}
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_seq_length: int = field(
default=None,
metadata={
"help": (
"The maximum total input sequence length after tokenization. If set, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": (
"Whether to pad all samples to model maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
},
)
label_all_tokens: bool = field(
default=False,
metadata={
"help": (
"Whether to put the label for one word on all tokens of generated by that word or just on the "
"one (in which case the other tokens will have a padding index)."
)
},
)
return_entity_level_metrics: bool = field(
default=False,
metadata={"help": "Whether to return all the entity levels during evaluation or just the overall ones."},
)
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
self.task_name = self.task_name.lower()
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_process_index}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+ f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
cache_dir=model_args.cache_dir,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.validation_file.split(".")[-1]
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.
if training_args.do_train:
column_names = raw_datasets["train"].column_names
features = raw_datasets["train"].features
else:
column_names = raw_datasets["validation"].column_names
features = raw_datasets["validation"].features
if data_args.text_column_name is not None:
text_column_name = data_args.text_column_name
elif "tokens" in column_names:
text_column_name = "tokens"
else:
text_column_name = column_names[0]
if data_args.label_column_name is not None:
label_column_name = data_args.label_column_name
elif f"{data_args.task_name}_tags" in column_names:
label_column_name = f"{data_args.task_name}_tags"
else:
label_column_name = column_names[1]
# In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the
# unique labels.
def get_label_list(labels):
    """Return the sorted list of unique labels found across all examples.

    ``labels`` is an iterable of per-example label sequences. Sorting makes
    the resulting label <-> id mapping deterministic across runs.
    """
    return sorted({tag for example_labels in labels for tag in example_labels})
# If the labels are of type ClassLabel, they are already integers and we have the map stored somewhere.
# Otherwise, we have to get the list of labels manually.
labels_are_int = isinstance(features[label_column_name].feature, ClassLabel)
if labels_are_int:
    # Dataset already encodes labels as ints; the id -> id map is the identity.
    label_list = features[label_column_name].feature.names
    label_to_id = {i: i for i in range(len(label_list))}
else:
    # String labels: derive the vocabulary from the train split.
    label_list = get_label_list(raw_datasets["train"][label_column_name])
    label_to_id = {l: i for i, l in enumerate(label_list)}
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
    model_args.config_name if model_args.config_name else model_args.model_name_or_path,
    num_labels=num_labels,
    finetuning_task=data_args.task_name,
    cache_dir=model_args.cache_dir,
    revision=model_args.model_revision,
    token=model_args.token,
    trust_remote_code=model_args.trust_remote_code,
)
tokenizer_name_or_path = model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path
if config.model_type in {"bloom", "gpt2", "roberta", "deberta"}:
    # These byte-level BPE tokenizers need add_prefix_space=True to accept
    # pre-split word lists (is_split_into_words=True in the tokenize step).
    tokenizer = AutoTokenizer.from_pretrained(
        tokenizer_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=True,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
        add_prefix_space=True,
    )
else:
    tokenizer = AutoTokenizer.from_pretrained(
        tokenizer_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=True,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )
model = AutoModelForTokenClassification.from_pretrained(
    model_args.model_name_or_path,
    from_tf=bool(".ckpt" in model_args.model_name_or_path),
    config=config,
    cache_dir=model_args.cache_dir,
    revision=model_args.model_revision,
    token=model_args.token,
    trust_remote_code=model_args.trust_remote_code,
    ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
)
# Tokenizer check: this script requires a fast tokenizer.
# Check if tokenizer has _tokenizer attribute (from tokenizers library) or is_fast property
# (word_ids() below only exists on fast tokenizers).
if not (hasattr(tokenizer, "_tokenizer") or getattr(tokenizer, "is_fast", False)):
    raise TypeError(
        "This example script only works for models that have a fast tokenizer. Check out the big table of models at"
        " https://huggingface.co/transformers/index.html#supported-frameworks to find the model types that meet"
        " this requirement"
    )
# Model has labels -> use them.
# NOTE(review): compares against a freshly-built default config's label2id to detect
# a non-default mapping; `PreTrainedConfig` is presumably imported at the top of the
# file (the upstream example uses `PretrainedConfig`) — confirm against the imports.
if model.config.label2id != PreTrainedConfig(num_labels=num_labels).label2id:
    if sorted(model.config.label2id.keys()) == sorted(label_list):
        # Reorganize `label_list` to match the ordering of the model.
        if labels_are_int:
            label_to_id = {i: int(model.config.label2id[l]) for i, l in enumerate(label_list)}
            label_list = [model.config.id2label[i] for i in range(num_labels)]
        else:
            label_list = [model.config.id2label[i] for i in range(num_labels)]
            label_to_id = {l: i for i, l in enumerate(label_list)}
    else:
        logger.warning(
            "Your model seems to have been trained with labels, but they don't match the dataset: "
            f"model labels: {sorted(model.config.label2id.keys())}, dataset labels:"
            f" {sorted(label_list)}.\nIgnoring the model labels as a result.",
        )
# Set the correspondences label/ID inside the model config
model.config.label2id = {l: i for i, l in enumerate(label_list)}
model.config.id2label = dict(enumerate(label_list))
# Map that sends B-Xxx label to its I-Xxx counterpart
# (used when label_all_tokens=True: sub-word continuations of a B- word get the I- id).
b_to_i_label = []
for idx, label in enumerate(label_list):
    if label.startswith("B-") and label.replace("B-", "I-") in label_list:
        b_to_i_label.append(label_list.index(label.replace("B-", "I-")))
    else:
        # No I- counterpart (or not a B- tag): map the label to itself.
        b_to_i_label.append(idx)
# Preprocessing the dataset
# Padding strategy
padding = "max_length" if data_args.pad_to_max_length else False
# Tokenize all texts and align the labels with them.
def tokenize_and_align_labels(examples):
    """Tokenize a batch of pre-split word lists and align word labels to sub-tokens.

    Special tokens and (unless ``label_all_tokens``) non-first sub-tokens of a
    word are labeled -100 so the loss ignores them. Relies on the enclosing
    scope for ``tokenizer``, ``padding``, ``label_to_id`` and ``b_to_i_label``.
    """
    tokenized_inputs = tokenizer(
        examples[text_column_name],
        padding=padding,
        truncation=True,
        max_length=data_args.max_seq_length,
        # We use this argument because the texts in our dataset are lists of words (with a label for each word).
        is_split_into_words=True,
    )
    labels = []
    for i, label in enumerate(examples[label_column_name]):
        # word_ids maps each sub-token back to its source word index (fast tokenizers only).
        word_ids = tokenized_inputs.word_ids(batch_index=i)
        previous_word_idx = None
        label_ids = []
        for word_idx in word_ids:
            # Special tokens have a word id that is None. We set the label to -100 so they are automatically
            # ignored in the loss function.
            if word_idx is None:
                label_ids.append(-100)
            # We set the label for the first token of each word.
            elif word_idx != previous_word_idx:
                label_ids.append(label_to_id[label[word_idx]])
            # For the other tokens in a word, we set the label to either the current label or -100, depending on
            # the label_all_tokens flag.
            else:
                if data_args.label_all_tokens:
                    # Continuation sub-tokens of a B- word are labeled with the I- counterpart.
                    label_ids.append(b_to_i_label[label_to_id[label[word_idx]]])
                else:
                    label_ids.append(-100)
            previous_word_idx = word_idx
        labels.append(label_ids)
    tokenized_inputs["labels"] = labels
    return tokenized_inputs
# For each requested split: verify it exists, optionally cap the number of
# samples, then tokenize/align under main_process_first so only one process
# computes the cache in distributed runs.
if training_args.do_train:
    if "train" not in raw_datasets:
        raise ValueError("--do_train requires a train dataset")
    train_dataset = raw_datasets["train"]
    if data_args.max_train_samples is not None:
        # min() guards against requesting more samples than the split contains.
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))
    with training_args.main_process_first(desc="train dataset map pre-processing"):
        train_dataset = train_dataset.map(
            tokenize_and_align_labels,
            batched=True,
            num_proc=data_args.preprocessing_num_workers,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on train dataset",
        )
if training_args.do_eval:
    if "validation" not in raw_datasets:
        raise ValueError("--do_eval requires a validation dataset")
    eval_dataset = raw_datasets["validation"]
    if data_args.max_eval_samples is not None:
        max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
        eval_dataset = eval_dataset.select(range(max_eval_samples))
    with training_args.main_process_first(desc="validation dataset map pre-processing"):
        eval_dataset = eval_dataset.map(
            tokenize_and_align_labels,
            batched=True,
            num_proc=data_args.preprocessing_num_workers,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on validation dataset",
        )
if training_args.do_predict:
    if "test" not in raw_datasets:
        raise ValueError("--do_predict requires a test dataset")
    predict_dataset = raw_datasets["test"]
    if data_args.max_predict_samples is not None:
        max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
        predict_dataset = predict_dataset.select(range(max_predict_samples))
    with training_args.main_process_first(desc="prediction dataset map pre-processing"):
        predict_dataset = predict_dataset.map(
            tokenize_and_align_labels,
            batched=True,
            num_proc=data_args.preprocessing_num_workers,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on prediction dataset",
        )
# Data collator
# pad_to_multiple_of=8 keeps tensor shapes friendly to fp16 tensor cores.
data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
# Metrics
metric = evaluate.load("seqeval", cache_dir=model_args.cache_dir)
def compute_metrics(p):
    """Compute seqeval metrics from an EvalPrediction-style (predictions, labels) pair.

    Converts logits to label strings, drops positions labeled -100 (special
    tokens / ignored sub-tokens), and returns either per-entity metrics
    (flattened) or overall precision/recall/f1/accuracy.
    """
    predictions, labels = p
    if not training_args.eval_do_concat_batches:
        # Batches were kept separate; stack them into one array first.
        predictions = np.hstack(predictions)
        labels = np.hstack(labels)
    predictions = np.argmax(predictions, axis=2)
    # Remove ignored index (special tokens)
    true_predictions = [
        [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
        for prediction, label in zip(predictions, labels)
    ]
    true_labels = [
        [label_list[l] for (p, l) in zip(prediction, label) if l != -100]
        for prediction, label in zip(predictions, labels)
    ]
    results = metric.compute(predictions=true_predictions, references=true_labels)
    if data_args.return_entity_level_metrics:
        # Unpack nested dictionaries
        # (seqeval returns {"PER": {"precision": ...}, ...}; flatten to "PER_precision").
        final_results = {}
        for key, value in results.items():
            if isinstance(value, dict):
                for n, v in value.items():
                    final_results[f"{key}_{n}"] = v
            else:
                final_results[key] = value
        return final_results
    else:
        return {
            "precision": results["overall_precision"],
            "recall": results["overall_recall"],
            "f1": results["overall_f1"],
            "accuracy": results["overall_accuracy"],
        }
# Initialize our Trainer
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset if training_args.do_train else None,
    eval_dataset=eval_dataset if training_args.do_eval else None,
    processing_class=tokenizer,
    data_collator=data_collator,
    compute_metrics=compute_metrics,
)
# Training
if training_args.do_train:
    checkpoint = None
    if training_args.resume_from_checkpoint is not None:
        checkpoint = training_args.resume_from_checkpoint
    train_result = trainer.train(resume_from_checkpoint=checkpoint)
    metrics = train_result.metrics
    trainer.save_model()  # Saves the tokenizer too for easy upload
    # Report the actual number of samples used (cap may exceed dataset size).
    max_train_samples = (
        data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
    )
    metrics["train_samples"] = min(max_train_samples, len(train_dataset))
    trainer.log_metrics("train", metrics)
    trainer.save_metrics("train", metrics)
    trainer.save_state()
# Evaluation
if training_args.do_eval:
    logger.info("*** Evaluate ***")
    metrics = trainer.evaluate()
    max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
    metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
    trainer.log_metrics("eval", metrics)
    trainer.save_metrics("eval", metrics)
# Predict
if training_args.do_predict:
    logger.info("*** Predict ***")
    predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")
    predictions = np.argmax(predictions, axis=2)
    # Remove ignored index (special tokens)
    true_predictions = [
        [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
        for prediction, label in zip(predictions, labels)
    ]
    trainer.log_metrics("predict", metrics)
    trainer.save_metrics("predict", metrics)
    # Save predictions
    # Only rank 0 writes the file to avoid clobbering in distributed runs.
    output_predictions_file = os.path.join(training_args.output_dir, "predictions.txt")
    if trainer.is_world_process_zero():
        with open(output_predictions_file, "w") as writer:
            writer.writelines(" ".join(prediction) + "\n" for prediction in true_predictions)
# Metadata for the auto-generated model card / hub push.
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "token-classification"}
if data_args.dataset_name is not None:
    kwargs["dataset_tags"] = data_args.dataset_name
    if data_args.dataset_config_name is not None:
        kwargs["dataset_args"] = data_args.dataset_config_name
        kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
    else:
        kwargs["dataset"] = data_args.dataset_name
if training_args.push_to_hub:
    trainer.push_to_hub(**kwargs)
else:
    trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    # `index` is the TPU core index supplied by the spawner; unused by main().
    main()
if __name__ == "__main__":
    main()
| DataTrainingArguments |
python | tiangolo__fastapi | scripts/sponsors.py | {
"start": 1022,
"end": 1118
} | class ____(BaseModel):
sponsorEntity: SponsorEntity
tier: Tier
| SponsorshipAsMaintainerNode |
python | graphql-python__graphene | graphene/tests/issues/test_356.py | {
"start": 231,
"end": 698
} | class ____(graphene.Union):
class Meta:
types = (SomeTypeOne, SomeTypeTwo)
def test_issue():
class Query(graphene.ObjectType):
things = relay.ConnectionField(MyUnion)
with raises(Exception) as exc_info:
graphene.Schema(query=Query)
assert str(exc_info.value) == (
"Query fields cannot be resolved."
" IterableConnectionField type has to be a subclass of Connection."
' Received "MyUnion".'
)
| MyUnion |
python | ray-project__ray | python/ray/tests/test_task_events_2.py | {
"start": 1072,
"end": 11278
} | class ____:
def __init__(self):
raise ValueError("Actor init is expected to fail")
def ready(self):
pass
def test_actor_creation_task_ok(shutdown_only):
ray.init(_system_config=_SYSTEM_CONFIG)
a = ActorOk.remote()
ray.get(a.ready.remote())
def verify():
tasks = list_tasks(filters=[("name", "=", "ActorOk.__init__")])
actors = list_actors(filters=[("class_name", "=", "ActorOk")])
assert len(tasks) == 1
assert len(actors) == 1
actor = actors[0]
task = tasks[0]
assert task["state"] == "FINISHED"
assert task["actor_id"] == actor["actor_id"]
return True
wait_for_condition(verify)
def test_actor_creation_task_failed(shutdown_only):
ray.init(_system_config=_SYSTEM_CONFIG)
a = ActorInitFailed.remote()
with pytest.raises(ray.exceptions.RayActorError):
ray.get(a.ready.remote())
def verify():
tasks = list_tasks(filters=[("name", "=", "ActorInitFailed.__init__")])
actors = list_actors(filters=[("class_name", "=", "ActorInitFailed")])
assert len(tasks) == 1
assert len(actors) == 1
actor = actors[0]
task = tasks[0]
assert task["state"] == "FAILED"
assert task["actor_id"] == actor["actor_id"]
assert actor["state"] == "DEAD"
return True
wait_for_condition(verify)
def test_actor_creation_nested_failure_from_actor(shutdown_only):
ray.init(_system_config=_SYSTEM_CONFIG)
@ray.remote
class NestedActor:
def ready(self):
a = ActorInitFailed.remote()
ray.get(a.ready.remote())
a = NestedActor.remote()
with pytest.raises(ray.exceptions.RayTaskError):
ray.get(a.ready.remote())
def verify():
creation_tasks = list_tasks(filters=[("type", "=", "ACTOR_CREATION_TASK")])
actors = list_actors()
assert len(creation_tasks) == 2
assert len(actors) == 2
for actor in actors:
if "NestedActor" in actor["class_name"]:
assert actor["state"] == "ALIVE"
else:
assert "ActorInitFailed" in actor["class_name"]
assert actor["state"] == "DEAD"
for task in creation_tasks:
if "ActorInitFailed" in task["name"]:
assert task["state"] == "FAILED"
else:
assert task["name"] == "NestedActor.__init__"
assert task["state"] == "FINISHED"
return True
wait_for_condition(verify)
def test_actor_creation_canceled(shutdown_only):
ray.init(num_cpus=2, _system_config=_SYSTEM_CONFIG)
# An actor not gonna be scheduled
a = ActorOk.options(num_cpus=10).remote()
# Kill it before it could be scheduled.
ray.kill(a)
def verify():
tasks = list_tasks(filters=[("name", "=", "ActorOk.__init__")])
actors = list_actors(filters=[("class_name", "=", "ActorOk")])
assert len(tasks) == 1
assert len(actors) == 1
actor = actors[0]
task = tasks[0]
assert task["state"] == "FAILED"
assert task["actor_id"] == actor["actor_id"]
assert actor["state"] == "DEAD"
return True
wait_for_condition(verify)
def test_handle_driver_tasks(shutdown_only):
ray.init(_system_config=_SYSTEM_CONFIG)
job_id = ray.get_runtime_context().get_job_id()
script = """
import ray
import time
ray.init("auto")
@ray.remote
def f():
time.sleep(3)
ray.get(f.remote())
"""
run_string_as_driver_nonblocking(script)
client = StateApiClient()
def list_tasks(exclude_driver):
return client.list(
StateResource.TASKS,
# Filter out this driver
options=ListApiOptions(
exclude_driver=exclude_driver, filters=[("job_id", "!=", job_id)]
),
raise_on_missing_output=True,
)
# Check driver running
def verify():
tasks_with_driver = list_tasks(exclude_driver=False)
assert len(tasks_with_driver) == 2, tasks_with_driver
task_types = {task["type"] for task in tasks_with_driver}
assert task_types == {"NORMAL_TASK", "DRIVER_TASK"}
for task in tasks_with_driver:
if task["type"] == "DRIVER_TASK":
assert task["state"] == "RUNNING", task
return True
wait_for_condition(verify, timeout=15, retry_interval_ms=1000)
# Check driver finishes
def verify():
tasks_with_driver = list_tasks(exclude_driver=False)
assert len(tasks_with_driver) == 2, tasks_with_driver
for task in tasks_with_driver:
if task["type"] == "DRIVER_TASK":
assert task["state"] == "FINISHED", task
tasks_no_driver = list_tasks(exclude_driver=True)
assert len(tasks_no_driver) == 1, tasks_no_driver
return True
wait_for_condition(verify)
def test_fault_tolerance_detached_actor(shutdown_only):
"""
Tests that tasks from a detached actor **shouldn't** be marked as failed
"""
ray.init(_system_config=_SYSTEM_CONFIG)
pid_actor = PidActor.remote()
# Check a detached actor's parent task's failure do not
# affect the actor's task subtree.
@ray.remote(max_retries=0)
def parent_starts_detached_actor(pid_actor):
@ray.remote
class DetachedActor:
def __init__(self):
pass
async def running(self):
while not self.running:
await asyncio.sleep(0.1)
pass
async def run(self, pid_actor):
ray.get(
pid_actor.report_pid.remote(
"detached-actor-run", os.getpid(), "RUNNING"
)
)
self.running = True
await asyncio.sleep(999)
# start a detached actor
a = DetachedActor.options(
name="detached-actor", lifetime="detached", namespace="test"
).remote()
a.run.options(name="detached-actor-run").remote(pid_actor)
ray.get(a.running.remote())
# Enough time for events to be reported to GCS.
time.sleep(1)
# fail this parent task
os._exit(1)
with pytest.raises(ray.exceptions.WorkerCrashedError):
ray.get(parent_starts_detached_actor.remote(pid_actor))
a = ray.get_actor("detached-actor", namespace="test")
task_pids = ray.get(pid_actor.get_pids.remote())
wait_for_condition(
verify_tasks_running_or_terminated,
task_pids=task_pids,
expect_num_tasks=1,
)
a = ray.get_actor("detached-actor", namespace="test")
ray.kill(a)
# Verify the actual process no longer running.
task_pids["detached-actor-run"] = (task_pids["detached-actor-run"][0], "FAILED")
wait_for_condition(
verify_tasks_running_or_terminated,
task_pids=task_pids,
expect_num_tasks=1,
)
# Verify failed task marked with expected info.
wait_for_condition(
verify_failed_task,
name="detached-actor-run",
error_type="WORKER_DIED",
error_message="The actor is dead because it was killed by `ray.kill`",
)
def test_fault_tolerance_job_failed(shutdown_only):
sys_config = _SYSTEM_CONFIG.copy()
config = {
"gcs_mark_task_failed_on_job_done_delay_ms": 1000,
# make worker failure not trigger task failure
"gcs_mark_task_failed_on_worker_dead_delay_ms": 30000,
}
sys_config.update(config)
ray.init(num_cpus=8, _system_config=sys_config)
script = """
import ray
import time
ray.init("auto")
NUM_CHILD = 2
@ray.remote
def grandchild():
time.sleep(999)
@ray.remote
def child():
ray.get(grandchild.remote())
@ray.remote
def finished_child():
ray.put(1)
return
@ray.remote
def parent():
children = [child.remote() for _ in range(NUM_CHILD)]
finished_children = ray.get([finished_child.remote() for _ in range(NUM_CHILD)])
ray.get(children)
ray.get(parent.remote())
"""
proc = run_string_as_driver_nonblocking(script)
def all_tasks_running():
tasks = list_tasks()
assert len(tasks) == 7, (
"Incorrect number of tasks are reported. "
"Expected length: 1 parent + 2 finished child + 2 failed child + "
"2 failed grandchild tasks"
)
return True
wait_for_condition(
all_tasks_running,
timeout=10,
retry_interval_ms=500,
)
time_sleep_s = 3
# Sleep for a while to allow driver job runs async.
time.sleep(time_sleep_s)
proc.kill()
def verify():
tasks = list_tasks(detail=True)
assert len(tasks) == 7, (
"Incorrect number of tasks are reported. "
"Expected length: 1 parent + 2 finished child + 2 failed child + "
"2 failed grandchild tasks"
)
for task in tasks:
if "finished" in task["func_or_class_name"]:
assert (
task["state"] == "FINISHED"
), f"task {task['func_or_class_name']} has wrong state"
else:
assert (
task["state"] == "FAILED"
), f"task {task['func_or_class_name']} has wrong state"
assert task["error_type"] == "WORKER_DIED"
assert "Job finishes" in task["error_message"]
duration_ms = task["end_time_ms"] - task["start_time_ms"]
assert (
# It takes time for the job to run
duration_ms > time_sleep_s / 2 * 1000
and duration_ms < 2 * time_sleep_s * 1000
)
return True
wait_for_condition(
verify,
timeout=10,
retry_interval_ms=500,
)
@ray.remote
def task_finish_child(pid_actor):
ray.get(pid_actor.report_pid.remote("task_finish_child", os.getpid(), "FINISHED"))
pass
@ray.remote
def task_sleep_child(pid_actor):
ray.get(pid_actor.report_pid.remote("task_sleep_child", os.getpid()))
time.sleep(999)
@ray.remote
| ActorInitFailed |
python | davidhalter__jedi | test/completion/dynamic_params.py | {
"start": 1786,
"end": 2123
} | class ____(from_class(1),):
pass
# -----------------
# comprehensions
# -----------------
def from_comprehension(foo):
#? int() float()
return foo
[from_comprehension(1.0) for n in (1,)]
[from_comprehension(n) for n in (1,)]
# -----------------
# lambdas
# -----------------
#? int()
x_lambda = lambda x: x
x_lambda(1)
| Foo |
python | ansible__ansible | test/units/module_utils/basic/test_run_command.py | {
"start": 1054,
"end": 1855
} | class ____(BytesIO):
def __init__(self, *args, **kwargs):
fh = kwargs.pop('fh', None)
super(SpecialBytesIO, self).__init__(*args, **kwargs)
self.fh = fh
def fileno(self):
return self.fh
# We need to do this because some of our tests create a new value for stdout and stderr
# The new value is able to affect the string that is returned by the subprocess stdout and
# stderr but by the time the test gets it, it is too late to change the SpecialBytesIO that
# subprocess.Popen returns for stdout and stderr. If we could figure out how to change those as
# well, then we wouldn't need this.
def __eq__(self, other):
if id(self) == id(other) or self.fh == other.fileno():
return True
return False
| SpecialBytesIO |
python | kamyu104__LeetCode-Solutions | Python/count-good-numbers.py | {
"start": 541,
"end": 767
} | class ____(object):
def countGoodNumbers(self, n):
"""
:type n: int
:rtype: int
"""
MOD = 10**9 + 7
return pow(5, (n+1)//2%(MOD-1), MOD)*pow(4, n//2%(MOD-1), MOD) % MOD
| Solution2 |
python | redis__redis-py | tests/test_connection_pool.py | {
"start": 28247,
"end": 28589
} | class ____:
@pytest.fixture()
def r(self, request):
return _get_client(redis.Redis, request, single_connection_client=False)
def test_multi_connection_command(self, r):
assert not r.connection
assert r.set("a", "123")
assert r.get("a") == b"123"
@pytest.mark.onlynoncluster
| TestMultiConnectionClient |
python | joke2k__faker | faker/providers/phone_number/__init__.py | {
"start": 317,
"end": 5825
} | class ____(BaseProvider):
country_calling_codes: ElementsType[str] = (
"+93",
"+358 18",
"+355",
"+213",
"+1 684",
"+376",
"+244",
"+1 264",
"+1 268",
"+54",
"+374",
"+297",
"+247",
"+61",
"+672 1",
"+672",
"+43",
"+994",
"+1 242",
"+973",
"+880",
"+1 246",
"+1 268",
"+375",
"+32",
"+501",
"+229",
"+1 441",
"+975",
"+591",
"+599 7",
"+387",
"+267",
"+55",
"+246",
"+1 284",
"+673",
"+359",
"+226",
"+257",
"+855",
"+237",
"+1",
"+238",
"+599 3",
"+599 4",
"+599 7",
"+1 345",
"+236",
"+235",
"+64",
"+56",
"+86",
"+61 89164",
"+61 89162",
"+57",
"+269",
"+242",
"+243",
"+682",
"+506",
"+385",
"+53",
"+599 9",
"+357",
"+420",
"+45",
"+246",
"+253",
"+1 767",
"+1 809",
"+1 829",
"+1 849",
"+670",
"+56",
"+593",
"+20",
"+503",
"+881 2",
"+881 3",
"+882 13",
"+240",
"+291",
"+372",
"+268",
"+251",
"+500",
"+298",
"+679",
"+358",
"+33",
"+596",
"+594",
"+689",
"+241",
"+220",
"+995",
"+49",
"+233",
"+350",
"+881",
"+881 8",
"+881 9",
"+30",
"+299",
"+1 473",
"+590",
"+1 671",
"+502",
"+44 1481",
"+44 7781",
"+44 7839",
"+44 7911",
"+224",
"+245",
"+592",
"+509",
"+504",
"+852",
"+36",
"+354",
"+881 0",
"+881 1",
"+91",
"+62",
"+870",
"+800",
"+882",
"+883",
"+979",
"+808",
"+98",
"+964",
"+353",
"+881 6",
"+881 7",
"+44 1624",
"+44 7524",
"+44 7624",
"+44 7924",
"+972",
"+39",
"+225",
"+1 876",
"+47 79",
"+81",
"+44 1534",
"+962",
"+7 6",
"+7 7",
"+254",
"+686",
"+850",
"+82",
"+383",
"+965",
"+996",
"+856",
"+371",
"+961",
"+266",
"+231",
"+218",
"+423",
"+370",
"+352",
"+853",
"+261",
"+265",
"+60",
"+960",
"+223",
"+356",
"+692",
"+596",
"+222",
"+230",
"+262 269",
"+262 639",
"+52",
"+691",
"+1 808",
"+373",
"+377",
"+976",
"+382",
"+1 664",
"+212",
"+258",
"+95",
"+374 47",
"+374 97",
"+264",
"+674",
"+977",
"+31",
"+1 869",
"+687",
"+64",
"+505",
"+227",
"+234",
"+683",
"+672 3",
"+389",
"+90 392",
"+44 28",
"+1 670",
"+47",
"+968",
"+92",
"+680",
"+970",
"+507",
"+675",
"+595",
"+51",
"+63",
"+64",
"+48",
"+351",
"+1 787",
"+1 939",
"+974",
"+262",
"+40",
"+7",
"+250",
"+599 4",
"+590",
"+290",
"+1 869",
"+1 758",
"+590",
"+508",
"+1 784",
"+685",
"+378",
"+239",
"+966",
"+221",
"+381",
"+248",
"+232",
"+65",
"+599 3",
"+1 721",
"+421",
"+386",
"+677",
"+252",
"+27",
"+500",
"+995 34",
"+211",
"+34",
"+94",
"+249",
"+597",
"+47 79",
"+46",
"+41",
"+963",
"+886",
"+992",
"+255",
"+888",
"+66",
"+882 16",
"+228",
"+690",
"+676",
"+373 2",
"+373 5",
"+1 868",
"+290 8",
"+216",
"+90",
"+993",
"+1 649",
"+688",
"+256",
"+380",
"+971",
"+44",
"+1",
"+878",
"+598",
"+1 340",
"+998",
"+678",
"+39 06 698",
"+379",
"+58",
"+84",
"+1 808",
"+681",
"+967",
"+260",
"+255 24",
"+263",
)
formats: ElementsType[str] = ("###-###-###",)
msisdn_formats: ElementsType[str] = ("#############",)
def phone_number(self) -> str:
return self.numerify(self.random_element(self.formats))
def country_calling_code(self) -> str:
return self.random_element(self.country_calling_codes)
def msisdn(self) -> str:
"""https://en.wikipedia.org/wiki/MSISDN"""
return self.numerify(self.random_element(self.msisdn_formats))
| Provider |
python | google__pytype | pytype/pyi/parser_test.py | {
"start": 90481,
"end": 91511
} | class ____(parser_test_base.ParserTestBase):
def check(self, src, expected):
tree = self.parse(src)
all_ = [x for x in tree.constants if x.name == "__all__"]
pyval = all_[0].value if all_ else None
self.assertEqual(pyval, expected)
def test_basic(self):
self.check(
"""
__all__ = ["f", "g"]
""",
("f", "g"),
)
def test_tuple(self):
self.check(
"""
__all__ = ("f", "g")
""",
("f", "g"),
)
def test_augment(self):
self.check(
"""
__all__ = ["f", "g"]
__all__ += ["h"]
""",
("f", "g", "h"),
)
def test_if(self):
self.check(
"""
__all__ = ["f", "g"]
if sys.version_info > (3, 6, 0):
__all__ += ["h"]
""",
("f", "g", "h"),
)
def test_else(self):
self.check(
"""
__all__ = ["f", "g"]
if sys.version_info < (3, 6, 0):
__all__ += ["e"]
else:
__all__ += ["h"]
""",
("f", "g", "h"),
)
| AllTest |
python | getsentry__sentry | tests/sentry/integrations/test_pipeline.py | {
"start": 28380,
"end": 29734
} | class ____(IntegrationTestCase):
provider = GitlabIntegrationProvider
external_id = "dummy_id-123"
def test_different_user_same_external_id(self, *args) -> None:
new_user = self.create_user()
self.setUp()
integration = self.create_provider_integration(
provider=self.provider.key,
external_id=self.external_id,
metadata={"url": "https://example.com"},
)
identity_provider = self.create_identity_provider(
external_id=self.external_id, type=self.provider.key
)
Identity.objects.create(
idp_id=identity_provider.id, external_id="AccountId", user_id=new_user.id
)
self.pipeline.state.data = {
"external_id": self.external_id,
"name": "Name",
"metadata": {"url": "https://example.com"},
"user_identity": {
"type": self.provider.key,
"external_id": "AccountId",
"scopes": [],
"data": {},
},
}
resp = self.pipeline.finish_pipeline()
assert isinstance(resp, HttpResponse)
assert not OrganizationIntegration.objects.filter(integration_id=integration.id)
assert "account is linked to a different Sentry user" in resp.content.decode()
| GitlabFinishPipelineTest |
python | sqlalchemy__sqlalchemy | test/orm/test_instrumentation.py | {
"start": 759,
"end": 10236
} | class ____(fixtures.ORMTest):
def fixture(self):
return Table(
"t",
MetaData(),
Column("id", Integer, primary_key=True),
Column("type", Integer),
Column("x", Integer),
Column("y", Integer),
)
def register(self, cls, canary):
original_init = cls.__init__
instrumentation.register_class(cls)
ne_(cls.__init__, original_init)
manager = instrumentation.manager_of_class(cls)
def init(state, args, kwargs):
canary.append((cls, "init", state.class_))
event.listen(manager, "init", init, raw=True)
def test_ai(self):
inits = []
class A:
def __init__(self):
inits.append((A, "__init__"))
A()
eq_(inits, [(A, "__init__")])
def test_A(self):
inits = []
class A:
pass
self.register(A, inits)
A()
eq_(inits, [(A, "init", A)])
def test_Ai(self):
inits = []
class A:
def __init__(self):
inits.append((A, "__init__"))
self.register(A, inits)
A()
eq_(inits, [(A, "init", A), (A, "__init__")])
def test_ai_B(self):
inits = []
class A:
def __init__(self):
inits.append((A, "__init__"))
class B(A):
pass
self.register(B, inits)
A()
eq_(inits, [(A, "__init__")])
del inits[:]
B()
eq_(inits, [(B, "init", B), (A, "__init__")])
def test_ai_Bi(self):
inits = []
class A:
def __init__(self):
inits.append((A, "__init__"))
class B(A):
def __init__(self):
inits.append((B, "__init__"))
super().__init__()
self.register(B, inits)
A()
eq_(inits, [(A, "__init__")])
del inits[:]
B()
eq_(inits, [(B, "init", B), (B, "__init__"), (A, "__init__")])
def test_Ai_bi(self):
inits = []
class A:
def __init__(self):
inits.append((A, "__init__"))
self.register(A, inits)
class B(A):
def __init__(self):
inits.append((B, "__init__"))
super().__init__()
A()
eq_(inits, [(A, "init", A), (A, "__init__")])
del inits[:]
B()
eq_(inits, [(B, "__init__"), (A, "init", B), (A, "__init__")])
def test_Ai_Bi(self):
inits = []
class A:
def __init__(self):
inits.append((A, "__init__"))
self.register(A, inits)
class B(A):
def __init__(self):
inits.append((B, "__init__"))
super().__init__()
self.register(B, inits)
A()
eq_(inits, [(A, "init", A), (A, "__init__")])
del inits[:]
B()
eq_(inits, [(B, "init", B), (B, "__init__"), (A, "__init__")])
def test_Ai_B(self):
inits = []
class A:
def __init__(self):
inits.append((A, "__init__"))
self.register(A, inits)
class B(A):
pass
self.register(B, inits)
A()
eq_(inits, [(A, "init", A), (A, "__init__")])
del inits[:]
B()
eq_(inits, [(B, "init", B), (A, "__init__")])
def test_Ai_Bi_Ci(self):
inits = []
class A:
def __init__(self):
inits.append((A, "__init__"))
self.register(A, inits)
class B(A):
def __init__(self):
inits.append((B, "__init__"))
super().__init__()
self.register(B, inits)
class C(B):
def __init__(self):
inits.append((C, "__init__"))
super().__init__()
self.register(C, inits)
A()
eq_(inits, [(A, "init", A), (A, "__init__")])
del inits[:]
B()
eq_(inits, [(B, "init", B), (B, "__init__"), (A, "__init__")])
del inits[:]
C()
eq_(
inits,
[
(C, "init", C),
(C, "__init__"),
(B, "__init__"),
(A, "__init__"),
],
)
def test_Ai_bi_Ci(self):
inits = []
class A:
def __init__(self):
inits.append((A, "__init__"))
self.register(A, inits)
class B(A):
def __init__(self):
inits.append((B, "__init__"))
super().__init__()
class C(B):
def __init__(self):
inits.append((C, "__init__"))
super().__init__()
self.register(C, inits)
A()
eq_(inits, [(A, "init", A), (A, "__init__")])
del inits[:]
B()
eq_(inits, [(B, "__init__"), (A, "init", B), (A, "__init__")])
del inits[:]
C()
eq_(
inits,
[
(C, "init", C),
(C, "__init__"),
(B, "__init__"),
(A, "__init__"),
],
)
def test_Ai_b_Ci(self):
inits = []
class A:
def __init__(self):
inits.append((A, "__init__"))
self.register(A, inits)
class B(A):
pass
class C(B):
def __init__(self):
inits.append((C, "__init__"))
super().__init__()
self.register(C, inits)
A()
eq_(inits, [(A, "init", A), (A, "__init__")])
del inits[:]
B()
eq_(inits, [(A, "init", B), (A, "__init__")])
del inits[:]
C()
eq_(inits, [(C, "init", C), (C, "__init__"), (A, "__init__")])
def test_Ai_B_Ci(self):
inits = []
class A:
def __init__(self):
inits.append((A, "__init__"))
self.register(A, inits)
class B(A):
pass
self.register(B, inits)
class C(B):
def __init__(self):
inits.append((C, "__init__"))
super().__init__()
self.register(C, inits)
A()
eq_(inits, [(A, "init", A), (A, "__init__")])
del inits[:]
B()
eq_(inits, [(B, "init", B), (A, "__init__")])
del inits[:]
C()
eq_(inits, [(C, "init", C), (C, "__init__"), (A, "__init__")])
def test_Ai_B_C(self):
inits = []
class A:
def __init__(self):
inits.append((A, "__init__"))
self.register(A, inits)
class B(A):
pass
self.register(B, inits)
class C(B):
pass
self.register(C, inits)
A()
eq_(inits, [(A, "init", A), (A, "__init__")])
del inits[:]
B()
eq_(inits, [(B, "init", B), (A, "__init__")])
del inits[:]
C()
eq_(inits, [(C, "init", C), (A, "__init__")])
def test_A_Bi_C(self):
inits = []
class A:
pass
self.register(A, inits)
class B(A):
def __init__(self):
inits.append((B, "__init__"))
self.register(B, inits)
class C(B):
pass
self.register(C, inits)
A()
eq_(inits, [(A, "init", A)])
del inits[:]
B()
eq_(inits, [(B, "init", B), (B, "__init__")])
del inits[:]
C()
eq_(inits, [(C, "init", C), (B, "__init__")])
def test_A_B_Ci(self):
inits = []
class A:
pass
self.register(A, inits)
class B(A):
pass
self.register(B, inits)
class C(B):
def __init__(self):
inits.append((C, "__init__"))
self.register(C, inits)
A()
eq_(inits, [(A, "init", A)])
del inits[:]
B()
eq_(inits, [(B, "init", B)])
del inits[:]
C()
eq_(inits, [(C, "init", C), (C, "__init__")])
def test_A_B_C(self):
inits = []
class A:
pass
self.register(A, inits)
class B(A):
pass
self.register(B, inits)
class C(B):
pass
self.register(C, inits)
A()
eq_(inits, [(A, "init", A)])
del inits[:]
B()
eq_(inits, [(B, "init", B)])
del inits[:]
C()
eq_(inits, [(C, "init", C)])
def test_defaulted_init(self):
class X:
def __init__(self_, a, b=123, c="abc"):
self_.a = a
self_.b = b
self_.c = c
instrumentation.register_class(X)
o = X("foo")
eq_(o.a, "foo")
eq_(o.b, 123)
eq_(o.c, "abc")
class Y:
unique = object()
class OutOfScopeForEval:
def __repr__(self_):
# misleading repr
return "123"
outofscope = OutOfScopeForEval()
def __init__(self_, u=unique, o=outofscope):
self_.u = u
self_.o = o
instrumentation.register_class(Y)
o = Y()
assert o.u is Y.unique
assert o.o is Y.outofscope
| InitTest |
python | python__mypy | mypyc/test/test_tuplename.py | {
"start": 249,
"end": 1044
} | class ____(unittest.TestCase):
def setUp(self) -> None:
self.inst_a = RInstance(ClassIR("A", "__main__"))
self.inst_b = RInstance(ClassIR("B", "__main__"))
def test_names(self) -> None:
assert RTuple([int_rprimitive, int_rprimitive]).unique_id == "T2II"
assert RTuple([list_rprimitive, object_rprimitive, self.inst_a]).unique_id == "T3OOO"
assert RTuple([list_rprimitive, object_rprimitive, self.inst_b]).unique_id == "T3OOO"
assert RTuple([]).unique_id == "T0"
assert (
RTuple([RTuple([]), RTuple([int_rprimitive, int_rprimitive])]).unique_id == "T2T0T2II"
)
assert (
RTuple([bool_rprimitive, RUnion([bool_rprimitive, int_rprimitive])]).unique_id
== "T2CO"
)
| TestTupleNames |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 64333,
"end": 65178
} | class ____:
xlAboveAverageCondition = 12 # from enum XlFormatConditionType
xlBlanksCondition = 10 # from enum XlFormatConditionType
xlCellValue = 1 # from enum XlFormatConditionType
xlColorScale = 3 # from enum XlFormatConditionType
xlDatabar = 4 # from enum XlFormatConditionType
xlErrorsCondition = 16 # from enum XlFormatConditionType
xlExpression = 2 # from enum XlFormatConditionType
xlIconSets = 6 # from enum XlFormatConditionType
xlNoBlanksCondition = 13 # from enum XlFormatConditionType
xlNoErrorsCondition = 17 # from enum XlFormatConditionType
xlTextString = 9 # from enum XlFormatConditionType
xlTimePeriod = 11 # from enum XlFormatConditionType
xlTop10 = 5 # from enum XlFormatConditionType
xlUniqueValues = 8 # from enum XlFormatConditionType
| FormatConditionType |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/triggers/emr.py | {
"start": 4028,
"end": 5405
} | class ____(AwsBaseWaiterTrigger):
"""
Asynchronously poll the boto3 API and wait for the JobFlow to finish terminating.
:param job_flow_id: ID of the EMR Job Flow to terminate
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
"""
def __init__(
self,
job_flow_id: str,
aws_conn_id: str | None = None,
waiter_delay: int = 30,
waiter_max_attempts: int = 60,
):
super().__init__(
serialized_fields={"job_flow_id": job_flow_id},
waiter_name="job_flow_terminated",
waiter_args={"ClusterId": job_flow_id},
failure_message="JobFlow termination failed",
status_message="JobFlow termination in progress",
status_queries=[
"Cluster.Status.State",
"Cluster.Status.StateChangeReason",
"Cluster.Status.ErrorDetails",
],
return_value=None,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
def hook(self) -> AwsGenericHook:
return EmrHook(aws_conn_id=self.aws_conn_id)
| EmrTerminateJobFlowTrigger |
python | streamlit__streamlit | lib/streamlit/testing/v1/element_tree.py | {
"start": 36016,
"end": 36421
} | class ____(Element):
proto: ArrowProto = field(repr=False)
def __init__(self, proto: ArrowProto, root: ElementTree) -> None:
self.key = None
self.proto = proto
self.root = root
self.type = "arrow_table"
@property
def value(self) -> PandasDataframe:
return dataframe_util.convert_arrow_bytes_to_pandas_df(self.proto.data)
@dataclass(repr=False)
| Table |
python | kubernetes-client__python | kubernetes/client/models/v2_horizontal_pod_autoscaler_behavior.py | {
"start": 383,
"end": 4379
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'scale_down': 'V2HPAScalingRules',
'scale_up': 'V2HPAScalingRules'
}
attribute_map = {
'scale_down': 'scaleDown',
'scale_up': 'scaleUp'
}
def __init__(self, scale_down=None, scale_up=None, local_vars_configuration=None): # noqa: E501
"""V2HorizontalPodAutoscalerBehavior - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._scale_down = None
self._scale_up = None
self.discriminator = None
if scale_down is not None:
self.scale_down = scale_down
if scale_up is not None:
self.scale_up = scale_up
@property
def scale_down(self):
"""Gets the scale_down of this V2HorizontalPodAutoscalerBehavior. # noqa: E501
:return: The scale_down of this V2HorizontalPodAutoscalerBehavior. # noqa: E501
:rtype: V2HPAScalingRules
"""
return self._scale_down
@scale_down.setter
def scale_down(self, scale_down):
"""Sets the scale_down of this V2HorizontalPodAutoscalerBehavior.
:param scale_down: The scale_down of this V2HorizontalPodAutoscalerBehavior. # noqa: E501
:type: V2HPAScalingRules
"""
self._scale_down = scale_down
@property
def scale_up(self):
"""Gets the scale_up of this V2HorizontalPodAutoscalerBehavior. # noqa: E501
:return: The scale_up of this V2HorizontalPodAutoscalerBehavior. # noqa: E501
:rtype: V2HPAScalingRules
"""
return self._scale_up
@scale_up.setter
def scale_up(self, scale_up):
"""Sets the scale_up of this V2HorizontalPodAutoscalerBehavior.
:param scale_up: The scale_up of this V2HorizontalPodAutoscalerBehavior. # noqa: E501
:type: V2HPAScalingRules
"""
self._scale_up = scale_up
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V2HorizontalPodAutoscalerBehavior):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V2HorizontalPodAutoscalerBehavior):
return True
return self.to_dict() != other.to_dict()
| V2HorizontalPodAutoscalerBehavior |
python | pypa__pip | tests/lib/test_lib.py | {
"start": 2625,
"end": 8652
} | class ____:
def run_stderr_with_prefix(
self, script: PipTestEnvironment, prefix: str, **kwargs: Any
) -> None:
"""
Call run() that prints stderr with the given prefix.
"""
text = f"{prefix}: hello, world\\n"
command = f'import sys; sys.stderr.write("{text}")'
args = [sys.executable, "-c", command]
script.run(*args, **kwargs)
def run_with_log_command(
self, script: PipTestEnvironment, sub_string: str, **kwargs: Any
) -> None:
"""
Call run() on a command that logs a "%"-style format string using
the given substring as the string's replacement field.
"""
command = (
"import logging; logging.basicConfig(level='INFO'); "
f"logging.getLogger().info('sub: {sub_string}', 'foo')"
)
args = [sys.executable, "-c", command]
script.run(*args, **kwargs)
@pytest.mark.parametrize(
"prefix",
[
"DEBUG",
"INFO",
"FOO",
],
)
def test_run__allowed_stderr(self, script: PipTestEnvironment, prefix: str) -> None:
"""
Test calling run() with allowed stderr.
"""
# Check that no error happens.
self.run_stderr_with_prefix(script, prefix)
def test_run__allow_stderr_warning(self, script: PipTestEnvironment) -> None:
"""
Test passing allow_stderr_warning=True.
"""
# Check that no error happens.
self.run_stderr_with_prefix(
script,
"WARNING",
allow_stderr_warning=True,
)
# Check that an error still happens with ERROR.
expected_start = "stderr has an unexpected error"
with assert_error_startswith(RuntimeError, expected_start):
self.run_stderr_with_prefix(
script,
"ERROR",
allow_stderr_warning=True,
)
@pytest.mark.parametrize(
"prefix",
[
"WARNING",
"ERROR",
],
)
def test_run__allow_stderr_error(
self, script: PipTestEnvironment, prefix: str
) -> None:
"""
Test passing allow_stderr_error=True.
"""
# Check that no error happens.
self.run_stderr_with_prefix(script, prefix, allow_stderr_error=True)
@pytest.mark.parametrize(
"prefix, expected_start",
[
("WARNING", "stderr has an unexpected warning"),
("ERROR", "stderr has an unexpected error"),
],
)
def test_run__unexpected_stderr(
self, script: PipTestEnvironment, prefix: str, expected_start: str
) -> None:
"""
Test calling run() with unexpected stderr output.
"""
with assert_error_startswith(RuntimeError, expected_start):
self.run_stderr_with_prefix(script, prefix)
def test_run__logging_error(self, script: PipTestEnvironment) -> None:
"""
Test calling run() with an unexpected logging error.
"""
# Pass a good substitution string.
self.run_with_log_command(script, sub_string="%r")
expected_start = "stderr has a logging error, which is never allowed"
with assert_error_startswith(RuntimeError, expected_start):
# Pass a bad substitution string. Also, pass
# allow_stderr_error=True to check that the RuntimeError occurs
# even under the stricter test condition of when we are allowing
# other types of errors.
self.run_with_log_command(
script,
sub_string="{!r}",
allow_stderr_error=True,
)
def test_run__allow_stderr_error_false_error_with_expect_error(
self, script: PipTestEnvironment
) -> None:
"""
Test passing allow_stderr_error=False with expect_error=True.
"""
expected_start = "cannot pass allow_stderr_error=False with expect_error=True"
with assert_error_startswith(RuntimeError, expected_start):
script.run("python", allow_stderr_error=False, expect_error=True)
def test_run__allow_stderr_warning_false_error_with_expect_stderr(
self, script: PipTestEnvironment
) -> None:
"""
Test passing allow_stderr_warning=False with expect_stderr=True.
"""
expected_start = (
"cannot pass allow_stderr_warning=False with expect_stderr=True"
)
with assert_error_startswith(RuntimeError, expected_start):
script.run(
"python",
allow_stderr_warning=False,
expect_stderr=True,
)
@pytest.mark.parametrize(
"arg_name",
[
"expect_error",
"allow_stderr_error",
],
)
def test_run__allow_stderr_warning_false_error(
self, script: PipTestEnvironment, arg_name: str
) -> None:
"""
Test passing allow_stderr_warning=False when it is not allowed.
"""
kwargs: dict[str, Any] = {"allow_stderr_warning": False, arg_name: True}
expected_start = (
"cannot pass allow_stderr_warning=False with allow_stderr_error=True"
)
with assert_error_startswith(RuntimeError, expected_start):
script.run("python", **kwargs)
def test_run__expect_error_fails_when_zero_returncode(
self, script: PipTestEnvironment
) -> None:
expected_start = "Script passed unexpectedly"
with assert_error_startswith(AssertionError, expected_start):
script.run("python", expect_error=True)
def test_run__no_expect_error_fails_when_nonzero_returncode(
self, script: PipTestEnvironment
) -> None:
expected_start = "Script returned code: 1"
with assert_error_startswith(AssertionError, expected_start):
script.run("python", "-c", "import sys; sys.exit(1)")
| TestPipTestEnvironment |
python | kamyu104__LeetCode-Solutions | Python/find-the-original-typed-string-i.py | {
"start": 288,
"end": 653
} | class ____(object):
def possibleStringCount(self, word):
"""
:type word: str
:rtype: int
"""
result = 1
curr = 0
for i in xrange(len(word)):
curr += 1
if i+1 == len(word) or word[i+1] != word[i]:
result += curr-1
curr = 0
return result
| Solution2 |
python | django__django | django/core/serializers/pyyaml.py | {
"start": 1245,
"end": 2302
} | class ____(PythonSerializer):
"""Convert a queryset to YAML."""
internal_use_only = False
def _value_from_field(self, obj, field):
# A nasty special case: base YAML doesn't support serialization of time
# types (as opposed to dates or datetimes, which it does support).
# Since we want to use the "safe" serializer for better
# interoperability, we need to do something with those pesky times.
# Converting 'em to strings isn't perfect, but it's better than a
# "!!python/time" type which would halt deserialization under any other
# language.
value = super()._value_from_field(obj, field)
if isinstance(value, datetime.time):
value = str(value)
return value
def end_serialization(self):
self.options.setdefault("allow_unicode", True)
yaml.dump(self.objects, self.stream, Dumper=DjangoSafeDumper, **self.options)
def getvalue(self):
# Grandparent super
return super(PythonSerializer, self).getvalue()
| Serializer |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/util/_collections.py | {
"start": 17545,
"end": 19277
} | class ____(ScopedRegistry[_T]):
"""A :class:`.ScopedRegistry` that uses a ``threading.local()``
variable for storage.
"""
def __init__(self, createfunc: Callable[[], _T]):
self.createfunc = createfunc
self.registry = threading.local()
def __call__(self) -> _T:
try:
return self.registry.value # type: ignore[no-any-return]
except AttributeError:
val = self.registry.value = self.createfunc()
return val
def has(self) -> bool:
return hasattr(self.registry, "value")
def set(self, obj: _T) -> None:
self.registry.value = obj
def clear(self) -> None:
try:
del self.registry.value
except AttributeError:
pass
def has_dupes(sequence, target):
"""Given a sequence and search object, return True if there's more
than one, False if zero or one of them.
"""
# compare to .index version below, this version introduces less function
# overhead and is usually the same speed. At 15000 items (way bigger than
# a relationship-bound collection in memory usually is) it begins to
# fall behind the other version only by microseconds.
c = 0
for item in sequence:
if item is target:
c += 1
if c > 1:
return True
return False
# .index version. the two __contains__ calls as well
# as .index() and isinstance() slow this down.
# def has_dupes(sequence, target):
# if target not in sequence:
# return False
# elif not isinstance(sequence, collections_abc.Sequence):
# return False
#
# idx = sequence.index(target)
# return target in sequence[idx + 1:]
| ThreadLocalRegistry |
python | joke2k__faker | faker/providers/address/fr_CA/__init__.py | {
"start": 71,
"end": 1859
} | class ____(EnCaProvider):
# Most of the parts are identical to en_CA, we simply override those who are not shared between the two.
city_prefixes = (
"Ville",
"Baie",
"Saint-",
"Sainte-",
"Mont-",
"La",
"Lac-",
"L'",
"L'Île-",
)
city_suffixes = (
"Est",
"Ouest",
"-sur-Mer",
)
street_prefixes = (
"rue",
"rue",
"chemin",
"avenue",
"boulevard",
"route",
"rang",
"allé",
"montée",
)
provinces = (
"Alberta",
"Colombie-Britannique",
"Manitoba",
"Nouveau-Brunswick",
"Terre-Neuve-et-Labrador",
"Territoires du Nord-Ouest",
"Nouvelle-Écosse",
"Nunavut",
"Ontario",
"Île-du-Prince-Édouard",
"Québec",
"Saskatchewan",
"Yukon",
)
street_name_formats = (
"{{street_prefix}} {{first_name}}",
"{{street_prefix}} {{last_name}}",
)
city_formats = (
"{{city_prefix}} {{last_name}}",
"{{city_prefix}} {{last_name}}",
"{{city_prefix}}-{{city_prefix}}-{{last_name}}",
"{{city_prefix}} {{first_name}} {{city_suffix}}",
"{{city_prefix}} {{first_name}}",
"{{city_prefix}} {{first_name}}",
"{{city_prefix}} {{first_name}}",
"{{last_name}}",
"{{last_name}}",
"{{first_name}} {{city_suffix}}",
"{{last_name}} {{city_suffix}}",
)
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
def street_prefix(self) -> str:
"""
:example: 'rue'
"""
return self.random_element(self.street_prefixes)
| Provider |
python | google__pytype | pytype/pyc/opcodes.py | {
"start": 12439,
"end": 12533
} | class ____(OpcodeWithArg):
_FLAGS = HAS_ARGUMENT | HAS_JREL
__slots__ = ()
| POP_JUMP_IF_FALSE |
python | pyparsing__pyparsing | pyparsing/core.py | {
"start": 102731,
"end": 103045
} | class ____(Literal):
def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
if instring[loc] == self.firstMatchChar:
return loc + 1, self.match
raise ParseException(instring, loc, self.errmsg, self)
ParserElement._literalStringClass = Literal
| _SingleCharLiteral |
python | apache__airflow | airflow-core/tests/unit/utils/test_process_utils.py | {
"start": 1336,
"end": 3501
} | class ____:
@staticmethod
def _ignores_sigterm(child_pid, child_setup_done):
def signal_handler(unused_signum, unused_frame):
pass
signal.signal(signal.SIGTERM, signal_handler)
child_pid.value = os.getpid()
child_setup_done.release()
while True:
time.sleep(1)
@staticmethod
def _parent_of_ignores_sigterm(parent_pid, child_pid, setup_done):
def signal_handler(unused_signum, unused_frame):
pass
os.setsid()
signal.signal(signal.SIGTERM, signal_handler)
child_setup_done = multiprocessing.Semaphore(0)
child = multiprocessing.Process(
target=TestReapProcessGroup._ignores_sigterm, args=[child_pid, child_setup_done]
)
child.start()
child_setup_done.acquire(timeout=5.0)
parent_pid.value = os.getpid()
setup_done.release()
while True:
time.sleep(1)
def test_reap_process_group(self):
"""
Spin up a process that can't be killed by SIGTERM and make sure
it gets killed anyway.
"""
parent_setup_done = multiprocessing.Semaphore(0)
parent_pid = multiprocessing.Value("i", 0)
child_pid = multiprocessing.Value("i", 0)
args = [parent_pid, child_pid, parent_setup_done]
parent = multiprocessing.Process(target=TestReapProcessGroup._parent_of_ignores_sigterm, args=args)
try:
parent.start()
assert parent_setup_done.acquire(timeout=5.0)
assert psutil.pid_exists(parent_pid.value)
assert psutil.pid_exists(child_pid.value)
process_utils.reap_process_group(parent_pid.value, logging.getLogger(), timeout=1)
assert not psutil.pid_exists(parent_pid.value)
assert not psutil.pid_exists(child_pid.value)
finally:
try:
os.kill(parent_pid.value, signal.SIGKILL) # terminate doesn't work here
os.kill(child_pid.value, signal.SIGKILL) # terminate doesn't work here
except OSError:
pass
@pytest.mark.db_test
| TestReapProcessGroup |
python | numba__numba | numba/tests/test_types.py | {
"start": 22504,
"end": 22737
} | class ____(TestCase):
"""Tests the use of the Type metaclass init correctly setting the flag on
the `is_internal` attr of a concrete Type class
"""
source_lines = """
from numba.core import types
| TestIsInternalTypeMarker |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_text_editor_code_execution_tool_result_block_param.py | {
"start": 1129,
"end": 1470
} | class ____(TypedDict, total=False):
content: Required[Content]
tool_use_id: Required[str]
type: Required[Literal["text_editor_code_execution_tool_result"]]
cache_control: Optional[BetaCacheControlEphemeralParam]
"""Create a cache control breakpoint at this content block."""
| BetaTextEditorCodeExecutionToolResultBlockParam |
python | doocs__leetcode | solution/1300-1399/1312.Minimum Insertion Steps to Make a String Palindrome/Solution3.py | {
"start": 0,
"end": 414
} | class ____:
def minInsertions(self, s: str) -> int:
n = len(s)
f = [[0] * n for _ in range(n)]
for k in range(2, n + 1):
for i in range(n - k + 1):
j = i + k - 1
if s[i] == s[j]:
f[i][j] = f[i + 1][j - 1]
else:
f[i][j] = min(f[i + 1][j], f[i][j - 1]) + 1
return f[0][n - 1]
| Solution |
python | mwaskom__seaborn | seaborn/_stats/order.py | {
"start": 801,
"end": 2259
} | class ____(Stat):
"""
Replace observations with percentile values.
Parameters
----------
k : list of numbers or int
If a list of numbers, this gives the percentiles (in [0, 100]) to compute.
If an integer, compute `k` evenly-spaced percentiles between 0 and 100.
For example, `k=5` computes the 0, 25, 50, 75, and 100th percentiles.
method : str
Method for interpolating percentiles between observed datapoints.
See :func:`numpy.percentile` for valid options and more information.
Examples
--------
.. include:: ../docstrings/objects.Perc.rst
"""
k: int | list[float] = 5
method: str = "linear"
group_by_orient: ClassVar[bool] = True
def _percentile(self, data: DataFrame, var: str) -> DataFrame:
k = list(np.linspace(0, 100, self.k)) if isinstance(self.k, int) else self.k
method = cast(_MethodKind, self.method)
values = data[var].dropna()
if _version_predates(np, "1.22"):
res = np.percentile(values, k, interpolation=method) # type: ignore
else:
res = np.percentile(data[var].dropna(), k, method=method)
return DataFrame({var: res, "percentile": k})
def __call__(
self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],
) -> DataFrame:
var = {"x": "y", "y": "x"}[orient]
return groupby.apply(data, self._percentile, var)
| Perc |
python | pennersr__django-allauth | allauth/account/stages.py | {
"start": 627,
"end": 1576
} | class ____:
key: str # Set in subclasses
urlname: Optional[str] = None
login: Login
def __init__(self, controller, request, login):
if not self.key:
raise ValueError()
self.controller = controller
self.request = request
self.login = login
self.state = (
self.login.state.setdefault("stages", {})
.setdefault(self.key, {})
.setdefault("data", {})
)
def handle(self):
return None, True
def exit(self):
from allauth.account.internal.flows.login import resume_login
self.controller.set_handled(self.key)
return resume_login(self.request, self.login)
def abort(self):
from allauth.account.internal.stagekit import clear_login
clear_login(self.request)
return headed_redirect_response("account_login")
def is_resumable(self, request):
return True
| LoginStage |
python | joke2k__faker | faker/providers/phone_number/lv_LV/__init__.py | {
"start": 49,
"end": 184
} | class ____(PhoneNumberProvider):
formats = (
"+371 ########",
"+(371) ########",
"+371########",
)
| Provider |
python | tensorflow__tensorflow | third_party/xla/xla/python/profiler/profile_data_test.py | {
"start": 781,
"end": 6373
} | class ____(absltest.TestCase):
def test_find_plane_with_name(self):
profile = profile_data.ProfileData.from_text_proto("""
planes { name: "a" }
planes { name: "b" }
""")
self.assertEqual(profile.find_plane_with_name('a').name, 'a')
self.assertEqual(profile.find_plane_with_name('b').name, 'b')
self.assertIsNone(profile.find_plane_with_name('c'))
def test_visit_space(self):
space = profile_data.ProfileData.from_text_proto("""
planes { name: "a" }
planes { name: "b" }
""")
self.assertEqual([plane.name for plane in space.planes], ['a', 'b'])
def test_visit_empty_space(self):
space = profile_data.ProfileData.from_text_proto('')
self.assertEmpty(list(space.planes))
def test_visit_plane(self):
profile = profile_data.ProfileData.from_text_proto("""
planes {
name: "p0"
lines { name: "t1" }
lines { name: "t2" }
stats { metadata_id: 1 str_value: "world" }
stat_metadata {
key: 1
value { name: "hello" }
}
}
""")
plane = profile.find_plane_with_name('p0')
self.assertEqual(plane.name, 'p0')
self.assertEqual([line.name for line in plane.lines], ['t1', 't2'])
self.assertEqual(dict(plane.stats), {'hello': 'world'})
def test_visit_empty_plane(self):
profile = profile_data.ProfileData.from_text_proto('planes {}')
plane = next(profile.planes)
self.assertEmpty(plane.name)
self.assertEmpty(list(plane.lines))
def test_visit_line(self):
profile = profile_data.ProfileData.from_text_proto("""
planes {
name: "p0"
lines {
name: "t100"
events { metadata_id: 1 }
events { metadata_id: 2 }
}
event_metadata {
key: 1
value { name: "foo" }
}
event_metadata {
key: 2
value { name: "bar" }
}
}
""")
plane = next(profile.planes)
lines = list(plane.lines)
self.assertLen(lines, 1)
line = lines[0]
self.assertEqual(line.name, 't100')
self.assertListEqual([event.name for event in line.events], ['foo', 'bar'])
def test_visit_empty_line(self):
profile = profile_data.ProfileData.from_text_proto("""
planes {
name: "p0"
lines {}
}
""")
plane = next(profile.planes)
lines = list(plane.lines)
self.assertLen(lines, 1)
line = lines[0]
self.assertEmpty(line.name)
self.assertEmpty(list(line.events))
def test_visit_event(self):
profile = profile_data.ProfileData.from_text_proto("""
planes {
name: "p0"
lines {
timestamp_ns: 1000
events {
metadata_id: 1
offset_ps: 500000
duration_ps: 600000
stats { metadata_id: 1 double_value: 400.0 }
stats { metadata_id: 2 uint64_value: 1024 }
stats { metadata_id: 3 ref_value: 4 }
}
}
event_metadata {
key: 1
value { name: "hlo" }
}
stat_metadata {
key: 1
value { name: "flops" }
}
stat_metadata {
key: 2
value { name: "bytes" }
}
stat_metadata {
key: 3
value { name: "provenance" }
}
stat_metadata {
key: 4
value { name: "tf_op" }
}
}
""")
plane = next(profile.planes)
lines = list(plane.lines)
self.assertLen(lines, 1)
line = lines[0]
events = list(line.events)
self.assertLen(events, 1)
event = events[0]
self.assertEqual(event.start_ns, 1500.0)
self.assertEqual(event.duration_ns, 600.0)
self.assertEqual(event.end_ns, 2100.0)
self.assertEqual(event.name, 'hlo')
self.assertEqual(
dict(event.stats),
{'flops': 400.0, 'bytes': 1024, 'provenance': 'tf_op'},
)
def test_visit_event_missing_metadata(self):
profile = profile_data.ProfileData.from_text_proto("""
planes {
name: "p0"
lines {
timestamp_ns: 1000
events {
metadata_id: 1
stats { metadata_id: 1 double_value: 400.0 }
stats { metadata_id: 2 uint64_value: 1024 }
stats { metadata_id: 3 ref_value: 4 }
}
}
stat_metadata {
key: 1
value { name: "flops" }
}
stat_metadata {
key: 3
value { name: "provenance" }
}
}
""")
plane = next(profile.planes)
lines = list(plane.lines)
self.assertLen(lines, 1)
line = lines[0]
events = list(line.events)
self.assertLen(events, 1)
event = events[0]
self.assertEqual(event.name, '')
self.assertEqual(
dict(filter(lambda x: x[0] is not None, event.stats)),
{'flops': 400.0, 'provenance': ''},
)
def test_create_profile_data_from_file(self):
serialized = profile_data.ProfileData.text_proto_to_serialized_xspace("""
planes { name: "a" }
planes { name: "b" }
""")
tmp_file = self.create_tempfile().full_path
with open(tmp_file, 'wb') as f:
f.write(serialized)
profile = profile_data.ProfileData.from_file(tmp_file)
self.assertEqual([plane.name for plane in profile.planes], ['a', 'b'])
if __name__ == '__main__':
absltest.main()
| ProfileDataTest |
python | django__django | tests/db_functions/text/test_ord.py | {
"start": 205,
"end": 1238
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.john = Author.objects.create(name="John Smith", alias="smithj")
cls.elena = Author.objects.create(name="Élena Jordan", alias="elena")
cls.rhonda = Author.objects.create(name="Rhonda")
def test_basic(self):
authors = Author.objects.annotate(name_part=Ord("name"))
self.assertCountEqual(
authors.filter(name_part__gt=Ord(Value("John"))), [self.elena, self.rhonda]
)
self.assertCountEqual(
authors.exclude(name_part__gt=Ord(Value("John"))), [self.john]
)
def test_transform(self):
with register_lookup(CharField, Ord):
authors = Author.objects.annotate(first_initial=Left("name", 1))
self.assertCountEqual(
authors.filter(first_initial__ord=ord("J")), [self.john]
)
self.assertCountEqual(
authors.exclude(first_initial__ord=ord("J")), [self.elena, self.rhonda]
)
| OrdTests |
python | davidhalter__jedi | test/completion/classes.py | {
"start": 6176,
"end": 6319
} | class ____():
def __init__(self, obj):
self.obj = obj
def __getattr__(self, name):
return getattr(self.obj, name)
| Wrapper |
python | charliermarsh__ruff | crates/ruff_python_ast/generate.py | {
"start": 3618,
"end": 5007
} | class ____:
name: str
variant: str
ty: str
doc: str | None
fields: list[Field] | None
derives: list[str]
custom_source_order: bool
source_order: list[str] | None
def __init__(self, group: Group, node_name: str, node: dict[str, Any]) -> None:
self.name = node_name
self.variant = node.get("variant", node_name.removeprefix(group.name))
self.ty = f"crate::{node_name}"
self.fields = None
fields = node.get("fields")
if fields is not None:
self.fields = [Field(f) for f in fields]
self.custom_source_order = node.get("custom_source_order", False)
self.derives = node.get("derives", [])
self.doc = node.get("doc")
self.source_order = node.get("source_order")
def fields_in_source_order(self) -> list[Field]:
if self.fields is None:
return []
if self.source_order is None:
return list(filter(lambda x: not x.skip_source_order(), self.fields))
fields = []
for field_name in self.source_order:
field = None
for field in self.fields:
if field.skip_source_order():
continue
if field.name == field_name:
field = field
break
fields.append(field)
return fields
@dataclass
| Node |
python | walkccc__LeetCode | solutions/689. Maximum Sum of 3 Non-Overlapping Subarrays/689.py | {
"start": 0,
"end": 1030
} | class ____:
def maxSumOfThreeSubarrays(self, nums: list[int], k: int) -> list[int]:
n = len(nums) - k + 1
# sums[i] := sum(nums[i..i + k))
sums = [0] * n
# l[i] := the index in [0..i] that has the maximum sums[i]
l = [0] * n
# r[i] := the index in [i..n) that has the maximum sums[i]
r = [0] * n
summ = 0
for i, num in enumerate(nums):
summ += num
if i >= k:
summ -= nums[i - k]
if i >= k - 1:
sums[i - k + 1] = summ
maxIndex = 0
for i in range(n):
if sums[i] > sums[maxIndex]:
maxIndex = i
l[i] = maxIndex
maxIndex = n - 1
for i in range(n - 1, -1, -1):
if sums[i] >= sums[maxIndex]:
maxIndex = i
r[i] = maxIndex
ans = [-1, -1, -1]
for i in range(k, n - k):
if (ans[0] == -1 or
sums[ans[0]] + sums[ans[1]] + sums[ans[2]] <
sums[l[i - k]] + sums[i] + sums[r[i + k]]):
ans[0] = l[i - k]
ans[1] = i
ans[2] = r[i + k]
return ans
| Solution |
python | realpython__materials | pyqt-calculator-tutorial/pycalc/pycalc.py | {
"start": 359,
"end": 2262
} | class ____(QMainWindow):
"""PyCalc's main window (GUI or view)."""
def __init__(self):
super().__init__()
self.setWindowTitle("PyCalc")
self.setFixedSize(WINDOW_SIZE, WINDOW_SIZE)
self.generalLayout = QVBoxLayout()
centralWidget = QWidget(self)
centralWidget.setLayout(self.generalLayout)
self.setCentralWidget(centralWidget)
self._createDisplay()
self._createButtons()
def _createDisplay(self):
self.display = QLineEdit()
self.display.setFixedHeight(DISPLAY_HEIGHT)
self.display.setAlignment(Qt.AlignmentFlag.AlignRight)
self.display.setReadOnly(True)
self.generalLayout.addWidget(self.display)
def _createButtons(self):
self.buttonMap = {}
buttonsLayout = QGridLayout()
keyBoard = [
["7", "8", "9", "/", "C"],
["4", "5", "6", "*", "("],
["1", "2", "3", "-", ")"],
["0", "00", ".", "+", "="],
]
for row, keys in enumerate(keyBoard):
for col, key in enumerate(keys):
self.buttonMap[key] = QPushButton(key)
self.buttonMap[key].setFixedSize(BUTTON_SIZE, BUTTON_SIZE)
buttonsLayout.addWidget(self.buttonMap[key], row, col)
self.generalLayout.addLayout(buttonsLayout)
def setDisplayText(self, text):
"""Set the display's text."""
self.display.setText(text)
self.display.setFocus()
def displayText(self):
"""Get the display's text."""
return self.display.text()
def clearDisplay(self):
"""Clear the display."""
self.setDisplayText("")
def evaluateExpression(expression):
"""Evaluate an expression (Model)."""
try:
result = str(eval(expression, {}, {}))
except Exception:
result = ERROR_MSG
return result
| PyCalcWindow |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/name_mangling.py | {
"start": 67,
"end": 169
} | class ____(Foo):
__address = None
#: a member having mangled-like name
_Baz__email = None
| Bar |
python | has2k1__plotnine | plotnine/mapping/evaluation.py | {
"start": 713,
"end": 5913
} | class ____:
"""
Stage allows you evaluating mapping at more than one stage
You can evaluate an expression of a variable in a dataframe, and
later evaluate an expression that modifies the values mapped to
the scale.
Parameters
----------
start : str | array_like | scalar
Aesthetic expression using primary variables from the layer
data.
after_stat : str
Aesthetic expression using variables calculated by the stat.
after_scale : str
Aesthetic expression using aesthetics of the layer.
"""
def __init__(self, start=None, after_stat=None, after_scale=None):
self.start = start
self.after_stat = after_stat
self.after_scale = after_scale
def __repr__(self):
"""
Repr for staged mapping
"""
# Shorter representation when the mapping happens at a
# single stage
if self.after_stat is None and self.after_scale is None:
return f"{repr(self.start)}"
if self.start is None and self.after_scale is None:
return f"after_stat({repr(self.after_stat)})"
if self.start is None and self.after_stat is None:
return f"after_scale({repr(self.after_scale)})"
return (
f"stage(start={repr(self.start)}, "
f"after_stat={repr(self.after_stat)}, "
f"after_scale={repr(self.after_scale)})"
)
def after_stat(x):
"""
Evaluate mapping after statistic has been calculated
Parameters
----------
x : str
An expression
See Also
--------
plotnine.after_scale
plotnine.stage
"""
return stage(after_stat=x)
def after_scale(x):
"""
Evaluate mapping after variable has been mapped to the scale
This gives the user a chance to alter the value of a variable
in the final units of the scale e.g. the rgb hex color.
Parameters
----------
x : str
An expression
See Also
--------
plotnine.after_stat
plotnine.stage
"""
return stage(after_scale=x)
def evaluate(
aesthetics: aes | dict[str, Any], data: pd.DataFrame, env: Environment
) -> pd.DataFrame:
"""
Evaluate aesthetics
Parameters
----------
aesthetics :
Aesthetics to evaluate. They must be of the form {name: expr}
data :
Dataframe whose columns are/may-be variables in the aesthetic
expressions i.e. it is a namespace with variables.
env :
Environment in which the aesthetics are evaluated
Returns
-------
pd.DataFrame
Dataframe of the form {name: result}, where each column is the
result from evaluating an expression.
Examples
--------
>>> from plotnine.mapping import Environment
>>> var1 = 2
>>> env = Environment.capture()
>>> df = pd.DataFrame({'x': range(1, 6)})
>>> aesthetics = {'y': 'x**var1'}
>>> evaluate(aesthetics, df, env)
y
0 1
1 4
2 9
3 16
4 25
"""
env = env.with_outer_namespace(EVAL_ENVIRONMENT)
# Store evaluation results in a dict column in a dict
evaled = {}
# If a column name is not in the data, it is evaluated/transformed
# in the environment of the call to ggplot
for ae, col in aesthetics.items():
if isinstance(col, str):
if col in data:
evaled[ae] = data[col]
else:
try:
new_val = env.eval(col, inner_namespace=data)
except Exception as e:
msg = _TPL_EVAL_FAIL.format(ae, col, str(e))
raise PlotnineError(msg) from e
try:
evaled[ae] = new_val
except Exception as e:
msg = _TPL_BAD_EVAL_TYPE.format(
ae, col, str(type(new_val)), str(e)
)
raise PlotnineError(msg) from e
elif pdtypes.is_list_like(col):
n = len(col)
if len(data) and n != len(data) and n != 1:
msg = (
"Aesthetics must either be length one, "
"or the same length as the data"
)
raise PlotnineError(msg)
evaled[ae] = col
elif is_known_scalar(col) or col is None:
if not len(evaled):
col = [col]
evaled[ae] = col
else:
msg = f"Do not know how to deal with aesthetic '{ae}'"
raise PlotnineError(msg)
# Using `type` preserves the subclass of pd.DataFrame
index = data.index if len(data.index) and evaled else None
evaled = type(data)(data=evaled, index=index)
return evaled
def is_known_scalar(value):
"""
Return True if value is a type we expect in a dataframe
"""
def _is_datetime_or_timedelta(value):
# Using pandas.Series helps catch python, numpy and pandas
# versions of these types
return pd.Series(value).dtype.kind in ("M", "m")
return not np.iterable(value) and (
isinstance(value, numbers.Number) or _is_datetime_or_timedelta(value)
)
| stage |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 2465,
"end": 2593
} | class ____(models.Manager):
def get_queryset(self):
return super().get_queryset().exclude(id=1)
| AlternativePollManager |
python | docker__docker-py | docker/types/containers.py | {
"start": 7955,
"end": 23806
} | class ____(dict):
def __init__(self, version, binds=None, port_bindings=None,
lxc_conf=None, publish_all_ports=False, links=None,
privileged=False, dns=None, dns_search=None,
volumes_from=None, network_mode=None, restart_policy=None,
cap_add=None, cap_drop=None, devices=None, extra_hosts=None,
read_only=None, pid_mode=None, ipc_mode=None,
security_opt=None, ulimits=None, log_config=None,
mem_limit=None, memswap_limit=None, mem_reservation=None,
kernel_memory=None, mem_swappiness=None, cgroup_parent=None,
group_add=None, cpu_quota=None, cpu_period=None,
blkio_weight=None, blkio_weight_device=None,
device_read_bps=None, device_write_bps=None,
device_read_iops=None, device_write_iops=None,
oom_kill_disable=False, shm_size=None, sysctls=None,
tmpfs=None, oom_score_adj=None, dns_opt=None, cpu_shares=None,
cpuset_cpus=None, userns_mode=None, uts_mode=None,
pids_limit=None, isolation=None, auto_remove=False,
storage_opt=None, init=None, init_path=None,
volume_driver=None, cpu_count=None, cpu_percent=None,
nano_cpus=None, cpuset_mems=None, runtime=None, mounts=None,
cpu_rt_period=None, cpu_rt_runtime=None,
device_cgroup_rules=None, device_requests=None,
cgroupns=None):
if mem_limit is not None:
self['Memory'] = parse_bytes(mem_limit)
if memswap_limit is not None:
self['MemorySwap'] = parse_bytes(memswap_limit)
if mem_reservation:
self['MemoryReservation'] = parse_bytes(mem_reservation)
if kernel_memory:
self['KernelMemory'] = parse_bytes(kernel_memory)
if mem_swappiness is not None:
if not isinstance(mem_swappiness, int):
raise host_config_type_error(
'mem_swappiness', mem_swappiness, 'int'
)
self['MemorySwappiness'] = mem_swappiness
if shm_size is not None:
if isinstance(shm_size, str):
shm_size = parse_bytes(shm_size)
self['ShmSize'] = shm_size
if pid_mode:
if version_lt(version, '1.24') and pid_mode != 'host':
raise host_config_value_error('pid_mode', pid_mode)
self['PidMode'] = pid_mode
if ipc_mode:
self['IpcMode'] = ipc_mode
if privileged:
self['Privileged'] = privileged
if oom_kill_disable:
self['OomKillDisable'] = oom_kill_disable
if oom_score_adj:
if version_lt(version, '1.22'):
raise host_config_version_error('oom_score_adj', '1.22')
if not isinstance(oom_score_adj, int):
raise host_config_type_error(
'oom_score_adj', oom_score_adj, 'int'
)
self['OomScoreAdj'] = oom_score_adj
if publish_all_ports:
self['PublishAllPorts'] = publish_all_ports
if read_only is not None:
self['ReadonlyRootfs'] = read_only
if dns_search:
self['DnsSearch'] = dns_search
if network_mode == 'host' and port_bindings:
raise host_config_incompatible_error(
'network_mode', 'host', 'port_bindings'
)
self['NetworkMode'] = network_mode or 'default'
if restart_policy:
if not isinstance(restart_policy, dict):
raise host_config_type_error(
'restart_policy', restart_policy, 'dict'
)
self['RestartPolicy'] = restart_policy
if cap_add:
self['CapAdd'] = cap_add
if cap_drop:
self['CapDrop'] = cap_drop
if devices:
self['Devices'] = parse_devices(devices)
if group_add:
self['GroupAdd'] = [str(grp) for grp in group_add]
if dns is not None:
self['Dns'] = dns
if dns_opt is not None:
self['DnsOptions'] = dns_opt
if security_opt is not None:
if not isinstance(security_opt, list):
raise host_config_type_error(
'security_opt', security_opt, 'list'
)
self['SecurityOpt'] = security_opt
if sysctls:
if not isinstance(sysctls, dict):
raise host_config_type_error('sysctls', sysctls, 'dict')
self['Sysctls'] = {}
for k, v in sysctls.items():
self['Sysctls'][k] = str(v)
if volumes_from is not None:
if isinstance(volumes_from, str):
volumes_from = volumes_from.split(',')
self['VolumesFrom'] = volumes_from
if binds is not None:
self['Binds'] = convert_volume_binds(binds)
if port_bindings is not None:
self['PortBindings'] = convert_port_bindings(port_bindings)
if extra_hosts is not None:
if isinstance(extra_hosts, dict):
extra_hosts = format_extra_hosts(extra_hosts)
self['ExtraHosts'] = extra_hosts
if links is not None:
self['Links'] = normalize_links(links)
if isinstance(lxc_conf, dict):
formatted = []
for k, v in lxc_conf.items():
formatted.append({'Key': k, 'Value': str(v)})
lxc_conf = formatted
if lxc_conf is not None:
self['LxcConf'] = lxc_conf
if cgroup_parent is not None:
self['CgroupParent'] = cgroup_parent
if ulimits is not None:
if not isinstance(ulimits, list):
raise host_config_type_error('ulimits', ulimits, 'list')
self['Ulimits'] = []
for lmt in ulimits:
if not isinstance(lmt, Ulimit):
lmt = Ulimit(**lmt)
self['Ulimits'].append(lmt)
if log_config is not None:
if not isinstance(log_config, LogConfig):
if not isinstance(log_config, dict):
raise host_config_type_error(
'log_config', log_config, 'LogConfig'
)
log_config = LogConfig(**log_config)
self['LogConfig'] = log_config
if cpu_quota:
if not isinstance(cpu_quota, int):
raise host_config_type_error('cpu_quota', cpu_quota, 'int')
self['CpuQuota'] = cpu_quota
if cpu_period:
if not isinstance(cpu_period, int):
raise host_config_type_error('cpu_period', cpu_period, 'int')
self['CpuPeriod'] = cpu_period
if cpu_shares:
if not isinstance(cpu_shares, int):
raise host_config_type_error('cpu_shares', cpu_shares, 'int')
self['CpuShares'] = cpu_shares
if cpuset_cpus:
self['CpusetCpus'] = cpuset_cpus
if cpuset_mems:
if not isinstance(cpuset_mems, str):
raise host_config_type_error(
'cpuset_mems', cpuset_mems, 'str'
)
self['CpusetMems'] = cpuset_mems
if cpu_rt_period:
if version_lt(version, '1.25'):
raise host_config_version_error('cpu_rt_period', '1.25')
if not isinstance(cpu_rt_period, int):
raise host_config_type_error(
'cpu_rt_period', cpu_rt_period, 'int'
)
self['CPURealtimePeriod'] = cpu_rt_period
if cpu_rt_runtime:
if version_lt(version, '1.25'):
raise host_config_version_error('cpu_rt_runtime', '1.25')
if not isinstance(cpu_rt_runtime, int):
raise host_config_type_error(
'cpu_rt_runtime', cpu_rt_runtime, 'int'
)
self['CPURealtimeRuntime'] = cpu_rt_runtime
if blkio_weight:
if not isinstance(blkio_weight, int):
raise host_config_type_error(
'blkio_weight', blkio_weight, 'int'
)
if version_lt(version, '1.22'):
raise host_config_version_error('blkio_weight', '1.22')
self["BlkioWeight"] = blkio_weight
if blkio_weight_device:
if not isinstance(blkio_weight_device, list):
raise host_config_type_error(
'blkio_weight_device', blkio_weight_device, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('blkio_weight_device', '1.22')
self["BlkioWeightDevice"] = blkio_weight_device
if device_read_bps:
if not isinstance(device_read_bps, list):
raise host_config_type_error(
'device_read_bps', device_read_bps, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_read_bps', '1.22')
self["BlkioDeviceReadBps"] = device_read_bps
if device_write_bps:
if not isinstance(device_write_bps, list):
raise host_config_type_error(
'device_write_bps', device_write_bps, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_write_bps', '1.22')
self["BlkioDeviceWriteBps"] = device_write_bps
if device_read_iops:
if not isinstance(device_read_iops, list):
raise host_config_type_error(
'device_read_iops', device_read_iops, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_read_iops', '1.22')
self["BlkioDeviceReadIOps"] = device_read_iops
if device_write_iops:
if not isinstance(device_write_iops, list):
raise host_config_type_error(
'device_write_iops', device_write_iops, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_write_iops', '1.22')
self["BlkioDeviceWriteIOps"] = device_write_iops
if tmpfs:
if version_lt(version, '1.22'):
raise host_config_version_error('tmpfs', '1.22')
self["Tmpfs"] = convert_tmpfs_mounts(tmpfs)
if userns_mode:
if version_lt(version, '1.23'):
raise host_config_version_error('userns_mode', '1.23')
if userns_mode != "host":
raise host_config_value_error("userns_mode", userns_mode)
self['UsernsMode'] = userns_mode
if uts_mode:
if uts_mode != "host":
raise host_config_value_error("uts_mode", uts_mode)
self['UTSMode'] = uts_mode
if pids_limit:
if not isinstance(pids_limit, int):
raise host_config_type_error('pids_limit', pids_limit, 'int')
if version_lt(version, '1.23'):
raise host_config_version_error('pids_limit', '1.23')
self["PidsLimit"] = pids_limit
if isolation:
if not isinstance(isolation, str):
raise host_config_type_error('isolation', isolation, 'string')
if version_lt(version, '1.24'):
raise host_config_version_error('isolation', '1.24')
self['Isolation'] = isolation
if auto_remove:
if version_lt(version, '1.25'):
raise host_config_version_error('auto_remove', '1.25')
self['AutoRemove'] = auto_remove
if storage_opt is not None:
if version_lt(version, '1.24'):
raise host_config_version_error('storage_opt', '1.24')
self['StorageOpt'] = storage_opt
if init is not None:
if version_lt(version, '1.25'):
raise host_config_version_error('init', '1.25')
self['Init'] = init
if init_path is not None:
if version_lt(version, '1.25'):
raise host_config_version_error('init_path', '1.25')
if version_gte(version, '1.29'):
# https://github.com/moby/moby/pull/32470
raise host_config_version_error('init_path', '1.29', False)
self['InitPath'] = init_path
if volume_driver is not None:
self['VolumeDriver'] = volume_driver
if cpu_count:
if not isinstance(cpu_count, int):
raise host_config_type_error('cpu_count', cpu_count, 'int')
if version_lt(version, '1.25'):
raise host_config_version_error('cpu_count', '1.25')
self['CpuCount'] = cpu_count
if cpu_percent:
if not isinstance(cpu_percent, int):
raise host_config_type_error('cpu_percent', cpu_percent, 'int')
if version_lt(version, '1.25'):
raise host_config_version_error('cpu_percent', '1.25')
self['CpuPercent'] = cpu_percent
if nano_cpus:
if not isinstance(nano_cpus, int):
raise host_config_type_error('nano_cpus', nano_cpus, 'int')
if version_lt(version, '1.25'):
raise host_config_version_error('nano_cpus', '1.25')
self['NanoCpus'] = nano_cpus
if runtime:
if version_lt(version, '1.25'):
raise host_config_version_error('runtime', '1.25')
self['Runtime'] = runtime
if mounts is not None:
if version_lt(version, '1.30'):
raise host_config_version_error('mounts', '1.30')
self['Mounts'] = mounts
if device_cgroup_rules is not None:
if version_lt(version, '1.28'):
raise host_config_version_error('device_cgroup_rules', '1.28')
if not isinstance(device_cgroup_rules, list):
raise host_config_type_error(
'device_cgroup_rules', device_cgroup_rules, 'list'
)
self['DeviceCgroupRules'] = device_cgroup_rules
if device_requests is not None:
if version_lt(version, '1.40'):
raise host_config_version_error('device_requests', '1.40')
if not isinstance(device_requests, list):
raise host_config_type_error(
'device_requests', device_requests, 'list'
)
self['DeviceRequests'] = []
for req in device_requests:
if not isinstance(req, DeviceRequest):
req = DeviceRequest(**req)
self['DeviceRequests'].append(req)
if cgroupns:
self['CgroupnsMode'] = cgroupns
def host_config_type_error(param, param_value, expected):
return TypeError(
f'Invalid type for {param} param: expected {expected} '
f'but found {type(param_value)}'
)
def host_config_version_error(param, version, less_than=True):
operator = '<' if less_than else '>'
return errors.InvalidVersion(
f'{param} param is not supported in API versions {operator} {version}',
)
def host_config_value_error(param, param_value):
return ValueError(f'Invalid value for {param} param: {param_value}')
def host_config_incompatible_error(param, param_value, incompatible_param):
return errors.InvalidArgument(
f'\"{param_value}\" {param} is incompatible with {incompatible_param}'
)
| HostConfig |
python | encode__django-rest-framework | tests/test_serializer_nested.py | {
"start": 8462,
"end": 8551
} | class ____(models.Model):
address = models.CharField(max_length=100)
| NestedWriteProfile |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.